From 2020a1edcd2f39ab56ef9e2367e0b36cfd2584fa Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Tue, 19 Sep 2023 13:46:02 -0700 Subject: [PATCH 01/55] test(i): Remap alias fields when refreshing test documents (#1897) ## Relevant issue(s) N/A ## Description This PR fixes a bug that causes documents to not be correctly refreshed when they are created using alias fields. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? `make test:changes` Specify the platform(s) on which this was tested: - MacOS --- tests/integration/utils2.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 622478f513..cc2a4c4749 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -900,6 +900,11 @@ func refreshDocuments( // Just use the collection from the first relevant node, as all will be the same for this // purpose. collection := getNodeCollections(action.NodeID, s.collections)[0][action.CollectionID] + if err := doc.RemapAliasFieldsAndDockey(collection.Schema().Fields); err != nil { + // If an err has been returned, ignore it - it may be expected and if not + // the test will fail later anyway + continue + } // The document may have been mutated by other actions, so to be sure we have the latest // version without having to worry about the individual update mechanics we fetch it. From b8567c2a32e3d73ad8626e6dd3eaa78b12068eae Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Tue, 19 Sep 2023 14:51:12 -0700 Subject: [PATCH 02/55] ci: Parallelize change detector (#1871) ## Relevant issue(s) Resolves #1436 ## Description This PR allows the change detector to run in parallel with other tests. There are also a few other improvements to the change detector: - change detector logic is moved to a new `tests/change_detector` package - change detector specific logic in `tests/integration` is now more obvious - change detector now operates on distinct `source` and `target` branches - this enables you to test branches outside of the currently checked out branch - change detector manages its own environment variables. - running `go test ./tests/change_detector/...` will work without env variables - change detector temp directories are now all cleaned up after running Todo before merging: - [x] update default branch and repository - [x] document change detector data format changes ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? 
`make test:changes`

Specify the platform(s) on which this was tested:
- MacOS
---
 Makefile                                      |   4 +-
 .../i1436-no-change-tests-updated.md          |   3 +
 tests/change_detector/README.md               |  15 +
 tests/change_detector/change_detector_test.go | 205 ++++++++++++
 tests/change_detector/utils.go                | 102 ++++++
 tests/integration/change_detector.go          | 304 ------------------
 tests/integration/utils2.go                   | 188 +++++------
 7 files changed, 404 insertions(+), 417 deletions(-)
 create mode 100644 docs/data_format_changes/i1436-no-change-tests-updated.md
 create mode 100644 tests/change_detector/README.md
 create mode 100644 tests/change_detector/change_detector_test.go
 create mode 100644 tests/change_detector/utils.go
 delete mode 100644 tests/integration/change_detector.go

diff --git a/Makefile b/Makefile
index 21fcfcedf1..60350a6046 100644
--- a/Makefile
+++ b/Makefile
@@ -34,6 +34,7 @@ TEST_FLAGS=-race -shuffle=on -timeout 300s
 PLAYGROUND_DIRECTORY=playground
 LENS_TEST_DIRECTORY=tests/integration/schema/migrations
 CLI_TEST_DIRECTORY=tests/integration/cli
+CHANGE_DETECTOR_TEST_DIRECTORY=tests/change_detector
 DEFAULT_TEST_DIRECTORIES=$$(go list ./... | grep -v -e $(LENS_TEST_DIRECTORY) -e $(CLI_TEST_DIRECTORY))
 
 default:
@@ -294,8 +295,7 @@ test\:coverage-html:
 
 .PHONY: test\:changes
 test\:changes:
-	@$(MAKE) deps:lens
-	env DEFRA_DETECT_DATABASE_CHANGES=true DEFRA_CLIENT_GO=true gotestsum -- ./... -shuffle=on -p 1
+	gotestsum --format testname -- ./$(CHANGE_DETECTOR_TEST_DIRECTORY)/... --tags change_detector
 
 .PHONY: validate\:codecov
 validate\:codecov:
diff --git a/docs/data_format_changes/i1436-no-change-tests-updated.md b/docs/data_format_changes/i1436-no-change-tests-updated.md
new file mode 100644
index 0000000000..89f7305133
--- /dev/null
+++ b/docs/data_format_changes/i1436-no-change-tests-updated.md
@@ -0,0 +1,3 @@
+# Parallel change detector
+
+This is not a breaking change. The change detector has been updated to allow for parallel test runs. There were changes to environment variables and test setup that make the previous version of the change detector incompatible with this version.
diff --git a/tests/change_detector/README.md b/tests/change_detector/README.md
new file mode 100644
index 0000000000..4d824fb60f
--- /dev/null
+++ b/tests/change_detector/README.md
@@ -0,0 +1,15 @@
+# Change Detector
+
+The change detector is used to detect data format changes between versions of DefraDB.
+
+## How it works
+
+The tests run using a `source` and `target` branch of DefraDB. Each branch is cloned into a temporary directory and dependencies are installed.
+
+The test runner executes all of the common test packages available in the `source` and `target` tests directories.
+
+For each test package execution, the following steps occur:
+
+- Create a temporary data directory. This is used to share data between `source` and `target`.
+- Run the `source` version in setup only mode. This creates test fixtures in the shared data directory.
+- Run the `target` version in change detector mode. This skips the setup and executes the tests using the shared data directory.
diff --git a/tests/change_detector/change_detector_test.go b/tests/change_detector/change_detector_test.go
new file mode 100644
index 0000000000..ac9bc1a23f
--- /dev/null
+++ b/tests/change_detector/change_detector_test.go
@@ -0,0 +1,205 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +//go:build change_detector + +package change_detector + +import ( + "fmt" + "io/fs" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestChanges(t *testing.T) { + sourceRepoDir := t.TempDir() + execClone(t, sourceRepoDir, Repository, SourceBranch) + execMakeDeps(t, sourceRepoDir) + + var targetRepoDir string + if TargetBranch == "" { + // default to the local branch + out, err := exec.Command("git", "rev-parse", "--show-toplevel").Output() + require.NoError(t, err, string(out)) + targetRepoDir = strings.TrimSpace(string(out)) + } else { + // check out the target branch + targetRepoDir = t.TempDir() + execClone(t, targetRepoDir, Repository, TargetBranch) + execMakeDeps(t, targetRepoDir) + } + + if checkIfDatabaseFormatChangesAreDocumented(t, sourceRepoDir, targetRepoDir) { + t.Skip("skipping test with documented database format changes") + } + + targetRepoTestDir := filepath.Join(targetRepoDir, "tests", "integration") + targetRepoPkgList := execList(t, targetRepoTestDir) + + sourceRepoTestDir := filepath.Join(sourceRepoDir, "tests", "integration") + sourceRepoPkgList := execList(t, sourceRepoTestDir) + + sourceRepoPkgMap := make(map[string]bool) + for _, pkg := range sourceRepoPkgList { + sourceRepoPkgMap[pkg] = true + } + + for _, pkg := range targetRepoPkgList { + pkgName := strings.TrimPrefix(pkg, "github.com/sourcenetwork/defradb/") + t.Run(pkgName, func(t *testing.T) { + if pkg == "" || !sourceRepoPkgMap[pkg] { + t.Skip("skipping unknown or new test package") + } + + t.Parallel() + dataDir := t.TempDir() + + sourceTestPkg := filepath.Join(sourceRepoDir, pkgName) + execTest(t, sourceTestPkg, dataDir, true) + + targetTestPkg := filepath.Join(targetRepoDir, pkgName) + execTest(t, targetTestPkg, dataDir, false) + }) + } +} + +// execList returns a list of all packages in the given directory. +func execList(t *testing.T, dir string) []string { + cmd := exec.Command("go", "list", "./...") + cmd.Dir = dir + + out, err := cmd.Output() + require.NoError(t, err, string(out)) + + return strings.Split(string(out), "\n") +} + +// execTest runs the tests in the given directory and sets the data +// directory and setup only environment variables. +func execTest(t *testing.T, dir, dataDir string, setupOnly bool) { + cmd := exec.Command("go", "test", ".", "-count", "1", "-v") + cmd.Dir = dir + cmd.Env = append( + os.Environ(), + fmt.Sprintf("%s=%s", enableEnvName, "true"), + fmt.Sprintf("%s=%s", rootDataDirEnvName, dataDir), + ) + + if setupOnly { + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", setupOnlyEnvName, "true")) + } + + out, err := cmd.Output() + require.NoError(t, err, string(out)) +} + +// execClone clones the repo from the given url and branch into the directory. +func execClone(t *testing.T, dir, url, branch string) { + cmd := exec.Command( + "git", + "clone", + "--single-branch", + "--branch", branch, + "--depth", "1", + url, + dir, + ) + + out, err := cmd.Output() + require.NoError(t, err, string(out)) +} + +// execMakeDeps runs make:deps in the given directory. 
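+// Note that the target invoked is `deps:lens`, which installs the lens
+// dependencies each cloned branch needs before its tests can build.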
+func execMakeDeps(t *testing.T, dir string) { + cmd := exec.Command("make", "deps:lens") + cmd.Dir = dir + + out, err := cmd.Output() + require.NoError(t, err, string(out)) +} + +func checkIfDatabaseFormatChangesAreDocumented(t *testing.T, sourceDir, targetDir string) bool { + sourceChanges, ok := getDatabaseFormatDocumentation(t, sourceDir, false) + require.True(t, ok, "Documentation directory not found") + + changes := make(map[string]struct{}, len(sourceChanges)) + for _, f := range sourceChanges { + // Note: we assume flat directory for now - sub directories are not expanded + changes[f.Name()] = struct{}{} + } + + targetChanges, ok := getDatabaseFormatDocumentation(t, targetDir, true) + require.True(t, ok, "Documentation directory not found") + + for _, f := range targetChanges { + if _, isChangeOld := changes[f.Name()]; !isChangeOld { + // If there is a new file in the directory then the change + // has been documented and the test should pass + return true + } + } + + return false +} + +func getDatabaseFormatDocumentation(t *testing.T, startPath string, allowDescend bool) ([]fs.DirEntry, bool) { + startInfo, err := os.Stat(startPath) + require.NoError(t, err) + + var currentDirectory string + if startInfo.IsDir() { + currentDirectory = startPath + } else { + currentDirectory = path.Dir(startPath) + } + + for { + directoryContents, err := os.ReadDir(currentDirectory) + require.NoError(t, err) + + for _, directoryItem := range directoryContents { + directoryItemPath := path.Join(currentDirectory, directoryItem.Name()) + if directoryItem.Name() == documentationDirectoryName { + probableFormatChangeDirectoryContents, err := os.ReadDir(directoryItemPath) + require.NoError(t, err) + + for _, possibleDocumentationItem := range probableFormatChangeDirectoryContents { + if path.Ext(possibleDocumentationItem.Name()) == ".md" { + // If the directory's name matches the expected, and contains .md files + // we assume it is the documentation directory + return probableFormatChangeDirectoryContents, true + } + } + } else { + if directoryItem.IsDir() { + childContents, directoryFound := getDatabaseFormatDocumentation(t, directoryItemPath, false) + if directoryFound { + return childContents, true + } + } + } + } + + if allowDescend { + // If not found in this directory, continue down the path + currentDirectory = path.Dir(currentDirectory) + require.True(t, currentDirectory != "." && currentDirectory != "/") + } else { + return []fs.DirEntry{}, false + } + } +} diff --git a/tests/change_detector/utils.go b/tests/change_detector/utils.go new file mode 100644 index 0000000000..4e6e938aa5 --- /dev/null +++ b/tests/change_detector/utils.go @@ -0,0 +1,102 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package change_detector + +import ( + "os" + "path" + "strconv" + "testing" + + "github.com/stretchr/testify/require" +) + +var ( + // Enabled is true when the change detector is running. + Enabled bool + // SetupOnly is true when the change detector is running in setup mode. + SetupOnly bool + // Repository is the url of the repository to run change detector on. + Repository string + // SourceBranch is the name of the source branch to run change detector on. 
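+	// It defaults to `develop` and can be overridden via the
+	// DEFRA_CHANGE_DETECTOR_SOURCE_BRANCH environment variable.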
+ SourceBranch string + // TargetBranch is the name of the target branch to run change detector on. + TargetBranch string + // rootDatabaseDir is the shared database directory for running tests. + rootDatabaseDir string + // previousTestCaseTestName is the name of the previous test. + previousTestCaseTestName string +) + +const ( + repositoryEnvName = "DEFRA_CHANGE_DETECTOR_REPOSITORY" + sourceBranchEnvName = "DEFRA_CHANGE_DETECTOR_SOURCE_BRANCH" + targetBranchEnvName = "DEFRA_CHANGE_DETECTOR_TARGET_BRANCH" + setupOnlyEnvName = "DEFRA_CHANGE_DETECTOR_SETUP_ONLY" + rootDataDirEnvName = "DEFRA_CHANGE_DETECTOR_ROOT_DATA_DIR" + enableEnvName = "DEFRA_CHANGE_DETECTOR_ENABLE" +) + +const ( + defaultRepository = "https://github.com/sourcenetwork/defradb.git" + defaultSourceBranch = "develop" + documentationDirectoryName = "data_format_changes" +) + +func init() { + Enabled, _ = strconv.ParseBool(os.Getenv(enableEnvName)) + SetupOnly, _ = strconv.ParseBool(os.Getenv(setupOnlyEnvName)) + TargetBranch = os.Getenv(targetBranchEnvName) + rootDatabaseDir = os.Getenv(rootDataDirEnvName) + + if value, ok := os.LookupEnv(repositoryEnvName); ok { + Repository = value + } else { + Repository = defaultRepository + } + + if value, ok := os.LookupEnv(sourceBranchEnvName); ok { + SourceBranch = value + } else { + SourceBranch = defaultSourceBranch + } +} + +// DatabaseDir returns the database directory for change detector test. +func DatabaseDir(t testing.TB) string { + return path.Join(rootDatabaseDir, t.Name()) +} + +// PreTestChecks skips any test that can't be run by the change detector. +func PreTestChecks(t *testing.T, collectionNames []string) { + if !Enabled { + return + } + + if previousTestCaseTestName == t.Name() { + t.Skip("skipping duplicate test") + } + previousTestCaseTestName = t.Name() + + if len(collectionNames) == 0 { + t.Skip("skipping test with no collections") + } + + if SetupOnly { + return + } + + _, err := os.Stat(DatabaseDir(t)) + if os.IsNotExist(err) { + t.Skip("skipping new test package") + } + require.NoError(t, err) +} diff --git a/tests/integration/change_detector.go b/tests/integration/change_detector.go deleted file mode 100644 index 15f17fb16b..0000000000 --- a/tests/integration/change_detector.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package tests - -import ( - "context" - "fmt" - "io/fs" - "math/rand" - "os" - "os/exec" - "path" - "runtime" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -var skip bool - -func IsDetectingDbChanges() bool { - return DetectDbChanges -} - -// Returns true if test should pass early -func DetectDbChangesPreTestChecks( - t *testing.T, - collectionNames []string, -) bool { - if skip { - t.SkipNow() - } - - if previousTestCaseTestName == t.Name() { - // The database format changer currently only supports running the first test - // case, if a second case is detected we return early - return true - } - previousTestCaseTestName = t.Name() - - if areDatabaseFormatChangesDocumented { - // If we are checking that database formatting changes have been made and - // documented, and changes are documented, then the tests can all pass. 
- return true - } - - if len(collectionNames) == 0 { - // If the test doesn't specify any collections, then we can't use it to check - // the database format, so we skip it - t.SkipNow() - } - - if !SetupOnly { - dbDirectory := path.Join(rootDatabaseDir, t.Name()) - _, err := os.Stat(dbDirectory) - if os.IsNotExist(err) { - // This is a new test that does not exist in the target branch, we should - // skip it. - t.SkipNow() - } else { - require.NoError(t, err) - } - } - - return false -} - -func detectDbChangesInit(repository string, targetBranch string) { - badgerFile = true - badgerInMemory = false - - if SetupOnly { - // Only the primary test process should perform the setup below - return - } - - defraTempDir := path.Join(os.TempDir(), "defradb") - changeDetectorTempDir := path.Join(defraTempDir, "tests", "changeDetector") - - latestTargetCommitHash := getLatestCommit(repository, targetBranch) - detectDbChangesCodeDir = path.Join(changeDetectorTempDir, "code", latestTargetCommitHash) - r := rand.New(rand.NewSource(time.Now().Unix())) - randNumber := r.Int() - dbsDir := path.Join(changeDetectorTempDir, "dbs", fmt.Sprint(randNumber)) - - testPackagePath, isIntegrationTest := getTestPackagePath() - if !isIntegrationTest { - skip = true - return - } - rootDatabaseDir = path.Join(dbsDir, strings.ReplaceAll(testPackagePath, "/", "_")) - - _, err := os.Stat(detectDbChangesCodeDir) - // Warning - there is a race condition here, where if running multiple packages in - // parallel (as per default) against a new target commit multiple test pacakges will - // try and clone the target branch at the same time (and will fail). - // This could be solved by using a file lock or similar, however running the change - // detector in parallel is significantly slower than running it serially due to machine - // resource constraints, so I am leaving the race condition in and recommending running - // the change detector with the CLI args `-p 1` - if os.IsNotExist(err) { - cloneCmd := exec.Command( - "git", - "clone", - "-b", - targetBranch, - "--single-branch", - repository, - detectDbChangesCodeDir, - ) - cloneCmd.Stdout = os.Stdout - cloneCmd.Stderr = os.Stderr - err := cloneCmd.Run() - if err != nil { - panic(err) - } - } else if err != nil { - panic(err) - } else { - // Cache must be cleaned, or it might not run the test setup! - // Note: this also acts as a race condition if multiple build are running against the - // same target if this happens some tests might be silently skipped if the - // child-setup fails. Currently I think it is worth it for slightly faster build - // times, but feel very free to change this! - goTestCacheCmd := exec.Command("go", "clean", "-testcache") - goTestCacheCmd.Dir = detectDbChangesCodeDir - err = goTestCacheCmd.Run() - if err != nil { - panic(err) - } - } - - areDatabaseFormatChangesDocumented = checkIfDatabaseFormatChangesAreDocumented() - if areDatabaseFormatChangesDocumented { - // Dont bother doing anything if the changes are documented - return - } - - targetTestPackage := detectDbChangesCodeDir + "/tests/integration/" + testPackagePath - - _, err = os.Stat(targetTestPackage) - if os.IsNotExist(err) { - // This is a new test package, and thus the change detector is not applicable - // as the tests do not exist in the target branch. 
- skip = true - return - } else if err != nil { - panic(err) - } - - // If we are checking for database changes, and we are not seting up the database, - // then we must be in the main test process, and need to create a new process - // setting up the database for this test using the old branch We should not setup - // the database using the current branch/process - goTestCmd := exec.Command( - "go", - "test", - "./...", - "-v", - ) - - goTestCmd.Dir = targetTestPackage - goTestCmd.Env = os.Environ() - goTestCmd.Env = append( - goTestCmd.Env, - setupOnlyEnvName+"=true", - rootDBFilePathEnvName+"="+rootDatabaseDir, - ) - out, err := goTestCmd.Output() - if err != nil { - log.ErrorE(context.TODO(), string(out), err) - panic(err) - } -} - -// getTestPackagePath returns the path to the package currently under test, relative -// to `./tests/integration/`. Will return an empty string and false if the tests -// are not within that directory. -func getTestPackagePath() (string, bool) { - currentTestPackage, err := os.Getwd() - if err != nil { - panic(err) - } - - splitPath := strings.Split( - currentTestPackage, - "/tests/integration/", - ) - - if len(splitPath) != 2 { - return "", false - } - return splitPath[1], true -} - -func checkIfDatabaseFormatChangesAreDocumented() bool { - previousDbChangeFiles, targetDirFound := getDatabaseFormatDocumentation( - detectDbChangesCodeDir, - false, - ) - if !targetDirFound { - panic("Documentation directory not found") - } - - previousDbChanges := make(map[string]struct{}, len(previousDbChangeFiles)) - for _, f := range previousDbChangeFiles { - // Note: we assume flat directory for now - sub directories are not expanded - previousDbChanges[f.Name()] = struct{}{} - } - - _, thisFilePath, _, _ := runtime.Caller(0) - currentDbChanges, currentDirFound := getDatabaseFormatDocumentation(thisFilePath, true) - if !currentDirFound { - panic("Documentation directory not found") - } - - for _, f := range currentDbChanges { - if _, isChangeOld := previousDbChanges[f.Name()]; !isChangeOld { - // If there is a new file in the directory then the change - // has been documented and the test should pass - return true - } - } - - return false -} - -func getDatabaseFormatDocumentation(startPath string, allowDescend bool) ([]fs.DirEntry, bool) { - startInfo, err := os.Stat(startPath) - if err != nil { - panic(err) - } - - var currentDirectory string - if startInfo.IsDir() { - currentDirectory = startPath - } else { - currentDirectory = path.Dir(startPath) - } - - for { - directoryContents, err := os.ReadDir(currentDirectory) - if err != nil { - panic(err) - } - - for _, directoryItem := range directoryContents { - directoryItemPath := path.Join(currentDirectory, directoryItem.Name()) - if directoryItem.Name() == documentationDirectoryName { - probableFormatChangeDirectoryContents, err := os.ReadDir(directoryItemPath) - if err != nil { - panic(err) - } - for _, possibleDocumentationItem := range probableFormatChangeDirectoryContents { - if path.Ext(possibleDocumentationItem.Name()) == ".md" { - // If the directory's name matches the expected, and contains .md files - // we assume it is the documentation directory - return probableFormatChangeDirectoryContents, true - } - } - } else { - if directoryItem.IsDir() { - childContents, directoryFound := getDatabaseFormatDocumentation(directoryItemPath, false) - if directoryFound { - return childContents, true - } - } - } - } - - if allowDescend { - // If not found in this directory, continue down the path - currentDirectory = 
path.Dir(currentDirectory) - - if currentDirectory == "." || currentDirectory == "/" { - panic("Database documentation directory not found") - } - } else { - return []fs.DirEntry{}, false - } - } -} - -func getLatestCommit(repoName string, branchName string) string { - cmd := exec.Command("git", "ls-remote", repoName, "refs/heads/"+branchName) - result, err := cmd.Output() - if err != nil { - panic(err) - } - - // This is a tab, not a space! - seperator := "\t" - return strings.Split(string(result), seperator)[0] -} diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index cc2a4c4749..f722516445 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -15,7 +15,6 @@ import ( "encoding/json" "fmt" "os" - "path" "reflect" "strconv" "strings" @@ -36,22 +35,17 @@ import ( "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" + changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" ) const ( - clientGoEnvName = "DEFRA_CLIENT_GO" - clientHttpEnvName = "DEFRA_CLIENT_HTTP" - memoryBadgerEnvName = "DEFRA_BADGER_MEMORY" - fileBadgerEnvName = "DEFRA_BADGER_FILE" - fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH" - rootDBFilePathEnvName = "DEFRA_TEST_ROOT" - inMemoryEnvName = "DEFRA_IN_MEMORY" - setupOnlyEnvName = "DEFRA_SETUP_ONLY" - detectDbChangesEnvName = "DEFRA_DETECT_DATABASE_CHANGES" - repositoryEnvName = "DEFRA_CODE_REPOSITORY" - targetBranchEnvName = "DEFRA_TARGET_BRANCH" - mutationTypeEnvName = "DEFRA_MUTATION_TYPE" - documentationDirectoryName = "data_format_changes" + clientGoEnvName = "DEFRA_CLIENT_GO" + clientHttpEnvName = "DEFRA_CLIENT_HTTP" + memoryBadgerEnvName = "DEFRA_BADGER_MEMORY" + fileBadgerEnvName = "DEFRA_BADGER_FILE" + fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH" + inMemoryEnvName = "DEFRA_IN_MEMORY" + mutationTypeEnvName = "DEFRA_MUTATION_TYPE" ) type DatabaseType string @@ -108,42 +102,16 @@ var ( httpClient bool goClient bool mutationType MutationType + databaseDir string ) -const subscriptionTimeout = 1 * time.Second - -// Instantiating lenses is expensive, and our tests do not benefit from a large number of them, -// so we explicitly set it to a low value. -const lensPoolSize = 2 - -var databaseDir string -var rootDatabaseDir string - -/* -If this is set to true the integration test suite will instead of its normal profile do -the following: - -On [package] Init: - - Get the (local) latest commit from the target/parent branch // code assumes - git fetch has been done - - Check to see if a clone of that commit/branch is available in the temp dir, and - if not clone the target branch - - Check to see if there are any new .md files in the current branch's data_format_changes - dir (vs the target branch) - -For each test: - - If new documentation detected, pass the test and exit - - Create a new (test/auto-deleted) temp dir for defra to live/run in - - Run the test setup (add initial schema, docs, updates) using the target branch (test is skipped - if test does not exist in target and is new to this branch) - - Run the test request and assert results (as per normal tests) using the current branch -*/ -var DetectDbChanges bool -var SetupOnly bool - -var detectDbChangesCodeDir string -var areDatabaseFormatChangesDocumented bool -var previousTestCaseTestName string +const ( + // subscriptionTimeout is the maximum time to wait for subscription results to be returned. 
+ subscriptionTimeout = 1 * time.Second + // Instantiating lenses is expensive, and our tests do not benefit from a large number of them, + // so we explicitly set it to a low value. + lensPoolSize = 2 +) func init() { // We use environment variables instead of flags `go test ./...` throws for all packages @@ -153,22 +121,6 @@ func init() { badgerFile, _ = strconv.ParseBool(os.Getenv(fileBadgerEnvName)) badgerInMemory, _ = strconv.ParseBool(os.Getenv(memoryBadgerEnvName)) inMemoryStore, _ = strconv.ParseBool(os.Getenv(inMemoryEnvName)) - DetectDbChanges, _ = strconv.ParseBool(os.Getenv(detectDbChangesEnvName)) - SetupOnly, _ = strconv.ParseBool(os.Getenv(setupOnlyEnvName)) - - var repositoryValue string - if value, ok := os.LookupEnv(repositoryEnvName); ok { - repositoryValue = value - } else { - repositoryValue = "https://github.com/sourcenetwork/defradb.git" - } - - var targetBranchValue string - if value, ok := os.LookupEnv(targetBranchEnvName); ok { - targetBranchValue = value - } else { - targetBranchValue = "develop" - } if value, ok := os.LookupEnv(mutationTypeEnvName); ok { mutationType = MutationType(value) @@ -179,21 +131,22 @@ func init() { mutationType = CollectionSaveMutationType } - // Default is to test go client type. if !goClient && !httpClient { + // Default is to test go client type. goClient = true } - // Default is to test all but filesystem db types. - if !badgerInMemory && !badgerFile && !inMemoryStore && !DetectDbChanges { + if changeDetector.Enabled { + // Change detector only uses badger file db type. + badgerFile = true + badgerInMemory = false + inMemoryStore = false + } else if !badgerInMemory && !badgerFile && !inMemoryStore { + // Default is to test all but filesystem db types. badgerFile = false badgerInMemory = true inMemoryStore = true } - - if DetectDbChanges { - detectDbChangesInit(repositoryValue, targetBranchValue) - } } // AssertPanic asserts that the code inside the specified PanicTestFunc panics. @@ -203,7 +156,7 @@ func init() { // // Usage: AssertPanic(t, func() { executeTestCase(t, test) }) func AssertPanic(t *testing.T, f assert.PanicTestFunc) bool { - if IsDetectingDbChanges() { + if changeDetector.Enabled { // The `assert.Panics` call will falsely fail if this test is executed during // a detect changes test run. t.Skip("Assert panic with the change detector is not currently supported.") @@ -218,74 +171,76 @@ func AssertPanic(t *testing.T, f assert.PanicTestFunc) bool { } func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { - opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} + opts := badgerds.Options{ + Options: badger.DefaultOptions("").WithInMemory(true), + } rootstore, err := badgerds.NewDatastore("", &opts) if err != nil { return nil, err } - - dbopts = append(dbopts, db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize)) - db, err := db.NewDB(ctx, rootstore, dbopts...) if err != nil { return nil, err } - return db, nil } -func NewInMemoryDB(ctx context.Context) (client.DB, error) { - rootstore := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize)) +func NewInMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { + db, err := db.NewDB(ctx, memory.NewDatastore(ctx), dbopts...) 
if err != nil { return nil, err } - return db, nil } -func NewBadgerFileDB(ctx context.Context, t testing.TB) (client.DB, string, error) { +func NewBadgerFileDB(ctx context.Context, t testing.TB, dbopts ...db.Option) (client.DB, string, error) { var dbPath string - if databaseDir != "" { + switch { + case databaseDir != "": + // restarting database dbPath = databaseDir - } else if rootDatabaseDir != "" { - dbPath = path.Join(rootDatabaseDir, t.Name()) - } else { + + case changeDetector.Enabled: + // change detector + dbPath = changeDetector.DatabaseDir(t) + + default: + // default test case dbPath = t.TempDir() } - db, err := newBadgerFileDB(ctx, t, dbPath) - return db, dbPath, err -} - -func newBadgerFileDB(ctx context.Context, t testing.TB, path string) (client.DB, error) { - opts := badgerds.Options{Options: badger.DefaultOptions(path)} - rootstore, err := badgerds.NewDatastore(path, &opts) + opts := &badgerds.Options{ + Options: badger.DefaultOptions(dbPath), + } + rootstore, err := badgerds.NewDatastore(dbPath, opts) if err != nil { - return nil, err + return nil, "", err } - - db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize)) + db, err := db.NewDB(ctx, rootstore, dbopts...) if err != nil { - return nil, err + return nil, "", err } - - return db, nil + return db, dbPath, err } // GetDatabase returns the database implementation for the current // testing state. The database type and client type on the test state // are used to select the datastore and client implementation to use. func GetDatabase(s *state) (cdb client.DB, path string, err error) { + dbopts := []db.Option{ + db.WithUpdateEvents(), + db.WithLensPoolSize(lensPoolSize), + } + switch s.dbt { case badgerIMType: - cdb, err = NewBadgerMemoryDB(s.ctx, db.WithUpdateEvents()) + cdb, err = NewBadgerMemoryDB(s.ctx, dbopts...) case badgerFileType: - cdb, path, err = NewBadgerFileDB(s.ctx, s.t) + cdb, path, err = NewBadgerFileDB(s.ctx, s.t, dbopts...) case defraIMType: - cdb, err = NewInMemoryDB(s.ctx) + cdb, err = NewInMemoryDB(s.ctx, dbopts...) 
default: err = fmt.Errorf("invalid database type: %v", s.dbt) @@ -323,11 +278,7 @@ func ExecuteTestCase( testCase TestCase, ) { collectionNames := getCollectionNames(testCase) - - if DetectDbChanges && DetectDbChangesPreTestChecks(t, collectionNames) { - return - } - + changeDetector.PreTestChecks(t, collectionNames) skipIfMutationTypeUnsupported(t, testCase.SupportedMutationTypes) var clients []ClientType @@ -370,7 +321,22 @@ func executeTestCase( dbt DatabaseType, clientType ClientType, ) { - log.Info(ctx, testCase.Description, logging.NewKV("Database", dbt)) + log.Info( + ctx, + testCase.Description, + logging.NewKV("badgerFile", badgerFile), + logging.NewKV("badgerInMemory", badgerInMemory), + logging.NewKV("inMemoryStore", inMemoryStore), + logging.NewKV("httpClient", httpClient), + logging.NewKV("goClient", goClient), + logging.NewKV("mutationType", mutationType), + logging.NewKV("databaseDir", databaseDir), + logging.NewKV("changeDetector.Enabled", changeDetector.Enabled), + logging.NewKV("changeDetector.SetupOnly", changeDetector.SetupOnly), + logging.NewKV("changeDetector.SourceBranch", changeDetector.SourceBranch), + logging.NewKV("changeDetector.TargetBranch", changeDetector.TargetBranch), + logging.NewKV("changeDetector.Repository", changeDetector.Repository), + ) flattenActions(&testCase) startActionIndex, endActionIndex := getActionRange(testCase) @@ -632,7 +598,7 @@ func getActionRange(testCase TestCase) (int, int) { startIndex := 0 endIndex := len(testCase.Actions) - 1 - if !DetectDbChanges { + if !changeDetector.Enabled { return startIndex, endIndex } @@ -656,7 +622,7 @@ ActionLoop: } } - if SetupOnly { + if changeDetector.SetupOnly { if setupCompleteIndex > -1 { endIndex = setupCompleteIndex } else if firstNonSetupIndex > -1 { @@ -825,7 +791,7 @@ func configureNode( s *state, action ConfigureNode, ) { - if DetectDbChanges { + if changeDetector.Enabled { // We do not yet support the change detector for tests running across multiple nodes. s.t.SkipNow() return From 9ec35ebf95b325bc2a80087718c615bb066d712c Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Wed, 20 Sep 2023 12:18:22 -0700 Subject: [PATCH 03/55] fix(i): Change detector missing deps (#1899) ## Relevant issue(s) N/A ## Description This PR fixes an issue where the change detector was not installing dependencies when using a local target branch. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? 
CI Specify the platform(s) on which this was tested: - MacOS --- tests/change_detector/change_detector_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/change_detector/change_detector_test.go b/tests/change_detector/change_detector_test.go index ac9bc1a23f..519bc7d965 100644 --- a/tests/change_detector/change_detector_test.go +++ b/tests/change_detector/change_detector_test.go @@ -28,7 +28,6 @@ import ( func TestChanges(t *testing.T) { sourceRepoDir := t.TempDir() execClone(t, sourceRepoDir, Repository, SourceBranch) - execMakeDeps(t, sourceRepoDir) var targetRepoDir string if TargetBranch == "" { @@ -40,13 +39,15 @@ func TestChanges(t *testing.T) { // check out the target branch targetRepoDir = t.TempDir() execClone(t, targetRepoDir, Repository, TargetBranch) - execMakeDeps(t, targetRepoDir) } if checkIfDatabaseFormatChangesAreDocumented(t, sourceRepoDir, targetRepoDir) { t.Skip("skipping test with documented database format changes") } + execMakeDeps(t, sourceRepoDir) + execMakeDeps(t, targetRepoDir) + targetRepoTestDir := filepath.Join(targetRepoDir, "tests", "integration") targetRepoPkgList := execList(t, targetRepoTestDir) From 84331e9bec20106b168d4aec8e0383a5602982e2 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 20 Sep 2023 16:52:16 -0400 Subject: [PATCH 04/55] bot: Update combined dependabot PRs 19-09-2023 (#1898) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ✅ This PR was created by the Combine PRs action by combining the following PRs: #1893 bot: Bump graphiql from 3.0.5 to 3.0.6 in /playground #1892 bot: Bump github.com/tidwall/btree from 1.6.0 to 1.7.0 #1891 bot: Bump google.golang.org/grpc from 1.58.0 to 1.58.1 ⚠️ The following PRs were left out due to merge conflicts: #1889 bot: Bump golang.org/x/net from 0.14.0 to 0.15.0 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone --- go.mod | 4 +- go.sum | 8 +- playground/package-lock.json | 221 +++++++++++++---------------------- playground/package.json | 2 +- 4 files changed, 87 insertions(+), 148 deletions(-) diff --git a/go.mod b/go.mod index 4af3d59cb4..5c99acb439 100644 --- a/go.mod +++ b/go.mod @@ -41,7 +41,7 @@ require ( github.com/spf13/viper v1.16.0 github.com/stretchr/testify v1.8.4 github.com/textileio/go-libp2p-pubsub-rpc v0.0.9 - github.com/tidwall/btree v1.6.0 + github.com/tidwall/btree v1.7.0 github.com/ugorji/go/codec v1.2.11 github.com/valyala/fastjson v1.6.4 github.com/vito/go-sse v1.0.0 @@ -50,7 +50,7 @@ require ( go.uber.org/zap v1.25.0 golang.org/x/crypto v0.13.0 golang.org/x/net v0.14.0 - google.golang.org/grpc v1.58.0 + google.golang.org/grpc v1.58.1 google.golang.org/protobuf v1.31.0 ) diff --git a/go.sum b/go.sum index 9cc9fad3fd..3cb6fe1269 100644 --- a/go.sum +++ b/go.sum @@ -1260,8 +1260,8 @@ github.com/textileio/go-datastore-extensions v1.0.1 h1:qIJGqJaigQ1wD4TdwS/hf73u0 github.com/textileio/go-ds-badger3 v0.1.0 h1:q0kBuBmAcRUR3ClMSYlyw0224XeuzjjGinU53Qz1uXI= github.com/textileio/go-log/v2 v2.1.3-gke-2 h1:YkMA5ua0Cf/X6CkbexInsoJ/HdaHQBlgiv9Yy9hddNM= github.com/textileio/go-log/v2 v2.1.3-gke-2/go.mod h1:DwACkjFS3kjZZR/4Spx3aPfSsciyslwUe5bxV8CEU2w= -github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= -github.com/tidwall/btree 
v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= @@ -1790,8 +1790,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= -google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.1 h1:OL+Vz23DTtrrldqHK49FUOPHyY75rvFqJfXC84NYW58= +google.golang.org/grpc v1.58.1/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/playground/package-lock.json b/playground/package-lock.json index 952d577128..8bc56c24f1 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -10,7 +10,7 @@ "dependencies": { "@tanstack/react-query": "^4.35.3", "fast-json-patch": "^3.1.1", - "graphiql": "^3.0.5", + "graphiql": "^3.0.6", "graphql": "^16.8.0", "react": "^18.2.0", "react-dom": "^18.2.0", @@ -39,11 +39,11 @@ } }, "node_modules/@babel/runtime": { - "version": "7.22.6", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.6.tgz", - "integrity": "sha512-wDb5pWm4WDdF6LFUde3Jl8WzPA+3ZbxYqkC6xAXuD3irdEHN1k0NfTRrJD8ZD378SJ61miMLCqIOXYhd8x+AJQ==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.15.tgz", + "integrity": "sha512-T0O+aa+4w0u06iNmapipJXMV4HoUir03hpx3/YqXXhu9xim3w+dVphjFWl1OH8NbZHw5Lbm9k45drDkgq2VNNA==", "dependencies": { - "regenerator-runtime": "^0.13.11" + "regenerator-runtime": "^0.14.0" }, "engines": { "node": ">=6.9.0" @@ -70,13 +70,13 @@ "peer": true }, "node_modules/@codemirror/view": { - "version": "6.16.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.16.0.tgz", - "integrity": "sha512-1Z2HkvkC3KR/oEZVuW9Ivmp8TWLzGEd8T8TA04TTwPvqogfkHBdYSlflytDOqmkUxM2d1ywTg7X2dU5mC+SXvg==", + "version": "6.19.0", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.19.0.tgz", + "integrity": "sha512-XqNIfW/3GaaF+T7Q1jBcRLCPm1NbrR2DBxrXacSt1FG+rNsdsNn3/azAfgpUoJ7yy4xgd8xTPa3AlL+y0lMizQ==", "peer": true, "dependencies": { "@codemirror/state": "^6.1.4", - "style-mod": "^4.0.0", + "style-mod": "^4.1.0", "w3c-keyname": "^2.2.4" } }, @@ -504,28 +504,28 @@ } }, "node_modules/@floating-ui/core": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.4.1.tgz", - "integrity": "sha512-jk3WqquEJRlcyu7997NtR5PibI+y5bi+LS3hPmguVClypenMsCY3CBa3LAQnozRCtCrYWSEtAdiskpamuJRFOQ==", + "version": 
"1.5.0", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.5.0.tgz", + "integrity": "sha512-kK1h4m36DQ0UHGj5Ah4db7R0rHemTqqO0QLvUqi1/mUUp3LuAWbWxdxSIf/XsnH9VS6rRVPLJCncjRzUvyCLXg==", "dependencies": { - "@floating-ui/utils": "^0.1.1" + "@floating-ui/utils": "^0.1.3" } }, "node_modules/@floating-ui/dom": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.5.1.tgz", - "integrity": "sha512-KwvVcPSXg6mQygvA1TjbN/gh///36kKtllIF8SUm0qpFj8+rvYrpvlYdL1JoA71SHpDqgSSdGOSoQ0Mp3uY5aw==", + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.5.3.tgz", + "integrity": "sha512-ClAbQnEqJAKCJOEbbLo5IUlZHkNszqhuxS4fHAVxRPXPya6Ysf2G8KypnYcOTpx6I8xcgF9bbHb6g/2KpbV8qA==", "dependencies": { - "@floating-ui/core": "^1.4.1", - "@floating-ui/utils": "^0.1.1" + "@floating-ui/core": "^1.4.2", + "@floating-ui/utils": "^0.1.3" } }, "node_modules/@floating-ui/react-dom": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.1.tgz", - "integrity": "sha512-rZtAmSht4Lry6gdhAJDrCp/6rKN7++JnL1/Anbr/DdeyYXQPxvg/ivrbYvJulbRf4vL8b212suwMM2lxbv+RQA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.2.tgz", + "integrity": "sha512-5qhlDvjaLmAst/rKb3VdlCinwTF4EYMiVxuuc/HVUjs46W0zgtbMmAZ1UTsDrRTxRmUEzl92mOtWbeeXL26lSQ==", "dependencies": { - "@floating-ui/dom": "^1.3.0" + "@floating-ui/dom": "^1.5.1" }, "peerDependencies": { "react": ">=16.8.0", @@ -533,14 +533,14 @@ } }, "node_modules/@floating-ui/utils": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.1.1.tgz", - "integrity": "sha512-m0G6wlnhm/AX0H12IOWtK8gASEMffnX08RtKkCgTdHb9JpHKGloI7icFfLg9ZmQeavcvR0PKmzxClyuFPSjKWw==" + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.1.4.tgz", + "integrity": "sha512-qprfWkn82Iw821mcKofJ5Pk9wgioHicxcQMxx+5zt5GSKoqdWvgG5AxVmpmUUjzTLPVSH5auBrhI93Deayn/DA==" }, "node_modules/@graphiql/react": { - "version": "0.19.3", - "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.19.3.tgz", - "integrity": "sha512-rpxKcmKPhyGfZo1w9h3+E5FY+LXOn8o5fJxpJd2MbLF8segvvWLtJeXL46Q2IkEFqR4uxf00NUTbCwXjRIVaQQ==", + "version": "0.19.4", + "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.19.4.tgz", + "integrity": "sha512-qg3N2Zeuq2+GDMZddz7K/ak1p5O56kKuLM/idOJZD+Lxbk2e8Eye3KWM24lJuuCi2gdvJuqPMfCdewLXrHhEkw==", "dependencies": { "@graphiql/toolkit": "^0.9.1", "@headlessui/react": "^1.7.15", @@ -551,10 +551,10 @@ "@types/codemirror": "^5.60.8", "clsx": "^1.2.1", "codemirror": "^5.65.3", - "codemirror-graphql": "^2.0.9", + "codemirror-graphql": "^2.0.10", "copy-to-clipboard": "^3.2.0", "framer-motion": "^6.5.1", - "graphql-language-service": "^5.1.7", + "graphql-language-service": "^5.2.0", "markdown-it": "^12.2.0", "set-value": "^4.1.0" }, @@ -583,9 +583,9 @@ } }, "node_modules/@headlessui/react": { - "version": "1.7.16", - "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.16.tgz", - "integrity": "sha512-2MphIAZdSUacZBT6EXk8AJkj+EuvaaJbtCyHTJrPsz8inhzCl7qeNPI1uk1AUvCgWylVtdN8cVVmnhUDPxPy3g==", + "version": "1.7.17", + "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.17.tgz", + "integrity": "sha512-4am+tzvkqDSSgiwrsEpGWqgGo9dz8qU5M3znCkC4PgkpY4HcCZzEDEvozltGGGHIKl9jbXbZPSH5TWn4sWJdow==", "dependencies": { "client-only": "^0.0.1" }, @@ -631,9 +631,9 @@ "dev": true }, "node_modules/@lezer/common": { - "version": 
"1.0.3", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", - "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.4.tgz", + "integrity": "sha512-lZHlk8p67x4aIDtJl6UQrXSOP6oi7dQR3W/geFVrENdA1JDaAJWldnVqVjPMJupbTKbzDfFcePfKttqVidS/dg==", "peer": true }, "node_modules/@lezer/highlight": { @@ -646,9 +646,9 @@ } }, "node_modules/@lezer/lr": { - "version": "1.3.9", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.9.tgz", - "integrity": "sha512-XPz6dzuTHlnsbA5M2DZgjflNQ+9Hi5Swhic0RULdp3oOs3rh6bqGZolosVqN/fQIT8uNiepzINJDnS39oweTHQ==", + "version": "1.3.10", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.10.tgz", + "integrity": "sha512-BZfVvf7Re5BIwJHlZXbJn9L8lus5EonxQghyn+ih8Wl36XMFBPTXC0KM0IdUtj9w/diPHsKlXVgL+AlX2jYJ0Q==", "peer": true, "dependencies": { "@lezer/common": "^1.0.0" @@ -665,11 +665,6 @@ "tslib": "^2.3.1" } }, - "node_modules/@motionone/animation/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/@motionone/dom": { "version": "10.12.0", "resolved": "https://registry.npmjs.org/@motionone/dom/-/dom-10.12.0.tgz", @@ -683,11 +678,6 @@ "tslib": "^2.3.1" } }, - "node_modules/@motionone/dom/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/@motionone/easing": { "version": "10.15.1", "resolved": "https://registry.npmjs.org/@motionone/easing/-/easing-10.15.1.tgz", @@ -697,11 +687,6 @@ "tslib": "^2.3.1" } }, - "node_modules/@motionone/easing/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/@motionone/generators": { "version": "10.15.1", "resolved": "https://registry.npmjs.org/@motionone/generators/-/generators-10.15.1.tgz", @@ -712,11 +697,6 @@ "tslib": "^2.3.1" } }, - "node_modules/@motionone/generators/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/@motionone/types": { "version": "10.15.1", "resolved": "https://registry.npmjs.org/@motionone/types/-/types-10.15.1.tgz", @@ -732,11 +712,6 @@ "tslib": "^2.3.1" } }, - "node_modules/@motionone/utils/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/@n1ru4l/push-pull-async-iterable-iterator": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/@n1ru4l/push-pull-async-iterable-iterator/-/push-pull-async-iterable-iterator-3.2.0.tgz", @@ -1488,9 +1463,9 @@ } }, "node_modules/@types/codemirror": { - "version": "5.60.8", - "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-5.60.8.tgz", - "integrity": 
"sha512-VjFgDF/eB+Aklcy15TtOTLQeMjTo07k7KAjql8OK5Dirr7a6sJY4T1uVBDuTVG9VEmn1uUsohOpYnVfgC6/jyw==", + "version": "5.60.10", + "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-5.60.10.tgz", + "integrity": "sha512-ZTA3teiCWKT8HUUofqlGPlShu5ojdIajizsS0HpH6GL0/iEdjRt7fXbCLHHqKYP5k7dC/HnnWIjZAiELUwBdjQ==", "dependencies": { "@types/tern": "*" } @@ -1545,9 +1520,9 @@ "dev": true }, "node_modules/@types/tern": { - "version": "0.23.4", - "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.4.tgz", - "integrity": "sha512-JAUw1iXGO1qaWwEOzxTKJZ/5JxVeON9kvGZ/osgZaJImBnyjyn0cjovPsf6FNLmyGY8Vw9DoXZCMlfMkMwHRWg==", + "version": "0.23.5", + "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.5.tgz", + "integrity": "sha512-POau56wDk3TQ0mQ0qG7XDzv96U5whSENZ9lC0htDvEH+9YUREo+J2U+apWcVRgR2UydEE70JXZo44goG+akTNQ==", "dependencies": { "@types/estree": "*" } @@ -1830,11 +1805,6 @@ "node": ">=10" } }, - "node_modules/aria-hidden/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/array-union": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", @@ -1911,16 +1881,17 @@ } }, "node_modules/codemirror": { - "version": "5.65.14", - "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-5.65.14.tgz", - "integrity": "sha512-VSNugIBDGt0OU9gDjeVr6fNkoFQznrWEUdAApMlXQNbfE8gGO19776D6MwSqF/V/w/sDwonsQ0z7KmmI9guScg==" + "version": "5.65.15", + "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-5.65.15.tgz", + "integrity": "sha512-YC4EHbbwQeubZzxLl5G4nlbLc1T21QTrKGaOal/Pkm9dVDMZXMH7+ieSPEOZCtO9I68i8/oteJKOxzHC2zR+0g==" }, "node_modules/codemirror-graphql": { - "version": "2.0.9", - "resolved": "https://registry.npmjs.org/codemirror-graphql/-/codemirror-graphql-2.0.9.tgz", - "integrity": "sha512-gl1LR6XSBgZtl7Dr2q4jjRNfhxMF8vn+rnjZTZPf/l+VrQgavY8l3G//hW7s3hWy73iiqkq5LZ4KE1tdaxB/vQ==", + "version": "2.0.10", + "resolved": "https://registry.npmjs.org/codemirror-graphql/-/codemirror-graphql-2.0.10.tgz", + "integrity": "sha512-rC9NxibCsSzWtCQjHLfwKCkyYdGv2BT/BCgyDoKPrc/e7aGiyLyeT0fB60d+0imwlvhX3lIHncl6JMz2YxQ/jg==", "dependencies": { - "graphql-language-service": "5.1.7" + "@types/codemirror": "^0.0.90", + "graphql-language-service": "5.2.0" }, "peerDependencies": { "@codemirror/language": "6.0.0", @@ -1928,6 +1899,14 @@ "graphql": "^15.5.0 || ^16.0.0" } }, + "node_modules/codemirror-graphql/node_modules/@types/codemirror": { + "version": "0.0.90", + "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-0.0.90.tgz", + "integrity": "sha512-8Z9+tSg27NPRGubbUPUCrt5DDG/OWzLph5BvcDykwR5D7RyZh5mhHG0uS1ePKV1YFCA+/cwc4Ey2AJAEFfV3IA==", + "dependencies": { + "@types/tern": "*" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", @@ -2408,11 +2387,6 @@ "react-dom": ">=16.8 || ^17.0.0 || ^18.0.0" } }, - "node_modules/framer-motion/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/framesync": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/framesync/-/framesync-6.0.1.tgz", @@ -2421,11 +2395,6 @@ "tslib": "^2.1.0" } }, - 
"node_modules/framesync/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", @@ -2528,13 +2497,13 @@ "dev": true }, "node_modules/graphiql": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.0.5.tgz", - "integrity": "sha512-R02CKVXPajOmJcg0TAKuRMU8qvwb7ltGDYqbaQMKbLeYYw/wQUrmTxLwdVuRadgRL4ubNzl3q5vKTkQKR5Ay2Q==", + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.0.6.tgz", + "integrity": "sha512-PuyAhRQibTrwT3RUKmwIGrJAB+M1gg+TAftmChjBqQW0n5WMFFvP5Wcr2NEikomY0s06+oKeUGhBU2iPrq+cSQ==", "dependencies": { - "@graphiql/react": "^0.19.3", + "@graphiql/react": "^0.19.4", "@graphiql/toolkit": "^0.9.1", - "graphql-language-service": "^5.1.7", + "graphql-language-service": "^5.2.0", "markdown-it": "^12.2.0" }, "peerDependencies": { @@ -2552,9 +2521,9 @@ } }, "node_modules/graphql-language-service": { - "version": "5.1.7", - "resolved": "https://registry.npmjs.org/graphql-language-service/-/graphql-language-service-5.1.7.tgz", - "integrity": "sha512-xkawYMJeoNYGhT+SpSH3c2qf6HpGHQ/duDmrseVHBpVCrXAiGnliXGSCC4jyMGgZQ05GytsZ12p0nUo7s6lSSw==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/graphql-language-service/-/graphql-language-service-5.2.0.tgz", + "integrity": "sha512-o/ZgTS0pBxWm3hSF4+6GwiV1//DxzoLWEbS38+jqpzzy1d/QXBidwQuVYTOksclbtOJZ3KR/tZ8fi/tI6VpVMg==", "dependencies": { "nullthrows": "^1.0.0", "vscode-languageserver-types": "^3.17.1" @@ -3042,11 +3011,6 @@ "tslib": "^2.1.0" } }, - "node_modules/popmotion/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/postcss": { "version": "8.4.27", "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.27.tgz", @@ -3196,16 +3160,6 @@ } } }, - "node_modules/react-remove-scroll-bar/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, - "node_modules/react-remove-scroll/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/react-style-singleton": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", @@ -3228,15 +3182,10 @@ } } }, - "node_modules/react-style-singleton/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/regenerator-runtime": { - "version": "0.13.11", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", - "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + "version": "0.14.0", + "resolved": 
"https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", + "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==" }, "node_modules/resolve-from": { "version": "4.0.0", @@ -3415,9 +3364,9 @@ } }, "node_modules/style-mod": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.0.3.tgz", - "integrity": "sha512-78Jv8kYJdjbvRwwijtCevYADfsI0lGzYJe4mMFdceO8l75DFFDoqBhR1jVDicDRRaX4//g1u9wKeo+ztc2h1Rw==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.0.tgz", + "integrity": "sha512-Ca5ib8HrFn+f+0n4N4ScTIA9iTOQ7MaGS1ylHcoVqW9J7w2w8PzN6g9gKmTYgGEBH8e120+RCmhpje6jC5uGWA==", "peer": true }, "node_modules/style-value-types": { @@ -3429,11 +3378,6 @@ "tslib": "^2.1.0" } }, - "node_modules/style-value-types/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", @@ -3481,6 +3425,11 @@ "typescript": ">=4.2.0" } }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -3552,11 +3501,6 @@ } } }, - "node_modules/use-callback-ref/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/use-sidecar": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", @@ -3578,11 +3522,6 @@ } } }, - "node_modules/use-sidecar/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/use-sync-external-store": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz", diff --git a/playground/package.json b/playground/package.json index d211df0704..e4dcd95cce 100644 --- a/playground/package.json +++ b/playground/package.json @@ -12,7 +12,7 @@ "dependencies": { "@tanstack/react-query": "^4.35.3", "fast-json-patch": "^3.1.1", - "graphiql": "^3.0.5", + "graphiql": "^3.0.6", "graphql": "^16.8.0", "react": "^18.2.0", "react-dom": "^18.2.0", From 9e3b4e73262f8947b0580a1bf9fb09e2dcd0438f Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Fri, 22 Sep 2023 10:37:18 -0700 Subject: [PATCH 05/55] feat: Add CCIP Support (#1896) ## Relevant issue(s) Resolves #1894 ## Description This PR adds an HTTP handler that enables DefraDB to act as an off-chain data oracle for smart contracts. Here is an end-to-end example of how this feature works: https://github.com/sourcenetwork/defradb-example-ccip ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. 
- [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? Unit tests Specify the platform(s) on which this was tested: - MacOS --- http/handler_ccip.go | 74 ++++++++++++++ http/handler_ccip_test.go | 207 ++++++++++++++++++++++++++++++++++++++ http/server.go | 5 + 3 files changed, 286 insertions(+) create mode 100644 http/handler_ccip.go create mode 100644 http/handler_ccip_test.go diff --git a/http/handler_ccip.go b/http/handler_ccip.go new file mode 100644 index 0000000000..a0d1af7823 --- /dev/null +++ b/http/handler_ccip.go @@ -0,0 +1,74 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "encoding/hex" + "encoding/json" + "net/http" + "strings" + + "github.com/go-chi/chi/v5" + + "github.com/sourcenetwork/defradb/client" +) + +type ccipHandler struct{} + +type CCIPRequest struct { + Sender string `json:"sender"` + Data string `json:"data"` +} + +type CCIPResponse struct { + Data string `json:"data"` +} + +// ExecCCIP handles GraphQL over Cross Chain Interoperability Protocol requests. +func (c *ccipHandler) ExecCCIP(rw http.ResponseWriter, req *http.Request) { + store := req.Context().Value(storeContextKey).(client.Store) + + var ccipReq CCIPRequest + switch req.Method { + case http.MethodGet: + ccipReq.Sender = chi.URLParam(req, "sender") + ccipReq.Data = chi.URLParam(req, "data") + case http.MethodPost: + if err := requestJSON(req, &ccipReq); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + } + + data, err := hex.DecodeString(strings.TrimPrefix(ccipReq.Data, "0x")) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + var request GraphQLRequest + if err := json.Unmarshal(data, &request); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + + result := store.ExecRequest(req.Context(), request.Query) + if result.Pub != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrStreamingNotSupported}) + return + } + resultJSON, err := json.Marshal(result.GQL) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + resultHex := "0x" + hex.EncodeToString(resultJSON) + responseJSON(rw, http.StatusOK, CCIPResponse{Data: resultHex}) +} diff --git a/http/handler_ccip_test.go b/http/handler_ccip_test.go new file mode 100644 index 0000000000..7884e16df7 --- /dev/null +++ b/http/handler_ccip_test.go @@ -0,0 +1,207 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
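+
+// Illustrative usage of the CCIP endpoint (a sketch, not part of this patch;
+// the address and payload below are placeholders). The GET route takes the
+// sender address and a hex-encoded JSON GraphQL request as URL parameters:
+//
+//	curl http://localhost:9181/api/v0/ccip/<sender-address>/0x<hex-of-gql-json>
+//
+// The POST route accepts the same values as a JSON body:
+//
+//	curl -X POST http://localhost:9181/api/v0/ccip \
+//	  -d '{"sender": "<sender-address>", "data": "0x<hex-of-gql-json>"}'
+//
+// Both respond with {"data": "0x..."}, the hex-encoded GraphQL result, as
+// exercised by the tests below.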
+ +package http + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "path" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore/memory" + "github.com/sourcenetwork/defradb/db" +) + +func TestCCIPGet_WithValidData(t *testing.T) { + cdb := setupDatabase(t) + + gqlData, err := json.Marshal(&GraphQLRequest{ + Query: `query { + User { + name + } + }`, + }) + require.NoError(t, err) + + data := "0x" + hex.EncodeToString([]byte(gqlData)) + sender := "0x0000000000000000000000000000000000000000" + url := "http://localhost:9181/api/v0/ccip/" + path.Join(sender, data) + + req := httptest.NewRequest(http.MethodGet, url, nil) + rec := httptest.NewRecorder() + + handler := NewServer(cdb) + handler.ServeHTTP(rec, req) + + res := rec.Result() + require.NotNil(t, res.Body) + + resData, err := io.ReadAll(res.Body) + require.NoError(t, err) + + var ccipRes CCIPResponse + err = json.Unmarshal(resData, &ccipRes) + require.NoError(t, err) + + resHex, err := hex.DecodeString(strings.TrimPrefix(ccipRes.Data, "0x")) + require.NoError(t, err) + + assert.JSONEq(t, `{"data": [{"name": "bob"}]}`, string(resHex)) +} + +func TestCCIPGet_WithSubscription(t *testing.T) { + cdb := setupDatabase(t) + + gqlData, err := json.Marshal(&GraphQLRequest{ + Query: `subscription { + User { + name + } + }`, + }) + require.NoError(t, err) + + data := "0x" + hex.EncodeToString([]byte(gqlData)) + sender := "0x0000000000000000000000000000000000000000" + url := "http://localhost:9181/api/v0/ccip/" + path.Join(sender, data) + + req := httptest.NewRequest(http.MethodGet, url, nil) + rec := httptest.NewRecorder() + + handler := NewServer(cdb) + handler.ServeHTTP(rec, req) + + res := rec.Result() + assert.Equal(t, 400, res.StatusCode) +} + +func TestCCIPGet_WithInvalidData(t *testing.T) { + cdb := setupDatabase(t) + + data := "invalid_hex_data" + sender := "0x0000000000000000000000000000000000000000" + url := "http://localhost:9181/api/v0/ccip/" + path.Join(sender, data) + + req := httptest.NewRequest(http.MethodGet, url, nil) + rec := httptest.NewRecorder() + + handler := NewServer(cdb) + handler.ServeHTTP(rec, req) + + res := rec.Result() + assert.Equal(t, 400, res.StatusCode) +} + +func TestCCIPPost_WithValidData(t *testing.T) { + cdb := setupDatabase(t) + + gqlJSON, err := json.Marshal(&GraphQLRequest{ + Query: `query { + User { + name + } + }`, + }) + require.NoError(t, err) + + body, err := json.Marshal(&CCIPRequest{ + Data: "0x" + hex.EncodeToString([]byte(gqlJSON)), + Sender: "0x0000000000000000000000000000000000000000", + }) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", bytes.NewBuffer(body)) + rec := httptest.NewRecorder() + + handler := NewServer(cdb) + handler.ServeHTTP(rec, req) + + res := rec.Result() + require.NotNil(t, res.Body) + + resData, err := io.ReadAll(res.Body) + require.NoError(t, err) + + var ccipRes CCIPResponse + err = json.Unmarshal(resData, &ccipRes) + require.NoError(t, err) + + resHex, err := hex.DecodeString(strings.TrimPrefix(ccipRes.Data, "0x")) + require.NoError(t, err) + + assert.JSONEq(t, `{"data": [{"name": "bob"}]}`, string(resHex)) +} + +func TestCCIPPost_WithInvalidGraphQLRequest(t *testing.T) { + cdb := setupDatabase(t) + + body, err := json.Marshal(&CCIPRequest{ + Data: "0x" + hex.EncodeToString([]byte("invalid_graphql_request")), + 
Sender: "0x0000000000000000000000000000000000000000", + }) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", bytes.NewBuffer(body)) + rec := httptest.NewRecorder() + + handler := NewServer(cdb) + handler.ServeHTTP(rec, req) + + res := rec.Result() + assert.Equal(t, 400, res.StatusCode) +} + +func TestCCIPPost_WithInvalidBody(t *testing.T) { + cdb := setupDatabase(t) + + req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", nil) + rec := httptest.NewRecorder() + + handler := NewServer(cdb) + handler.ServeHTTP(rec, req) + + res := rec.Result() + assert.Equal(t, 400, res.StatusCode) +} + +func setupDatabase(t *testing.T) client.DB { + ctx := context.Background() + + cdb, err := db.NewDB(ctx, memory.NewDatastore(ctx), db.WithUpdateEvents()) + require.NoError(t, err) + + _, err = cdb.AddSchema(ctx, `type User { + name: String + }`) + require.NoError(t, err) + + col, err := cdb.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "bob"}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + return cdb +} diff --git a/http/server.go b/http/server.go index afee4b9217..92da350aa1 100644 --- a/http/server.go +++ b/http/server.go @@ -33,6 +33,7 @@ func NewServer(db client.DB) *Server { store_handler := &storeHandler{} collection_handler := &collectionHandler{} lens_handler := &lensHandler{} + ccip_handler := &ccipHandler{} router := chi.NewRouter() router.Use(middleware.RequestLogger(&logFormatter{})) @@ -82,6 +83,10 @@ func NewServer(db client.DB) *Server { graphQL.Get("/", store_handler.ExecRequest) graphQL.Post("/", store_handler.ExecRequest) }) + api.Route("/ccip", func(ccip chi.Router) { + ccip.Get("/{sender}/{data}", ccip_handler.ExecCCIP) + ccip.Post("/", ccip_handler.ExecCCIP) + }) api.Route("/p2p", func(p2p chi.Router) { p2p.Route("/replicators", func(p2p_replicators chi.Router) { p2p_replicators.Get("/", store_handler.GetAllReplicators) From 667023eed3bbbec9d5536af5ff1b98e953418432 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 24 Sep 2023 05:03:02 -0700 Subject: [PATCH 06/55] bot: Bump graphql from 16.8.0 to 16.8.1 in /playground (#1901) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [graphql](https://github.com/graphql/graphql-js) from 16.8.0 to 16.8.1.
Release notes, sourced from graphql's releases: v16.8.1 (2023-09-19) is a bug-fix release (Bug Fix 🐞; Committers: 1). The accompanying commit list is available in the graphql-js repository.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=graphql&package-manager=npm_and_yarn&previous-version=16.8.0&new-version=16.8.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)

You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/sourcenetwork/defradb/network/alerts).
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone --- playground/package-lock.json | 8 ++++---- playground/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index 8bc56c24f1..dfb073f155 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -11,7 +11,7 @@ "@tanstack/react-query": "^4.35.3", "fast-json-patch": "^3.1.1", "graphiql": "^3.0.6", - "graphql": "^16.8.0", + "graphql": "^16.8.1", "react": "^18.2.0", "react-dom": "^18.2.0", "react-hook-form": "^7.46.1" @@ -2513,9 +2513,9 @@ } }, "node_modules/graphql": { - "version": "16.8.0", - "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.0.tgz", - "integrity": "sha512-0oKGaR+y3qcS5mCu1vb7KG+a89vjn06C7Ihq/dDl3jA+A8B3TKomvi3CiEcVLJQGalbu8F52LxkOym7U5sSfbg==", + "version": "16.8.1", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.1.tgz", + "integrity": "sha512-59LZHPdGZVh695Ud9lRzPBVTtlX9ZCV150Er2W43ro37wVof0ctenSaskPPjN7lVTIN8mSZt8PHUNKZuNQUuxw==", "engines": { "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } diff --git a/playground/package.json b/playground/package.json index e4dcd95cce..f5d9767dea 100644 --- a/playground/package.json +++ b/playground/package.json @@ -13,7 +13,7 @@ "@tanstack/react-query": "^4.35.3", "fast-json-patch": "^3.1.1", "graphiql": "^3.0.6", - "graphql": "^16.8.0", + "graphql": "^16.8.1", "react": "^18.2.0", "react-dom": "^18.2.0", "react-hook-form": "^7.46.1" From db1f41c2378273ec814abb043d4ecd3046a75eed Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Mon, 25 Sep 2023 10:49:47 -0400 Subject: [PATCH 07/55] feat: Allow setting of default schema version (#1888) ## Relevant issue(s) Resolves #1884 ## Description Allows setting of default schema version, allowing rapid switching between (application) api/database versions. It also allows them to define and apply schema updates eagerly, and then make the switch at a later date. --- api/http/handlerfuncs.go | 5 +- client/db.go | 14 +- client/mocks/db.go | 64 ++++- db/collection.go | 61 ++++- db/schema.go | 5 +- db/txn_db.go | 27 +- http/client.go | 25 +- http/handler_store.go | 22 +- http/server.go | 1 + http/wrapper.go | 8 +- .../migrations/query/with_set_default_test.go | 236 ++++++++++++++++++ .../schema/updates/add/field/simple_test.go | 35 +++ .../schema/with_update_set_default_test.go | 146 +++++++++++ tests/integration/test_case.go | 20 +- tests/integration/utils2.go | 27 +- 15 files changed, 656 insertions(+), 40 deletions(-) create mode 100644 tests/integration/schema/migrations/query/with_set_default_test.go create mode 100644 tests/integration/schema/with_update_set_default_test.go diff --git a/api/http/handlerfuncs.go b/api/http/handlerfuncs.go index e4163de05f..2a248d7d81 100644 --- a/api/http/handlerfuncs.go +++ b/api/http/handlerfuncs.go @@ -271,7 +271,10 @@ func patchSchemaHandler(rw http.ResponseWriter, req *http.Request) { return } - err = db.PatchSchema(req.Context(), string(patch)) + // Hardcode setDefault to true here, as that preserves the existing behaviour. + // This function will be ripped out very shortly and I don't think it is worth + // spending time/thought here. The new http api handles this correctly. 
+ err = db.PatchSchema(req.Context(), string(patch), true) if err != nil { handleErr(req.Context(), rw, err, http.StatusInternalServerError) return diff --git a/client/db.go b/client/db.go index ba4dd0b89d..47cd7d5a85 100644 --- a/client/db.go +++ b/client/db.go @@ -96,7 +96,8 @@ type Store interface { AddSchema(context.Context, string) ([]CollectionDescription, error) // PatchSchema takes the given JSON patch string and applies it to the set of CollectionDescriptions - // present in the database. + // present in the database. If true is provided, the new schema versions will be made default, otherwise + // [SetDefaultSchemaVersion] should be called to set them so. // // It will also update the GQL types used by the query system. It will error and not apply any of the // requested, valid updates should the net result of the patch result in an invalid state. The @@ -109,7 +110,16 @@ type Store interface { // // Field [FieldKind] values may be provided in either their raw integer form, or as string as per // [FieldKindStringToEnumMapping]. - PatchSchema(context.Context, string) error + PatchSchema(context.Context, string, bool) error + + // SetDefaultSchemaVersion sets the default schema version to the ID provided. It will be applied to all + // collections using the schema. + // + // This will affect all operations interacting with the schema where a schema version is not explicitly + // provided. This includes GQL queries and Collection operations. + // + // It will return an error if the provided schema version ID does not exist. + SetDefaultSchemaVersion(context.Context, string) error // SetMigration sets the migration for the given source-destination schema version IDs. Is equivilent to // calling `LensRegistry().SetMigration(ctx, cfg)`. diff --git a/client/mocks/db.go b/client/mocks/db.go index cb0af26193..02d32c4e8c 100644 --- a/client/mocks/db.go +++ b/client/mocks/db.go @@ -992,13 +992,13 @@ func (_c *DB_NewTxn_Call) RunAndReturn(run func(context.Context, bool) (datastor return _c } -// PatchSchema provides a mock function with given fields: _a0, _a1 -func (_m *DB) PatchSchema(_a0 context.Context, _a1 string) error { - ret := _m.Called(_a0, _a1) +// PatchSchema provides a mock function with given fields: _a0, _a1, _a2 +func (_m *DB) PatchSchema(_a0 context.Context, _a1 string, _a2 bool) error { + ret := _m.Called(_a0, _a1, _a2) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, string, bool) error); ok { + r0 = rf(_a0, _a1, _a2) } else { r0 = ret.Error(0) } @@ -1014,13 +1014,14 @@ type DB_PatchSchema_Call struct { // PatchSchema is a helper method to define mock.On call // - _a0 context.Context // - _a1 string -func (_e *DB_Expecter) PatchSchema(_a0 interface{}, _a1 interface{}) *DB_PatchSchema_Call { - return &DB_PatchSchema_Call{Call: _e.mock.On("PatchSchema", _a0, _a1)} +// - _a2 bool +func (_e *DB_Expecter) PatchSchema(_a0 interface{}, _a1 interface{}, _a2 interface{}) *DB_PatchSchema_Call { + return &DB_PatchSchema_Call{Call: _e.mock.On("PatchSchema", _a0, _a1, _a2)} } -func (_c *DB_PatchSchema_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_PatchSchema_Call { +func (_c *DB_PatchSchema_Call) Run(run func(_a0 context.Context, _a1 string, _a2 bool)) *DB_PatchSchema_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string)) + run(args[0].(context.Context), args[1].(string), args[2].(bool)) }) return _c } @@ -1030,7 +1031,7 @@ func (_c 
*DB_PatchSchema_Call) Return(_a0 error) *DB_PatchSchema_Call { return _c } -func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string) error) *DB_PatchSchema_Call { +func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string, bool) error) *DB_PatchSchema_Call { _c.Call.Return(run) return _c } @@ -1163,6 +1164,49 @@ func (_c *DB_Root_Call) RunAndReturn(run func() datastore.RootStore) *DB_Root_Ca return _c } +// SetDefaultSchemaVersion provides a mock function with given fields: _a0, _a1 +func (_m *DB) SetDefaultSchemaVersion(_a0 context.Context, _a1 string) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// DB_SetDefaultSchemaVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDefaultSchemaVersion' +type DB_SetDefaultSchemaVersion_Call struct { + *mock.Call +} + +// SetDefaultSchemaVersion is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) SetDefaultSchemaVersion(_a0 interface{}, _a1 interface{}) *DB_SetDefaultSchemaVersion_Call { + return &DB_SetDefaultSchemaVersion_Call{Call: _e.mock.On("SetDefaultSchemaVersion", _a0, _a1)} +} + +func (_c *DB_SetDefaultSchemaVersion_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_SetDefaultSchemaVersion_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_SetDefaultSchemaVersion_Call) Return(_a0 error) *DB_SetDefaultSchemaVersion_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *DB_SetDefaultSchemaVersion_Call) RunAndReturn(run func(context.Context, string) error) *DB_SetDefaultSchemaVersion_Call { + _c.Call.Return(run) + return _c +} + // SetMigration provides a mock function with given fields: _a0, _a1 func (_m *DB) SetMigration(_a0 context.Context, _a1 client.LensConfig) error { ret := _m.Called(_a0, _a1) diff --git a/db/collection.go b/db/collection.go index a9d3f5c403..cda5cbf584 100644 --- a/db/collection.go +++ b/db/collection.go @@ -234,6 +234,7 @@ func (db *db) updateCollection( existingDescriptionsByName map[string]client.CollectionDescription, proposedDescriptionsByName map[string]client.CollectionDescription, desc client.CollectionDescription, + setAsDefaultVersion bool, ) (client.Collection, error) { hasChanged, err := db.validateUpdateCollection(ctx, txn, existingDescriptionsByName, proposedDescriptionsByName, desc) if err != nil { @@ -300,24 +301,19 @@ func (db *db) updateCollection( return nil, err } - collectionSchemaKey := core.NewCollectionSchemaKey(desc.Schema.SchemaID) - err = txn.Systemstore().Put(ctx, collectionSchemaKey.ToDS(), []byte(schemaVersionID)) - if err != nil { - return nil, err - } - - collectionKey := core.NewCollectionKey(desc.Name) - err = txn.Systemstore().Put(ctx, collectionKey.ToDS(), []byte(schemaVersionID)) - if err != nil { - return nil, err - } - schemaVersionHistoryKey := core.NewSchemaHistoryKey(desc.Schema.SchemaID, previousSchemaVersionID) err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte(schemaVersionID)) if err != nil { return nil, err } + if setAsDefaultVersion { + err = db.setDefaultSchemaVersionExplicit(ctx, txn, desc.Name, desc.Schema.SchemaID, schemaVersionID) + if err != nil { + return nil, err + } + } + return db.getCollectionByName(ctx, txn, desc.Name) } @@ -591,6 +587,47 @@ func 
validateUpdateCollectionIndexes( return false, nil } +func (db *db) setDefaultSchemaVersion( + ctx context.Context, + txn datastore.Txn, + schemaVersionID string, +) error { + col, err := db.getCollectionByVersionID(ctx, txn, schemaVersionID) + if err != nil { + return err + } + + desc := col.Description() + err = db.setDefaultSchemaVersionExplicit(ctx, txn, desc.Name, desc.Schema.SchemaID, schemaVersionID) + if err != nil { + return err + } + + cols, err := db.getCollectionDescriptions(ctx, txn) + if err != nil { + return err + } + + return db.parser.SetSchema(ctx, txn, cols) +} + +func (db *db) setDefaultSchemaVersionExplicit( + ctx context.Context, + txn datastore.Txn, + collectionName string, + schemaID string, + schemaVersionID string, +) error { + collectionSchemaKey := core.NewCollectionSchemaKey(schemaID) + err := txn.Systemstore().Put(ctx, collectionSchemaKey.ToDS(), []byte(schemaVersionID)) + if err != nil { + return err + } + + collectionKey := core.NewCollectionKey(collectionName) + return txn.Systemstore().Put(ctx, collectionKey.ToDS(), []byte(schemaVersionID)) +} + // getCollectionByVersionId returns the [*collection] at the given [schemaVersionId] version. // // Will return an error if the given key is empty, or not found. diff --git a/db/schema.go b/db/schema.go index 5c5c0568f8..910f44f8c1 100644 --- a/db/schema.go +++ b/db/schema.go @@ -103,7 +103,7 @@ func (db *db) getCollectionDescriptions( // The collections (including the schema version ID) will only be updated if any changes have actually // been made, if the net result of the patch matches the current persisted description then no changes // will be applied. -func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString string) error { +func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString string, setAsDefaultVersion bool) error { patch, err := jsonpatch.DecodePatch([]byte(patchString)) if err != nil { return err @@ -144,10 +144,11 @@ func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString st } for i, desc := range newDescriptions { - col, err := db.updateCollection(ctx, txn, collectionsByName, newDescriptionsByName, desc) + col, err := db.updateCollection(ctx, txn, collectionsByName, newDescriptionsByName, desc, setAsDefaultVersion) if err != nil { return err } + newDescriptions[i] = col.Description() } diff --git a/db/txn_db.go b/db/txn_db.go index b307d96e35..b4cc32dee1 100644 --- a/db/txn_db.go +++ b/db/txn_db.go @@ -250,14 +250,14 @@ func (db *explicitTxnDB) AddSchema(ctx context.Context, schemaString string) ([] // The collections (including the schema version ID) will only be updated if any changes have actually // been made, if the net result of the patch matches the current persisted description then no changes // will be applied. 
-func (db *implicitTxnDB) PatchSchema(ctx context.Context, patchString string) error { +func (db *implicitTxnDB) PatchSchema(ctx context.Context, patchString string, setAsDefaultVersion bool) error { txn, err := db.NewTxn(ctx, false) if err != nil { return err } defer txn.Discard(ctx) - err = db.patchSchema(ctx, txn, patchString) + err = db.patchSchema(ctx, txn, patchString, setAsDefaultVersion) if err != nil { return err } @@ -276,8 +276,27 @@ func (db *implicitTxnDB) PatchSchema(ctx context.Context, patchString string) er // The collections (including the schema version ID) will only be updated if any changes have actually // been made, if the net result of the patch matches the current persisted description then no changes // will be applied. -func (db *explicitTxnDB) PatchSchema(ctx context.Context, patchString string) error { - return db.patchSchema(ctx, db.txn, patchString) +func (db *explicitTxnDB) PatchSchema(ctx context.Context, patchString string, setAsDefaultVersion bool) error { + return db.patchSchema(ctx, db.txn, patchString, setAsDefaultVersion) +} + +func (db *implicitTxnDB) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { + txn, err := db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + err = db.setDefaultSchemaVersion(ctx, txn, schemaVersionID) + if err != nil { + return err + } + + return txn.Commit(ctx) +} + +func (db *explicitTxnDB) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { + return db.setDefaultSchemaVersion(ctx, db.txn, schemaVersionID) } func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { diff --git a/http/client.go b/http/client.go index 16a8924a65..867cdc3bb1 100644 --- a/http/client.go +++ b/http/client.go @@ -212,10 +212,31 @@ func (c *Client) AddSchema(ctx context.Context, schema string) ([]client.Collect return cols, nil } -func (c *Client) PatchSchema(ctx context.Context, patch string) error { +type patchSchemaRequest struct { + Patch string + SetAsDefaultVersion bool +} + +func (c *Client) PatchSchema(ctx context.Context, patch string, setAsDefaultVersion bool) error { methodURL := c.http.baseURL.JoinPath("schema") - req, err := http.NewRequestWithContext(ctx, http.MethodPatch, methodURL.String(), strings.NewReader(patch)) + body, err := json.Marshal(patchSchemaRequest{patch, setAsDefaultVersion}) + if err != nil { + return err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPatch, methodURL.String(), bytes.NewBuffer(body)) + if err != nil { + return err + } + _, err = c.http.request(req) + return err +} + +func (c *Client) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { + methodURL := c.http.baseURL.JoinPath("schema", "default") + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), strings.NewReader(schemaVersionID)) if err != nil { return err } diff --git a/http/handler_store.go b/http/handler_store.go index d0cbdf42d2..945f6115f8 100644 --- a/http/handler_store.go +++ b/http/handler_store.go @@ -151,12 +151,30 @@ func (s *storeHandler) AddSchema(rw http.ResponseWriter, req *http.Request) { func (s *storeHandler) PatchSchema(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) - patch, err := io.ReadAll(req.Body) + var message patchSchemaRequest + err := requestJSON(req, &message) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - err = 
store.PatchSchema(req.Context(), string(patch)) + + err = store.PatchSchema(req.Context(), message.Patch, message.SetAsDefaultVersion) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + +func (s *storeHandler) SetDefaultSchemaVersion(rw http.ResponseWriter, req *http.Request) { + store := req.Context().Value(storeContextKey).(client.Store) + + schemaVersionID, err := io.ReadAll(req.Body) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err = store.SetDefaultSchemaVersion(req.Context(), string(schemaVersionID)) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return diff --git a/http/server.go b/http/server.go index 92da350aa1..7ad21e0632 100644 --- a/http/server.go +++ b/http/server.go @@ -54,6 +54,7 @@ func NewServer(db client.DB) *Server { api.Route("/schema", func(schema chi.Router) { schema.Post("/", store_handler.AddSchema) schema.Patch("/", store_handler.PatchSchema) + schema.Post("/default", store_handler.SetDefaultSchemaVersion) }) api.Route("/collections", func(collections chi.Router) { collections.Get("/", store_handler.GetCollection) diff --git a/http/wrapper.go b/http/wrapper.go index 558dc79474..eb91ffdb7a 100644 --- a/http/wrapper.go +++ b/http/wrapper.go @@ -86,8 +86,12 @@ func (w *Wrapper) AddSchema(ctx context.Context, schema string) ([]client.Collec return w.client.AddSchema(ctx, schema) } -func (w *Wrapper) PatchSchema(ctx context.Context, patch string) error { - return w.client.PatchSchema(ctx, patch) +func (w *Wrapper) PatchSchema(ctx context.Context, patch string, setAsDefaultVersion bool) error { + return w.client.PatchSchema(ctx, patch, setAsDefaultVersion) +} + +func (w *Wrapper) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { + return w.client.SetDefaultSchemaVersion(ctx, schemaVersionID) } func (w *Wrapper) SetMigration(ctx context.Context, config client.LensConfig) error { diff --git a/tests/integration/schema/migrations/query/with_set_default_test.go b/tests/integration/schema/migrations/query/with_set_default_test.go new file mode 100644 index 0000000000..e276bcab24 --- /dev/null +++ b/tests/integration/schema/migrations/query/with_set_default_test.go @@ -0,0 +1,236 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
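+
+// Illustrative only (a sketch, not part of this patch): over the new HTTP API
+// the default schema version is switched by POSTing the version ID as the raw
+// request body, e.g.
+//
+//	curl -X POST http://localhost:9181/api/v0/schema/default -d '<schema-version-id>'
+//
+// The tests below drive the same behaviour through the integration framework's
+// SetDefaultSchemaVersion action.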
+ +package query + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t *testing.T) { + schemaVersionID2 := "bafkreigfqdqnj5dunwgcsf2a6ht6q6m2yv3ys6byw5ifsmi5lfcpeh5t7e" + + test := testUtils.TestCase{ + Description: "Test schema migration", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + verified: Boolean + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreifmgqtwpvepenteuvj27u4ewix6nb7ypvyz6j555wsk5u2n7hrldm", + DestinationSchemaVersionID: schemaVersionID2, + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: schemaVersionID2, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t *testing.T) { + schemaVersionID1 := "bafkreifmgqtwpvepenteuvj27u4ewix6nb7ypvyz6j555wsk5u2n7hrldm" + schemaVersionID2 := "bafkreigfqdqnj5dunwgcsf2a6ht6q6m2yv3ys6byw5ifsmi5lfcpeh5t7e" + + test := testUtils.TestCase{ + Description: "Test schema migration", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + verified: Boolean + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: schemaVersionID2, + }, + // Create John using the new schema version + testUtils.CreateDoc{ + Doc: `{ + "name": "John", + "verified": true + }`, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: schemaVersionID1, + DestinationSchemaVersionID: schemaVersionID2, + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + // Set the schema version back to the original + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: schemaVersionID1, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + // The inverse lens migration has been applied, clearing the verified field + "verified": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQuery_WithSetDefaultToOriginalVersionThatDocWasCreatedAt_ClearsMigrations(t *testing.T) { + schemaVersionID1 := "bafkreifmgqtwpvepenteuvj27u4ewix6nb7ypvyz6j555wsk5u2n7hrldm" + schemaVersionID2 := "bafkreigfqdqnj5dunwgcsf2a6ht6q6m2yv3ys6byw5ifsmi5lfcpeh5t7e" + + test := testUtils.TestCase{ 
+ Description: "Test schema migration", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + verified: Boolean + } + `, + }, + // Create John using the original schema version + testUtils.CreateDoc{ + Doc: `{ + "name": "John", + "verified": false + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + SetAsDefaultVersion: immutable.Some(true), + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: schemaVersionID1, + DestinationSchemaVersionID: schemaVersionID2, + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + // Set the schema version back to the original + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: schemaVersionID1, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + // The inverse lens migration has not been applied, the document is returned as it was defined + "verified": false, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/updates/add/field/simple_test.go b/tests/integration/schema/updates/add/field/simple_test.go index d64f9e3bbe..1fe6980a62 100644 --- a/tests/integration/schema/updates/add/field/simple_test.go +++ b/tests/integration/schema/updates/add/field/simple_test.go @@ -13,6 +13,8 @@ package field import ( "testing" + "github.com/sourcenetwork/immutable" + testUtils "github.com/sourcenetwork/defradb/tests/integration" ) @@ -48,6 +50,39 @@ func TestSchemaUpdatesAddFieldSimple(t *testing.T) { testUtils.ExecuteTestCase(t, test) } +func TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_Errors(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, add field", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.Request{ + Request: `query { + Users { + name + email + } + }`, + ExpectedError: `Cannot query field "email" on type "Users".`, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + func TestSchemaUpdatesAddFieldSimpleErrorsAddingToUnknownCollection(t *testing.T) { test := testUtils.TestCase{ Description: "Test schema update, add to unknown collection fails", diff --git a/tests/integration/schema/with_update_set_default_test.go b/tests/integration/schema/with_update_set_default_test.go new file mode 100644 index 0000000000..3b365e0e5f --- /dev/null +++ b/tests/integration/schema/with_update_set_default_test.go @@ -0,0 +1,146 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
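+
+// Illustrative client usage of the updated interface (a sketch, not part of
+// this patch; ctx, patch and schemaVersionID are placeholders):
+//
+//	// apply the patch, but keep serving the current default schema version
+//	err := db.PatchSchema(ctx, patch, false)
+//	// ...later, promote a specific version to be the default
+//	err = db.SetDefaultSchemaVersion(ctx, schemaVersionID)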
+ +package schema + +import ( + "testing" + + "github.com/sourcenetwork/immutable" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestSchema_WithUpdateAndSetDefaultVersionToEmptyString_Errors(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, set default version to empty string", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: "", + ExpectedError: "schema version ID can't be empty", + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchema_WithUpdateAndSetDefaultVersionToUnknownVersion_Errors(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, set default version to invalid string", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: "does not exist", + ExpectedError: "datastore: key not found", + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchema_WithUpdateAndSetDefaultVersionToOriginal_NewFieldIsNotQueriable(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, set default version to original schema version", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + }, + testUtils.Request{ + Request: `query { + Users { + name + email + } + }`, + // As the email field did not exist at this schema version, it will return a gql error + ExpectedError: `Cannot query field "email" on type "Users".`, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchema_WithUpdateAndSetDefaultVersionToNew_AllowsQueryingOfNewField(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, set default version to new schema version", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: "bafkreidejaxpsevyijnr4nah4e2l263emwhdaj57fwwv34eu5rea4ff54e", + }, + testUtils.Request{ + Request: `query { + Users { + name + email + } + }`, + Results: []map[string]any{}, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index e17adfdeaa..10f3cf7262 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -81,8 +81,24 @@ type SchemaPatch struct { // If a value is not provided the patch will be applied to all nodes. 
NodeID immutable.Option[int] - Patch string - ExpectedError string + Patch string + + // If SetAsDefaultVersion has a value, and that value is false then the schema version + // resulting from this patch will not be made default. + SetAsDefaultVersion immutable.Option[bool] + ExpectedError string +} + +// SetDefaultSchemaVersion is an action that will set the default schema version to the +// given value. +type SetDefaultSchemaVersion struct { + // NodeID may hold the ID (index) of a node to set the default schema version on. + // + // If a value is not provided the default will be set on all nodes. + NodeID immutable.Option[int] + + SchemaVersionID string + ExpectedError string } // CreateDoc will attempt to create the given document in the given collection diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index f722516445..f41e1a7485 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -384,6 +384,9 @@ func executeTestCase( case SchemaPatch: patchSchema(s, action) + case SetDefaultSchemaVersion: + setDefaultSchemaVersion(s, action) + case ConfigureMigration: configureMigration(s, action) @@ -1030,7 +1033,14 @@ func patchSchema( action SchemaPatch, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - err := node.DB.PatchSchema(s.ctx, action.Patch) + var setAsDefaultVersion bool + if action.SetAsDefaultVersion.HasValue() { + setAsDefaultVersion = action.SetAsDefaultVersion.Value() + } else { + setAsDefaultVersion = true + } + + err := node.DB.PatchSchema(s.ctx, action.Patch, setAsDefaultVersion) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -1041,6 +1051,21 @@ func patchSchema( refreshIndexes(s) } +func setDefaultSchemaVersion( + s *state, + action SetDefaultSchemaVersion, +) { + for _, node := range getNodes(action.NodeID, s.nodes) { + err := node.DB.SetDefaultSchemaVersion(s.ctx, action.SchemaVersionID) + expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) + + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) + } + + refreshCollections(s) + refreshIndexes(s) +} + // createDoc creates a document using the chosen [mutationType] and caches it in the // test state object. func createDoc( From 2c862acce1231fb9dc98f947200d4b81658ebea5 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Wed, 27 Sep 2023 11:18:32 -0700 Subject: [PATCH 08/55] fix(i): Flaky normalize filter test (#1912) ## Relevant issue(s) Resolves #1879 ## Description This PR attempts to fix a flaky filter test. The normalization logic has been refactored to a recursive / functional approach that (hopefully) makes it a little easier to understand. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? Ran normalize tests in a loop for 200k+ iterations. 
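For illustration, the rewrites applied by the normalizer look roughly like this (a sketch in GraphQL filter syntax; the implementation operates on the internal `connor.FilterKey` maps, and these shapes are illustrative rather than lifted from the tests):

```
{_and: [{name: {_eq: "Bob"}}]}    =>  {name: {_eq: "Bob"}}   single-element _and removed
{_or:  [{name: {_eq: "Bob"}}]}    =>  {name: {_eq: "Bob"}}   single-element _or removed
{_not: {_not: {age: {_gt: 1}}}}   =>  {age: {_gt: 1}}        double _not removed
{_and: [{_and: [f1, f2]}, f3]}    =>  f1, f2 and f3 hoisted; conditions on the
                                      same property are grouped under one _and
```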
Specify the platform(s) on which this was tested: - MacOS --- planner/filter/normalize.go | 275 +++++++++++++++++++++++------------- 1 file changed, 173 insertions(+), 102 deletions(-) diff --git a/planner/filter/normalize.go b/planner/filter/normalize.go index 5f7d275418..181b1f8485 100644 --- a/planner/filter/normalize.go +++ b/planner/filter/normalize.go @@ -16,134 +16,205 @@ import ( ) // normalize normalizes the provided filter conditions. +// // The following cases are subject of normalization: // - _and or _or with one element is removed flattened // - double _not is removed // - any number of consecutive _ands with any number of elements is flattened +// // As the result object is a map with unique keys (a.k.a. properties), // while performing flattening of compound operators if the same property // is present in the result map, both conditions will be moved into an _and func normalize(conditions map[connor.FilterKey]any) map[connor.FilterKey]any { - return normalizeConditions(conditions, false).(map[connor.FilterKey]any) + return normalizeCondition(nil, conditions).(map[connor.FilterKey]any) } -func conditionsArrToMap(conditions []any) map[connor.FilterKey]any { +// normalizeCondition returns a normalized version of the given condition. +func normalizeCondition(parentKey connor.FilterKey, condition any) (result any) { + switch t := condition.(type) { + case map[connor.FilterKey]any: + result = normalizeConditions(parentKey, t) + + case []any: + conditions := make([]any, len(t)) + for i, c := range t { + conditions[i] = normalizeCondition(parentKey, c) + } + result = conditions + + default: + result = t + } + + return normalizeProperty(parentKey, result) +} + +// normalizeConditions returns a normalized version of the given conditions. +func normalizeConditions(parentKey connor.FilterKey, conditions map[connor.FilterKey]any) map[connor.FilterKey]any { result := make(map[connor.FilterKey]any) - for _, clause := range conditions { - if clauseMap, ok := clause.(map[connor.FilterKey]any); ok { - for k, v := range clauseMap { - result[k] = v + for key, val := range conditions { + result[key] = normalizeCondition(key, val) + + // check if the condition is an operator that can be normalized + op, ok := key.(*mapper.Operator) + if !ok { + continue + } + // check if we have any conditions that can be merged + merge := normalizeOperator(parentKey, op, result[key]) + if len(merge) == 0 { + continue + } + delete(result, key) + + // merge properties directly into result + for _, c := range merge { + for key, val := range c.(map[connor.FilterKey]any) { + result[key] = val } } + + // if the merged filter was an _or operator + // there may be child filters that can be merged + if op.Operation == request.FilterOpOr { + result = normalizeConditions(parentKey, result) + } } return result } -func addNormalizedCondition(key connor.FilterKey, val any, m map[connor.FilterKey]any) { - if _, isProp := key.(*mapper.PropertyIndex); isProp { - var andOp *mapper.Operator - var andContent []any - for existingKey := range m { - if op, isOp := existingKey.(*mapper.Operator); isOp && op.Operation == request.FilterOpAnd { - andOp = op - andContent = m[existingKey].([]any) - break - } - } - for existingKey := range m { - if existingKey.Equal(key) { - existingVal := m[existingKey] - delete(m, existingKey) - if andOp == nil { - andOp = &mapper.Operator{Operation: request.FilterOpAnd} - } - m[andOp] = append( - andContent, - map[connor.FilterKey]any{existingKey: existingVal}, - map[connor.FilterKey]any{key: val}, - ) - 
return - } +// normalizeOperator returns a normalized array of conditions. +func normalizeOperator(parentKey connor.FilterKey, op *mapper.Operator, condition any) []any { + switch op.Operation { + case request.FilterOpNot: + return normalizeOperatorNot(condition) + + case request.FilterOpOr: + return normalizeOperatorOr(condition) + + case request.FilterOpAnd: + return normalizeOperatorAnd(parentKey, condition) + + default: + return nil + } +} + +// normalizeOperatorAnd returns an array of conditions with all _and operators merged. +// +// If the parent operator is _not or _or, the subconditions will not be merged. +func normalizeOperatorAnd(parentKey connor.FilterKey, condition any) []any { + result := condition.([]any) + // always merge if only 1 property + if len(result) == 1 { + return result + } + // always merge if parent is not an operator + parentOp, ok := parentKey.(*mapper.Operator) + if !ok { + return result + } + // don't merge if parent is a _not or _or operator + if parentOp.Operation == request.FilterOpNot || parentOp.Operation == request.FilterOpOr { + return nil + } + return result +} + +// normalizeOperatorOr returns an array of conditions with all single _or operators merged. +func normalizeOperatorOr(condition any) []any { + result := condition.([]any) + // don't merge if more than 1 property + if len(result) > 1 { + return nil + } + return result +} + +// normalizeOperatorNot returns an array of conditions with all double _not operators merged. +func normalizeOperatorNot(condition any) (result []any) { + subConditions := condition.(map[connor.FilterKey]any) + // don't merge if more than 1 property + if len(subConditions) > 1 { + return nil + } + // find double _not occurances + for subKey, subCondition := range subConditions { + op, ok := subKey.(*mapper.Operator) + if ok && op.Operation == request.FilterOpNot { + result = append(result, subCondition) } - for _, andElement := range andContent { - elementMap := andElement.(map[connor.FilterKey]any) - for andElementKey := range elementMap { - if andElementKey.Equal(key) { - m[andOp] = append(andContent, map[connor.FilterKey]any{key: val}) - return - } + } + return result +} + +// normalizeProperty flattens and groups property filters where possible. +// +// Filters targeting the same property will be grouped into a single _and. 
+func normalizeProperty(parentKey connor.FilterKey, condition any) any { + switch t := condition.(type) { + case map[connor.FilterKey]any: + results := make(map[connor.FilterKey]any) + for _, c := range normalizeProperties(parentKey, []any{t}) { + for key, val := range c.(map[connor.FilterKey]any) { + results[key] = val } } + return results + + case []any: + return normalizeProperties(parentKey, t) + + default: + return t } - m[key] = val } -func normalizeConditions(conditions any, skipRoot bool) any { - result := make(map[connor.FilterKey]any) - switch typedConditions := conditions.(type) { - case map[connor.FilterKey]any: - for rootKey, rootVal := range typedConditions { - rootOpKey, isRootOp := rootKey.(*mapper.Operator) - if isRootOp { - if rootOpKey.Operation == request.FilterOpAnd || rootOpKey.Operation == request.FilterOpOr { - rootValArr := rootVal.([]any) - if len(rootValArr) == 1 || rootOpKey.Operation == request.FilterOpAnd && !skipRoot { - flat := normalizeConditions(conditionsArrToMap(rootValArr), false) - flatMap := flat.(map[connor.FilterKey]any) - for k, v := range flatMap { - addNormalizedCondition(k, v, result) - } - } else { - resultArr := []any{} - for i := range rootValArr { - norm := normalizeConditions(rootValArr[i], !skipRoot) - normMap, ok := norm.(map[connor.FilterKey]any) - if ok { - for k, v := range normMap { - resultArr = append(resultArr, map[connor.FilterKey]any{k: v}) - } - } else { - resultArr = append(resultArr, norm) - } - } - addNormalizedCondition(rootKey, resultArr, result) - } - } else if rootOpKey.Operation == request.FilterOpNot { - notMap := rootVal.(map[connor.FilterKey]any) - if len(notMap) == 1 { - var k connor.FilterKey - for k = range notMap { - break - } - norm := normalizeConditions(notMap, true).(map[connor.FilterKey]any) - delete(notMap, k) - var v any - for k, v = range norm { - break - } - if opKey, ok := k.(*mapper.Operator); ok && opKey.Operation == request.FilterOpNot { - notNotMap := normalizeConditions(v, false).(map[connor.FilterKey]any) - for notNotKey, notNotVal := range notNotMap { - addNormalizedCondition(notNotKey, notNotVal, result) - } - } else { - notMap[k] = v - addNormalizedCondition(rootOpKey, notMap, result) - } - } else { - addNormalizedCondition(rootKey, rootVal, result) - } - } else { - addNormalizedCondition(rootKey, rootVal, result) - } +// normalizeProperty flattens and groups property filters where possible. +// +// Filters targeting the same property will be grouped into a single _and. +func normalizeProperties(parentKey connor.FilterKey, conditions []any) []any { + var merge []any + var result []any + + // can only merge _and groups if parent is not an _or operator + parentOp, isParentOp := parentKey.(*mapper.Operator) + canMergeAnd := !isParentOp || parentOp.Operation != request.FilterOpOr + + // accumulate properties that can be merged into a single _and + // if canMergeAnd is true, all _and groups will be merged + props := make(map[int][]any) + for _, c := range conditions { + for key, val := range c.(map[connor.FilterKey]any) { + op, ok := key.(*mapper.Operator) + if canMergeAnd && ok && op.Operation == request.FilterOpAnd { + merge = append(merge, val.([]any)...) 
+			} else if prop, ok := key.(*mapper.PropertyIndex); ok {
+				props[prop.Index] = append(props[prop.Index], map[connor.FilterKey]any{key: val})
 			} else {
-				addNormalizedCondition(rootKey, normalizeConditions(rootVal, false), result)
+				result = append(result, map[connor.FilterKey]any{key: val})
 			}
 		}
+	}
+
+	// merge filters with duplicate keys into a single _and
+	for _, val := range props {
+		if len(val) == 1 {
+			// only 1 property so no merge required
+			result = append(result, val...)
+		} else {
+			// multiple properties require merge with _and
+			merge = append(merge, val...)
+		}
+	}
+
+	// nothing to merge
+	if len(merge) == 0 {
 		return result
-	case []any:
-		return conditionsArrToMap(typedConditions)
-	default:
-		return conditions
 	}
+
+	// merge into a single _and operator
+	key := &mapper.Operator{Operation: request.FilterOpAnd}
+	result = append(result, map[connor.FilterKey]any{key: merge})
+	return result
 }

From e952be54fb3d6c24e5f4bf1325ba8381d0bc888b Mon Sep 17 00:00:00 2001
From: AndrewSisley
Date: Wed, 27 Sep 2023 18:26:22 -0400
Subject: [PATCH 09/55] fix: Infinite loop when updating one-one relation
 (#1915)

## Relevant issue(s)

Resolves #1914

## Description

Fixes a bug where an infinite loop may be created when updating a self-referencing one-one relation from the secondary side.
---
 client/descriptions.go                        |  16 +-
 db/collection.go                              |   2 +-
 db/collection_update.go                       |   7 +-
 planner/type_join.go                          |  13 +-
 .../one_to_one/with_self_ref_test.go          | 191 ++++++++++++++++++
 5 files changed, 218 insertions(+), 11 deletions(-)
 create mode 100644 tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go

diff --git a/client/descriptions.go b/client/descriptions.go
index 0b44f36b83..4f388fa7d3 100644
--- a/client/descriptions.go
+++ b/client/descriptions.go
@@ -52,13 +52,15 @@ func (col CollectionDescription) GetFieldByID(id FieldID) (FieldDescription, boo
 	return FieldDescription{}, false
 }
 
-// GetRelation returns the field that supports the relation of the given name.
-func (col CollectionDescription) GetRelation(name string) (FieldDescription, bool) {
-	if !col.Schema.IsEmpty() {
-		for _, field := range col.Schema.Fields {
-			if field.RelationName == name {
-				return field, true
-			}
+// GetFieldByRelation returns the field that supports the relation of the given name.
+func (col CollectionDescription) GetFieldByRelation( + relationName string, + otherCollectionName string, + otherFieldName string, +) (FieldDescription, bool) { + for _, field := range col.Schema.Fields { + if field.RelationName == relationName && !(col.Name == otherCollectionName && otherFieldName == field.Name) { + return field, true } } return FieldDescription{}, false diff --git a/db/collection.go b/db/collection.go index cda5cbf584..f5dacccfb1 100644 --- a/db/collection.go +++ b/db/collection.go @@ -1062,7 +1062,7 @@ func (c *collection) save( if isSecondaryRelationID { primaryId := val.Value().(string) - err = c.patchPrimaryDoc(ctx, txn, relationFieldDescription, primaryKey.DocKey, primaryId) + err = c.patchPrimaryDoc(ctx, txn, c.Name(), relationFieldDescription, primaryKey.DocKey, primaryId) if err != nil { return cid.Undef, err } diff --git a/db/collection_update.go b/db/collection_update.go index 1a15482935..2e353dd0d3 100644 --- a/db/collection_update.go +++ b/db/collection_update.go @@ -350,6 +350,7 @@ func (c *collection) isSecondaryIDField(fieldDesc client.FieldDescription) (clie func (c *collection) patchPrimaryDoc( ctx context.Context, txn datastore.Txn, + secondaryCollectionName string, relationFieldDescription client.FieldDescription, docKey string, fieldValue string, @@ -365,7 +366,11 @@ func (c *collection) patchPrimaryDoc( } primaryCol = primaryCol.WithTxn(txn) - primaryField, ok := primaryCol.Description().GetRelation(relationFieldDescription.RelationName) + primaryField, ok := primaryCol.Description().GetFieldByRelation( + relationFieldDescription.RelationName, + secondaryCollectionName, + relationFieldDescription.Name, + ) if !ok { return client.NewErrFieldNotExist(relationFieldDescription.RelationName) } diff --git a/planner/type_join.go b/planner/type_join.go index f37437089e..ee771b01fc 100644 --- a/planner/type_join.go +++ b/planner/type_join.go @@ -259,7 +259,11 @@ func (p *Planner) makeTypeJoinOne( return nil, err } - subTypeField, subTypeFieldNameFound := subTypeCollectionDesc.GetRelation(subTypeFieldDesc.RelationName) + subTypeField, subTypeFieldNameFound := subTypeCollectionDesc.GetFieldByRelation( + subTypeFieldDesc.RelationName, + parent.sourceInfo.collectionDescription.Name, + subTypeFieldDesc.Name, + ) if !subTypeFieldNameFound { return nil, client.NewErrFieldNotExist(subTypeFieldDesc.RelationName) } @@ -481,7 +485,12 @@ func (p *Planner) makeTypeJoinMany( return nil, err } - rootField, rootNameFound := subTypeCollectionDesc.GetRelation(subTypeFieldDesc.RelationName) + rootField, rootNameFound := subTypeCollectionDesc.GetFieldByRelation( + subTypeFieldDesc.RelationName, + parent.sourceInfo.collectionDescription.Name, + subTypeFieldDesc.Name, + ) + if !rootNameFound { return nil, client.NewErrFieldNotExist(subTypeFieldDesc.RelationName) } diff --git a/tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go b/tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go new file mode 100644 index 0000000000..16225f4ab3 --- /dev/null +++ b/tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go @@ -0,0 +1,191 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+
+package one_to_one
+
+import (
+	"fmt"
+	"testing"
+
+	testUtils "github.com/sourcenetwork/defradb/tests/integration"
+)
+
+func TestMutationUpdateOneToOne_SelfReferencingFromPrimary(t *testing.T) {
+	user1ID := "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad"
+
+	test := testUtils.TestCase{
+		Description: "One to one update mutation, self referencing from primary",
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User {
+						name: String
+						boss: User @primary
+						underling: User
+					}
+				`,
+			},
+			testUtils.CreateDoc{
+				Doc: `{
+					"name": "John"
+				}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `{
+					"name": "Fred"
+				}`,
+			},
+			testUtils.UpdateDoc{
+				DocID: 1,
+				Doc: fmt.Sprintf(
+					`{
+						"boss_id": "%s"
+					}`,
+					user1ID,
+				),
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User {
+							name
+							boss {
+								name
+							}
+						}
+					}`,
+				Results: []map[string]any{
+					{
+						"name": "Fred",
+						"boss": map[string]any{
+							"name": "John",
+						},
+					},
+					{
+						"name": "John",
+						"boss": nil,
+					},
+				},
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User {
+							name
+							underling {
+								name
+							}
+						}
+					}`,
+				Results: []map[string]any{
+					{
+						"name": "Fred",
+						"underling": nil,
+					},
+					{
+						"name": "John",
+						"underling": map[string]any{
+							"name": "Fred",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestMutationUpdateOneToOne_SelfReferencingFromSecondary(t *testing.T) {
+	user1ID := "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad"
+
+	test := testUtils.TestCase{
+		Description: "One to one update mutation, self referencing from secondary",
+
+		Actions: []any{
+			testUtils.SchemaUpdate{
+				Schema: `
+					type User {
+						name: String
+						boss: User
+						underling: User @primary
+					}
+				`,
+			},
+			testUtils.CreateDoc{
+				Doc: `{
+					"name": "John"
+				}`,
+			},
+			testUtils.CreateDoc{
+				Doc: `{
+					"name": "Fred"
+				}`,
+			},
+			testUtils.UpdateDoc{
+				DocID: 1,
+				Doc: fmt.Sprintf(
+					`{
+						"boss_id": "%s"
+					}`,
+					user1ID,
+				),
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User {
+							name
+							boss {
+								name
+							}
+						}
+					}`,
+				Results: []map[string]any{
+					{
+						"name": "Fred",
+						"boss": map[string]any{
+							"name": "John",
+						},
+					},
+					{
+						"name": "John",
+						"boss": nil,
+					},
+				},
+			},
+			testUtils.Request{
+				Request: `
+					query {
+						User {
+							name
+							underling {
+								name
+							}
+						}
+					}`,
+				Results: []map[string]any{
+					{
+						"name": "Fred",
+						"underling": nil,
+					},
+					{
+						"name": "John",
+						"underling": map[string]any{
+							"name": "Fred",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
From 6244c5b8bfb3f6317cb73068b8579531a6def297 Mon Sep 17 00:00:00 2001
From: AndrewSisley
Date: Mon, 2 Oct 2023 12:14:29 -0400
Subject: [PATCH 10/55] fix: Remove collection name from schema ID generation
 (#1920)

## Relevant issue(s)

Resolves #1083

## Description

Removes collection name from schema ID generation. It is a schema ID, not a collection ID; including the collection name makes no sense and is likely a historical artefact.
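
To make the effect concrete, here is a minimal, self-contained sketch of the before/after derivation, using only the Go standard library. The toy `SchemaDescription` type and the hex digest are illustrative stand-ins: the real code marshals `col.desc.Schema` and wraps the digest in a CIDv1 via `ccid.NewSHA256CidV1`.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// Toy stand-in for client.SchemaDescription, reduced to the parts
// relevant to ID generation.
type SchemaDescription struct {
	Name   string
	Fields []string
}

// oldSchemaID mirrors the previous behaviour: the collection name was
// part of the hashed input, so renaming a collection changed its schema ID.
func oldSchemaID(collectionName string, schema SchemaDescription) string {
	buf, _ := json.Marshal(struct {
		Name   string
		Schema SchemaDescription
	}{collectionName, schema})
	sum := sha256.Sum256(buf)
	return hex.EncodeToString(sum[:])
}

// schemaID mirrors the new behaviour: only the marshalled schema is hashed,
// so the collection name cannot influence the result.
func schemaID(schema SchemaDescription) string {
	schemaBuf, _ := json.Marshal(schema)
	sum := sha256.Sum256(schemaBuf) // the real code builds a CIDv1 from this digest
	return hex.EncodeToString(sum[:])
}

func main() {
	schema := SchemaDescription{Name: "User", Fields: []string{"name: String"}}

	// Previously, identical schemas attached to differently named
	// collections produced different IDs:
	fmt.Println(oldSchemaID("Users", schema) == oldSchemaID("People", schema)) // false

	// Now the ID depends on the schema alone:
	fmt.Println(schemaID(schema))
}
```

This is also why the bulk of this diff is mechanical: every hard-coded schema ID, schema version ID, and commit CID in the tests changes once the collection name is dropped from the hashed input.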
--- api/http/handlerfuncs_test.go | 12 ++-- db/collection.go | 7 +- db/p2p_collection_test.go | 1 + .../i1083-rm-col-name-from-schema-id.md | 3 + .../integration/cli/client_schema_add_test.go | 4 +- .../cli/client_schema_migration_set_test.go | 8 +-- .../events/simple/with_update_test.go | 4 +- .../mutation/create/with_version_test.go | 2 +- .../simple/replicator/with_create_test.go | 4 +- .../integration/query/commits/simple_test.go | 30 ++++----- .../query/commits/with_cid_test.go | 8 +-- .../query/commits/with_depth_test.go | 34 +++++----- .../query/commits/with_dockey_cid_test.go | 4 +- .../query/commits/with_dockey_count_test.go | 6 +- .../query/commits/with_dockey_field_test.go | 4 +- .../commits/with_dockey_limit_offset_test.go | 4 +- .../query/commits/with_dockey_limit_test.go | 4 +- .../with_dockey_order_limit_offset_test.go | 4 +- .../query/commits/with_dockey_order_test.go | 66 +++++++++---------- .../query/commits/with_dockey_test.go | 46 ++++++------- .../commits/with_dockey_typename_test.go | 6 +- .../query/commits/with_field_test.go | 8 +-- .../query/commits/with_group_test.go | 16 ++--- .../latest_commits/with_dockey_field_test.go | 8 +-- .../query/latest_commits/with_dockey_test.go | 10 +-- .../query/one_to_many/with_cid_dockey_test.go | 8 +-- .../query/simple/with_cid_dockey_test.go | 12 ++-- .../query/simple/with_version_test.go | 14 ++-- .../schema/migrations/query/simple_test.go | 52 +++++++-------- .../migrations/query/with_dockey_test.go | 8 +-- .../schema/migrations/query/with_p2p_test.go | 12 ++-- .../migrations/query/with_restart_test.go | 4 +- .../migrations/query/with_set_default_test.go | 12 ++-- .../schema/migrations/query/with_txn_test.go | 8 +-- .../migrations/query/with_update_test.go | 8 +-- .../schema/migrations/simple_test.go | 8 +-- .../updates/add/field/create_update_test.go | 8 +-- .../schema/updates/move/simple_test.go | 2 +- .../schema/with_update_set_default_test.go | 4 +- 39 files changed, 232 insertions(+), 231 deletions(-) create mode 100644 docs/data_format_changes/i1083-rm-col-name-from-schema-id.md diff --git a/api/http/handlerfuncs_test.go b/api/http/handlerfuncs_test.go index bef944f908..ee7389a250 100644 --- a/api/http/handlerfuncs_test.go +++ b/api/http/handlerfuncs_test.go @@ -652,8 +652,8 @@ type group { "collections": []any{ map[string]any{ "name": "group", - "id": "bafkreieunyhcyupkdppyo2g4zcqtdxvj5xi4f422gp2jwene6ohndvcobe", - "version_id": "bafkreieunyhcyupkdppyo2g4zcqtdxvj5xi4f422gp2jwene6ohndvcobe", + "id": "bafkreicdtcgmgjjjao4zzaoacy26cl7xtnnev4qotvflellmdrzi57m5re", + "version_id": "bafkreicdtcgmgjjjao4zzaoacy26cl7xtnnev4qotvflellmdrzi57m5re", "fields": []any{ map[string]any{ "id": "0", @@ -683,8 +683,8 @@ type group { }, map[string]any{ "name": "user", - "id": "bafkreigrucdl7x3lsa4xwgz2bn7lbqmiwkifnspgx7hlkpaal3o55325bq", - "version_id": "bafkreigrucdl7x3lsa4xwgz2bn7lbqmiwkifnspgx7hlkpaal3o55325bq", + "id": "bafkreigl2v5trzfznb7dm3dubmsbzkw73s3phjm6laegswzl4625wc2grm", + "version_id": "bafkreigl2v5trzfznb7dm3dubmsbzkw73s3phjm6laegswzl4625wc2grm", "fields": []any{ map[string]any{ "id": "0", @@ -852,8 +852,8 @@ type User { "collections": []any{ map[string]any{ "name": "User", - "id": "bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske", - "version_id": "bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske", + "id": "bafkreiet7xqehjsjsthy6nafvtbz4el376uudhkjyeifuvvsr64se33swm", + "version_id": "bafkreiet7xqehjsjsthy6nafvtbz4el376uudhkjyeifuvvsr64se33swm", }, }, }, v) diff --git a/db/collection.go 
b/db/collection.go
index f5dacccfb1..df8ca85cc1 100644
--- a/db/collection.go
+++ b/db/collection.go
@@ -158,16 +158,13 @@ func (db *db) createCollection(
 
 	// Local elements such as secondary indexes should be excluded
 	// from the (global) schemaId.
-	globalSchemaBuf, err := json.Marshal(struct {
-		Name   string
-		Schema client.SchemaDescription
-	}{col.desc.Name, col.desc.Schema})
+	schemaBuf, err := json.Marshal(col.desc.Schema)
 	if err != nil {
 		return nil, err
 	}
 
 	// add a reference to this DB by desc hash
-	cid, err := ccid.NewSHA256CidV1(globalSchemaBuf)
+	cid, err := ccid.NewSHA256CidV1(schemaBuf)
 	if err != nil {
 		return nil, err
 	}
diff --git a/db/p2p_collection_test.go b/db/p2p_collection_test.go
index bdf54e3a38..db6b1e6417 100644
--- a/db/p2p_collection_test.go
+++ b/db/p2p_collection_test.go
@@ -28,6 +28,7 @@ func newTestCollection(
 	desc := client.CollectionDescription{
 		Name: name,
 		Schema: client.SchemaDescription{
+			Name: name,
 			Fields: []client.FieldDescription{
 				{
 					Name: "_key",
diff --git a/docs/data_format_changes/i1083-rm-col-name-from-schema-id.md b/docs/data_format_changes/i1083-rm-col-name-from-schema-id.md
new file mode 100644
index 0000000000..8ed76b8f27
--- /dev/null
+++ b/docs/data_format_changes/i1083-rm-col-name-from-schema-id.md
@@ -0,0 +1,3 @@
+# Remove collection name from schema ID generation
+
+The collection name was removed from the schema ID generation; this caused test schema IDs and commit CIDs to change. It will also impact production systems, as identical schemas created on different DefraDB versions would not have the same IDs.
\ No newline at end of file
diff --git a/tests/integration/cli/client_schema_add_test.go b/tests/integration/cli/client_schema_add_test.go
index 12d2e5e539..124fcba82a 100644
--- a/tests/integration/cli/client_schema_add_test.go
+++ b/tests/integration/cli/client_schema_add_test.go
@@ -30,7 +30,7 @@ func TestAddSchemaFromFile(t *testing.T) {
 
 	nodeLog := stopDefra()
 
-	jsonReponse := `{"data":{"collections":[{"name":"User","id":"bafkreib5hb7mr7ecbdufd7mvv6va6mpxukjai7hpnqkhxonnw7lzwfqlja","version_id":"bafkreib5hb7mr7ecbdufd7mvv6va6mpxukjai7hpnqkhxonnw7lzwfqlja"}],"result":"success"}}`
+	jsonReponse := `{"data":{"collections":[{"name":"User","id":"bafkreifxwnqwcg3uqqr3iydebnmeadmjxg722qauocdtjbusinjtzja7py","version_id":"bafkreifxwnqwcg3uqqr3iydebnmeadmjxg722qauocdtjbusinjtzja7py"}],"result":"success"}}`
 	assert.Contains(t, stdout, jsonReponse)
 	assertNotContainsSubstring(t, nodeLog, "ERROR")
 }
@@ -47,7 +47,7 @@ func TestAddSchemaWithDuplicateType(t *testing.T) {
 
 	_ = stopDefra()
 
-	jsonReponse := `{"data":{"collections":[{"name":"Post","id":"bafkreicgpbla5wlogpinnm32arcqzptusdc5tzdznipqrf6nkroav6b25a","version_id":"bafkreicgpbla5wlogpinnm32arcqzptusdc5tzdznipqrf6nkroav6b25a"}],"result":"success"}}`
+	jsonReponse := `{"data":{"collections":[{"name":"Post","id":"bafkreibamgkyo3juvgx2b3ice4tjldcuxiibwo32kq22vfuyvzzgg7kfga","version_id":"bafkreibamgkyo3juvgx2b3ice4tjldcuxiibwo32kq22vfuyvzzgg7kfga"}],"result":"success"}}`
 	assertContainsSubstring(t, stdout1, jsonReponse)
 	assertContainsSubstring(t, stdout2, `schema type already exists.
Name: Post`) } diff --git a/tests/integration/cli/client_schema_migration_set_test.go b/tests/integration/cli/client_schema_migration_set_test.go index d97a4e77d8..b9c0c5009f 100644 --- a/tests/integration/cli/client_schema_migration_set_test.go +++ b/tests/integration/cli/client_schema_migration_set_test.go @@ -200,8 +200,8 @@ func TestSchemaMigrationSet_GivenCfgWithLenses_ShouldSucceedAndMigrateDoc(t *tes stdout, _ = runDefraCommand(t, conf, []string{ "client", "schema", "migration", "set", - "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", fmt.Sprintf(`{"lenses": [{"path":"%s","arguments":{"dst":"verified","value":true}}]}`, lenses.SetDefaultModulePath), }) assertContainsSubstring(t, stdout, "success") @@ -229,8 +229,8 @@ func TestSchemaMigrationSet_GivenCfgWithLenseError_ShouldError(t *testing.T) { stdout, _ = runDefraCommand(t, conf, []string{ "client", "schema", "migration", "set", - "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", // Do not set lens parameters in order to generate error fmt.Sprintf(`{"lenses": [{"path":"%s"}]}`, lenses.SetDefaultModulePath), }) diff --git a/tests/integration/events/simple/with_update_test.go b/tests/integration/events/simple/with_update_test.go index 1119e61313..7674c33f18 100644 --- a/tests/integration/events/simple/with_update_test.go +++ b/tests/integration/events/simple/with_update_test.go @@ -64,14 +64,14 @@ func TestEventsSimpleWithUpdate(t *testing.T) { ExpectedUpdates: []testUtils.ExpectedUpdate{ { DocKey: immutable.Some(docKey1), - Cid: immutable.Some("bafybeifugdzbm7y3eihxe7wbldyesxeh6s6m62ghvwipphtld547rfi4cu"), + Cid: immutable.Some("bafybeibqj76txdyzn7mc4j45mjm55i4re3uda2uhf7y5p7w4zcp443deey"), }, { DocKey: immutable.Some(docKey2), }, { DocKey: immutable.Some(docKey1), - Cid: immutable.Some("bafybeihqwcasy4mnwcyrnd2n5hdkg745vyj3qidporvamrhfkjqxihsmqm"), + Cid: immutable.Some("bafybeihydikygu7dx5z5rghtjwlqyirlqcd2lbrinnloqbr4hwx4e6xbc4"), }, }, } diff --git a/tests/integration/mutation/create/with_version_test.go b/tests/integration/mutation/create/with_version_test.go index 6c28e898f7..216c8cf1d9 100644 --- a/tests/integration/mutation/create/with_version_test.go +++ b/tests/integration/mutation/create/with_version_test.go @@ -39,7 +39,7 @@ func TestMutationCreate_ReturnsVersionCID(t *testing.T) { { "_version": []map[string]any{ { - "cid": "bafybeifugdzbm7y3eihxe7wbldyesxeh6s6m62ghvwipphtld547rfi4cu", + "cid": "bafybeibqj76txdyzn7mc4j45mjm55i4re3uda2uhf7y5p7w4zcp443deey", }, }, }, diff --git a/tests/integration/net/state/simple/replicator/with_create_test.go b/tests/integration/net/state/simple/replicator/with_create_test.go index e1e75a25c0..65d6cfd6ce 100644 --- a/tests/integration/net/state/simple/replicator/with_create_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_test.go @@ -442,7 +442,7 @@ func TestP2POneToOneReplicatorOrderIndependent(t *testing.T) { "name": "John", "_version": []map[string]any{ { - "schemaVersionId": "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", + "schemaVersionId": "bafkreig2pfhv2b7htpvshcqftuj4yrfu5ptxulr6ul6pjcaweyz43yz5ay", }, }, }, @@ -502,7 +502,7 @@ 
func TestP2POneToOneReplicatorOrderIndependentDirectCreate(t *testing.T) { "_key": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", "_version": []map[string]any{ { - "schemaVersionId": "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", + "schemaVersionId": "bafkreig2pfhv2b7htpvshcqftuj4yrfu5ptxulr6ul6pjcaweyz43yz5ay", }, }, }, diff --git a/tests/integration/query/commits/simple_test.go b/tests/integration/query/commits/simple_test.go index ffd558f2ee..5a162a8298 100644 --- a/tests/integration/query/commits/simple_test.go +++ b/tests/integration/query/commits/simple_test.go @@ -36,13 +36,13 @@ func TestQueryCommits(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", }, }, }, @@ -79,22 +79,22 @@ func TestQueryCommitsMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiepxkcirsd56mtrnzv3nbuqsfjeu77jk7dw5pmxvzmye2agyhzfta", + "cid": "bafybeidvkbxozsr7qkhghljwth7g2sfipttevkyxbstqchoa26tqeopdjq", }, { - "cid": "bafybeidjkzqj3yub3k4sulnaa444zuuy7yo2ku4gl4qxvmcopgoynafyae", + "cid": "bafybeifwd2xgjtokmauglml4jotv2sszmdux27ttiqtbc6vvfdzaabsw7y", }, { - "cid": "bafybeiexzvyjil7s5cxicetgu4kriiuqspbzm7hpd353q5kmqbpqky26hq", + "cid": "bafybeihbevhd4dcydunuhq4otjdpw4uyk2lfcqa5zfrrn7jyk3e4rcieom", }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", }, }, }, @@ -125,16 +125,16 @@ func TestQueryCommitsWithSchemaVersionIdField(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", - "schemaVersionId": "bafkreicihc56up4gzd4pf6lsmg5fc7dugyuigoaywgtjwy5c2suvj5zhtm", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", + "schemaVersionId": "bafkreig2vbhp6udwzcn2grqjnr7b3qbzperexbdazyovgwvkotpq3ehmny", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", - "schemaVersionId": "bafkreicihc56up4gzd4pf6lsmg5fc7dugyuigoaywgtjwy5c2suvj5zhtm", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", + "schemaVersionId": "bafkreig2vbhp6udwzcn2grqjnr7b3qbzperexbdazyovgwvkotpq3ehmny", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", - "schemaVersionId": "bafkreicihc56up4gzd4pf6lsmg5fc7dugyuigoaywgtjwy5c2suvj5zhtm", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", + "schemaVersionId": "bafkreig2vbhp6udwzcn2grqjnr7b3qbzperexbdazyovgwvkotpq3ehmny", }, }, }, diff --git a/tests/integration/query/commits/with_cid_test.go b/tests/integration/query/commits/with_cid_test.go index 6d8c30d73e..7ce0c9cce5 100644 --- a/tests/integration/query/commits/with_cid_test.go +++ b/tests/integration/query/commits/with_cid_test.go @@ -38,14 +38,14 @@ func TestQueryCommitsWithCid(t *testing.T) { testUtils.Request{ Request: 
`query { commits( - cid: "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4" + cid: "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", }, }, }, @@ -71,14 +71,14 @@ func TestQueryCommitsWithCidForFieldCommit(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq" + cid: "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", }, }, }, diff --git a/tests/integration/query/commits/with_depth_test.go b/tests/integration/query/commits/with_depth_test.go index 12acde76e5..31c3e81a0c 100644 --- a/tests/integration/query/commits/with_depth_test.go +++ b/tests/integration/query/commits/with_depth_test.go @@ -36,13 +36,13 @@ func TestQueryCommitsWithDepth1(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", }, }, }, @@ -81,16 +81,16 @@ func TestQueryCommitsWithDepth1WithUpdate(t *testing.T) { Results: []map[string]any{ { // "Age" field head - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", "height": int64(2), }, { // "Name" field head (unchanged from create) - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "height": int64(1), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", "height": int64(2), }, }, @@ -137,27 +137,27 @@ func TestQueryCommitsWithDepth2WithUpdate(t *testing.T) { Results: []map[string]any{ { // Composite head - "cid": "bafybeihccn3utqsaxzsh6i7dlnd45rutcg7fbsogfw4vvigii7laedslqe", + "cid": "bafybeids3sq532txo55elkc5eba7ns65s5lfbkx5x7cg2sxtpm253cyw5i", "height": int64(3), }, { // Composite head -1 - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", "height": int64(2), }, { // "Name" field head (unchanged from create) - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "height": int64(1), }, { // "Age" field head - "cid": "bafybeiegusf5ypa7htxwa6u4fvne3lqq2jafe4fxllh4lo6iw4xdsn4yyq", + "cid": "bafybeidb3snylmwsqdrroaunenha7w2lvsxksxrpc7tljp6hmhulxghncy", "height": int64(3), }, { // "Age" field head -1 - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", "height": int64(2), }, }, @@ -195,22 +195,22 @@ func TestQueryCommitsWithDepth1AndMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": 
"bafybeihncdw7dmswtccv7sluutfb36wunsunxjtt6i3tjgsdrum23nff3y", + "cid": "bafybeigzsydwfezqbixozv566kw4b4tspaq7ioxdq2kdepzea5jicbedky", }, { - "cid": "bafybeibcs77pp5dy7qnph5fm3n6bhw74opbj2b6t66dfau37agoqvhypfm", + "cid": "bafybeigua5iz5vmzjsj3mgeenl6sb3ibt3iyoo4qqv2qotslufktfb75wi", }, { - "cid": "bafybeidy7d44vt5aizivwq6oejqejkog7hl43ckjc35yoidw5qv5kngfma", + "cid": "bafybeie7igdpknhuiaoarh4na635wxagkikredupeb5p4rrpq7rxqdzlcy", }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", }, }, }, diff --git a/tests/integration/query/commits/with_dockey_cid_test.go b/tests/integration/query/commits/with_dockey_cid_test.go index c6c9c3e7e0..0cda06e63c 100644 --- a/tests/integration/query/commits/with_dockey_cid_test.go +++ b/tests/integration/query/commits/with_dockey_cid_test.go @@ -104,14 +104,14 @@ func TestQueryCommitsWithDockeyAndCid(t *testing.T) { Request: ` { commits( dockey: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", - cid: "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe" + cid: "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", }, }, }, diff --git a/tests/integration/query/commits/with_dockey_count_test.go b/tests/integration/query/commits/with_dockey_count_test.go index dc64c6847b..0843cc68bc 100644 --- a/tests/integration/query/commits/with_dockey_count_test.go +++ b/tests/integration/query/commits/with_dockey_count_test.go @@ -37,15 +37,15 @@ func TestQueryCommitsWithDockeyAndLinkCount(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "_count": 0, }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "_count": 0, }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "_count": 2, }, }, diff --git a/tests/integration/query/commits/with_dockey_field_test.go b/tests/integration/query/commits/with_dockey_field_test.go index 77857a23fe..3ce1db4227 100644 --- a/tests/integration/query/commits/with_dockey_field_test.go +++ b/tests/integration/query/commits/with_dockey_field_test.go @@ -118,7 +118,7 @@ func TestQueryCommitsWithDockeyAndFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", }, }, }, @@ -150,7 +150,7 @@ func TestQueryCommitsWithDockeyAndCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", }, }, }, diff --git a/tests/integration/query/commits/with_dockey_limit_offset_test.go b/tests/integration/query/commits/with_dockey_limit_offset_test.go index 
3ec10284e3..e445dc6b42 100644 --- a/tests/integration/query/commits/with_dockey_limit_offset_test.go +++ b/tests/integration/query/commits/with_dockey_limit_offset_test.go @@ -57,10 +57,10 @@ func TestQueryCommitsWithDockeyAndLimitAndOffset(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihccn3utqsaxzsh6i7dlnd45rutcg7fbsogfw4vvigii7laedslqe", + "cid": "bafybeids3sq532txo55elkc5eba7ns65s5lfbkx5x7cg2sxtpm253cyw5i", }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", }, }, }, diff --git a/tests/integration/query/commits/with_dockey_limit_test.go b/tests/integration/query/commits/with_dockey_limit_test.go index 4b87bfa307..355981b382 100644 --- a/tests/integration/query/commits/with_dockey_limit_test.go +++ b/tests/integration/query/commits/with_dockey_limit_test.go @@ -50,10 +50,10 @@ func TestQueryCommitsWithDockeyAndLimit(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihccn3utqsaxzsh6i7dlnd45rutcg7fbsogfw4vvigii7laedslqe", + "cid": "bafybeids3sq532txo55elkc5eba7ns65s5lfbkx5x7cg2sxtpm253cyw5i", }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", }, }, }, diff --git a/tests/integration/query/commits/with_dockey_order_limit_offset_test.go b/tests/integration/query/commits/with_dockey_order_limit_offset_test.go index 1b1a8fe885..e7b443f313 100644 --- a/tests/integration/query/commits/with_dockey_order_limit_offset_test.go +++ b/tests/integration/query/commits/with_dockey_order_limit_offset_test.go @@ -58,11 +58,11 @@ func TestQueryCommitsWithDockeyAndOrderAndLimitAndOffset(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", "height": int64(2), }, { - "cid": "bafybeihccn3utqsaxzsh6i7dlnd45rutcg7fbsogfw4vvigii7laedslqe", + "cid": "bafybeids3sq532txo55elkc5eba7ns65s5lfbkx5x7cg2sxtpm253cyw5i", "height": int64(3), }, }, diff --git a/tests/integration/query/commits/with_dockey_order_test.go b/tests/integration/query/commits/with_dockey_order_test.go index c2d1aac620..5f11bd3469 100644 --- a/tests/integration/query/commits/with_dockey_order_test.go +++ b/tests/integration/query/commits/with_dockey_order_test.go @@ -44,23 +44,23 @@ func TestQueryCommitsWithDockeyAndOrderHeightDesc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", "height": int64(2), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", "height": int64(2), }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "height": int64(1), }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "height": int64(1), }, }, @@ -99,23 +99,23 @@ func TestQueryCommitsWithDockeyAndOrderHeightAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": 
"bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "height": int64(1), }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "height": int64(1), }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", "height": int64(2), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", "height": int64(2), }, }, @@ -154,24 +154,24 @@ func TestQueryCommitsWithDockeyAndOrderCidDesc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", - "height": int64(1), + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", + "height": int64(2), }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "height": int64(1), }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", "height": int64(2), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", - "height": int64(2), + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", + "height": int64(1), }, }, }, @@ -209,23 +209,23 @@ func TestQueryCommitsWithDockeyAndOrderCidAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", - "height": int64(2), + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", + "height": int64(1), }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", "height": int64(2), }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", - "height": int64(1), + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", + "height": int64(2), }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "height": int64(1), }, }, @@ -278,39 +278,39 @@ func TestQueryCommitsWithDockeyAndOrderAndMultiUpdatesCidAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "height": int64(1), }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "height": int64(1), }, { - "cid": 
"bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", "height": int64(2), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", "height": int64(2), }, { - "cid": "bafybeihccn3utqsaxzsh6i7dlnd45rutcg7fbsogfw4vvigii7laedslqe", + "cid": "bafybeids3sq532txo55elkc5eba7ns65s5lfbkx5x7cg2sxtpm253cyw5i", "height": int64(3), }, { - "cid": "bafybeiegusf5ypa7htxwa6u4fvne3lqq2jafe4fxllh4lo6iw4xdsn4yyq", + "cid": "bafybeidb3snylmwsqdrroaunenha7w2lvsxksxrpc7tljp6hmhulxghncy", "height": int64(3), }, { - "cid": "bafybeigicex7hqzhzltm3adsx34rnzhp7lgubtrusxukk54whosmtfun7y", + "cid": "bafybeieu4fr3imdr7ituubknfjw7kl2mxjhnitvmvb2u5moolrjeetyjq4", "height": int64(4), }, { - "cid": "bafybeihv6d4fo7q5pziriv4rz3loq6unr3fegdonjcuyw5stano5r7dm4i", + "cid": "bafybeiakwirtapzmvz3qkussybgas3v3v7sot7mjmxakklxpqz4ixrkqgy", "height": int64(4), }, }, diff --git a/tests/integration/query/commits/with_dockey_test.go b/tests/integration/query/commits/with_dockey_test.go index 8e21007f3e..f13d932ab4 100644 --- a/tests/integration/query/commits/with_dockey_test.go +++ b/tests/integration/query/commits/with_dockey_test.go @@ -62,13 +62,13 @@ func TestQueryCommitsWithDockey(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", }, }, }, @@ -102,22 +102,22 @@ func TestQueryCommitsWithDockeyAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "links": []map[string]any{}, }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "links": []map[string]any{}, }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "links": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "name": "age", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "name": "name", }, }, @@ -158,23 +158,23 @@ func TestQueryCommitsWithDockeyAndUpdate(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", "height": int64(2), }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "height": int64(1), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": 
"bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", "height": int64(2), }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "height": int64(1), }, }, @@ -219,44 +219,44 @@ func TestQueryCommitsWithDockeyAndUpdateAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", "links": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "name": "_head", }, }, }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "links": []map[string]any{}, }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "links": []map[string]any{}, }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", "links": []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "name": "_head", }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", "name": "age", }, }, }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "links": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "name": "age", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "name": "name", }, }, diff --git a/tests/integration/query/commits/with_dockey_typename_test.go b/tests/integration/query/commits/with_dockey_typename_test.go index 106d0ff326..d5c96f8534 100644 --- a/tests/integration/query/commits/with_dockey_typename_test.go +++ b/tests/integration/query/commits/with_dockey_typename_test.go @@ -37,15 +37,15 @@ func TestQueryCommitsWithDockeyWithTypeName(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "__typename": "Commit", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "__typename": "Commit", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "__typename": "Commit", }, }, diff --git a/tests/integration/query/commits/with_field_test.go b/tests/integration/query/commits/with_field_test.go index e355db1710..ce541a8316 100644 --- a/tests/integration/query/commits/with_field_test.go +++ b/tests/integration/query/commits/with_field_test.go @@ -66,7 +66,7 @@ func TestQueryCommitsWithFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", }, }, }, @@ 
-98,7 +98,7 @@ func TestQueryCommitsWithCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", }, }, }, @@ -131,8 +131,8 @@ func TestQueryCommitsWithCompositeFieldIdWithReturnedSchemaVersionId(t *testing. }`, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", - "schemaVersionId": "bafkreicihc56up4gzd4pf6lsmg5fc7dugyuigoaywgtjwy5c2suvj5zhtm", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", + "schemaVersionId": "bafkreig2vbhp6udwzcn2grqjnr7b3qbzperexbdazyovgwvkotpq3ehmny", }, }, }, diff --git a/tests/integration/query/commits/with_group_test.go b/tests/integration/query/commits/with_group_test.go index 86822aac06..c9843059a0 100644 --- a/tests/integration/query/commits/with_group_test.go +++ b/tests/integration/query/commits/with_group_test.go @@ -89,10 +89,10 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(2), "_group": []map[string]any{ { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeicddzzjp4k6itagzpnsputz5pgq57bu4qpwvrzxq7qi2bwguvsine", }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeic2z67t72ty7op6aoqzpz7larpubb473naqipho7rftoivkmubh7a", }, }, }, @@ -100,13 +100,13 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(1), "_group": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", }, }, }, @@ -142,7 +142,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "_group": []map[string]any{ { "height": int64(1), @@ -150,7 +150,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "_group": []map[string]any{ { "height": int64(1), @@ -158,7 +158,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "_group": []map[string]any{ { "height": int64(1), diff --git a/tests/integration/query/latest_commits/with_dockey_field_test.go b/tests/integration/query/latest_commits/with_dockey_field_test.go index f85689c982..e11ca9fa6a 100644 --- a/tests/integration/query/latest_commits/with_dockey_field_test.go +++ b/tests/integration/query/latest_commits/with_dockey_field_test.go @@ -68,7 +68,7 @@ func TestQueryLatestCommitsWithDocKeyAndFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "links": []map[string]any{}, }, }, @@ -101,14 +101,14 @@ func 
TestQueryLatestCommitsWithDocKeyAndCompositeFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "links": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "name": "age", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "name": "name", }, }, diff --git a/tests/integration/query/latest_commits/with_dockey_test.go b/tests/integration/query/latest_commits/with_dockey_test.go index 6fb8771f48..b496672c77 100644 --- a/tests/integration/query/latest_commits/with_dockey_test.go +++ b/tests/integration/query/latest_commits/with_dockey_test.go @@ -38,14 +38,14 @@ func TestQueryLatestCommitsWithDocKey(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", "links": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeic4x7hxoh7yhqmvo7c3mqoyv6j7lnnajkt2hzf2j3mjaf6wmwwl6u", "name": "age", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidd6rsya2q5gxaarx52da22ih5jdn5wgxsfehcuwquffgjvmdrh34", "name": "name", }, }, @@ -75,8 +75,8 @@ func TestQueryLatestCommitsWithDocKeyWithSchemaVersionIdField(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", - "schemaVersionId": "bafkreicihc56up4gzd4pf6lsmg5fc7dugyuigoaywgtjwy5c2suvj5zhtm", + "cid": "bafybeiax37emgcmyjjsiae7kwqis675whyc73wth44amhcmsndfygfhl7m", + "schemaVersionId": "bafkreig2vbhp6udwzcn2grqjnr7b3qbzperexbdazyovgwvkotpq3ehmny", }, }, } diff --git a/tests/integration/query/one_to_many/with_cid_dockey_test.go b/tests/integration/query/one_to_many/with_cid_dockey_test.go index aa8dd1906e..1754c91269 100644 --- a/tests/integration/query/one_to_many/with_cid_dockey_test.go +++ b/tests/integration/query/one_to_many/with_cid_dockey_test.go @@ -68,7 +68,7 @@ func TestQueryOneToManyWithCidAndDocKey(t *testing.T) { Description: "One-to-many relation query from one side with cid and dockey", Request: `query { Book ( - cid: "bafybeie3srbs3vyirntnaubjwn7i3cltht3mfbvpiiahxvkw5yvmte2fne" + cid: "bafybeiagqehqmtyxzcdujrrwho5uf7lxsewnb2hjy2gilbod26m7ybhja4" dockey: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name @@ -117,7 +117,7 @@ func TestQueryOneToManyWithChildUpdateAndFirstCidAndDocKey(t *testing.T) { Description: "One-to-many relation query from one side with child update and parent cid and dockey", Request: `query { Book ( - cid: "bafybeie3srbs3vyirntnaubjwn7i3cltht3mfbvpiiahxvkw5yvmte2fne", + cid: "bafybeiagqehqmtyxzcdujrrwho5uf7lxsewnb2hjy2gilbod26m7ybhja4", dockey: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name @@ -173,7 +173,7 @@ func TestQueryOneToManyWithParentUpdateAndFirstCidAndDocKey(t *testing.T) { Description: "One-to-many relation query from one side with parent update and parent cid and dockey", Request: `query { Book ( - cid: "bafybeie3srbs3vyirntnaubjwn7i3cltht3mfbvpiiahxvkw5yvmte2fne", + cid: "bafybeiagqehqmtyxzcdujrrwho5uf7lxsewnb2hjy2gilbod26m7ybhja4", dockey: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name @@ -229,7 +229,7 @@ func 
TestQueryOneToManyWithParentUpdateAndLastCidAndDocKey(t *testing.T) { Description: "One-to-many relation query from one side with parent update and parent cid and dockey", Request: `query { Book ( - cid: "bafybeiavnr6gu2ccfm2ygc2m2nsqbhnoorhjf2p6f2rq4rkggjz55je5ym", + cid: "bafybeianxv6tvc4esjfh4aufbfv3skrpjd5djv5zl72xun43vvg27p326y", dockey: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name diff --git a/tests/integration/query/simple/with_cid_dockey_test.go b/tests/integration/query/simple/with_cid_dockey_test.go index 343da0bd9e..c8e38ccbf6 100644 --- a/tests/integration/query/simple/with_cid_dockey_test.go +++ b/tests/integration/query/simple/with_cid_dockey_test.go @@ -73,7 +73,7 @@ func TestQuerySimpleWithCidAndDocKey(t *testing.T) { Description: "Simple query with cid and dockey", Request: `query { Users ( - cid: "bafybeicloiyf5zl5k54cjuhfg6rpsj7rnhxswvnuizpagd2kwq4px6aqn4", + cid: "bafybeigyyj2jvd4265aalvq6wctzz7jz36rluxrevlnx2hewvowlq672na", dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" ) { Name @@ -102,7 +102,7 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocKey(t *testing.T) { Description: "Simple query with (first) cid and dockey", Request: `query { Users ( - cid: "bafybeicloiyf5zl5k54cjuhfg6rpsj7rnhxswvnuizpagd2kwq4px6aqn4", + cid: "bafybeigyyj2jvd4265aalvq6wctzz7jz36rluxrevlnx2hewvowlq672na", dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" ) { Name @@ -143,7 +143,7 @@ func TestQuerySimpleWithUpdateAndLastCidAndDocKey(t *testing.T) { Description: "Simple query with (last) cid and dockey", Request: `query { Users ( - cid: "bafybeih4is4so2tw47gfjsty6nk7fvcggd3uyq5tgiw4yjnobtbdwnqdoi" + cid: "bafybeihldrl36evcdi23igf3zjimmduhd65cgbnu25k5kw74yppou5x37a" dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" ) { Name @@ -184,7 +184,7 @@ func TestQuerySimpleWithUpdateAndMiddleCidAndDocKey(t *testing.T) { Description: "Simple query with (middle) cid and dockey", Request: `query { Users ( - cid: "bafybeigvkbbe6e5ztwz7qtwu3xg2zj4stpiwkhyj6qbu6ir3qdmrd3bhem", + cid: "bafybeidc7zcwnhto3pvsrtph3txv57no5xdjrca4jv5dzzacuedkr2osmm", dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" ) { Name @@ -225,7 +225,7 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocKeyAndSchemaVersion(t *testing.T) Description: "Simple query with (first) cid and dockey and yielded schema version", Request: `query { Users ( - cid: "bafybeicloiyf5zl5k54cjuhfg6rpsj7rnhxswvnuizpagd2kwq4px6aqn4", + cid: "bafybeigyyj2jvd4265aalvq6wctzz7jz36rluxrevlnx2hewvowlq672na", dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" ) { Name @@ -259,7 +259,7 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocKeyAndSchemaVersion(t *testing.T) "Age": uint64(21), "_version": []map[string]any{ { - "schemaVersionId": "bafkreicl3pjcorfcaexxmqcrilkhx7xl37o6b34nxgtiauygtl7hrqbhoq", + "schemaVersionId": "bafkreigrd4xdnprbzdh5bx3igtx2tayfbwp2f27pw3j3xjcbys7jekxfsm", }, }, }, diff --git a/tests/integration/query/simple/with_version_test.go b/tests/integration/query/simple/with_version_test.go index 900ac48b40..656e4058bb 100644 --- a/tests/integration/query/simple/with_version_test.go +++ b/tests/integration/query/simple/with_version_test.go @@ -46,14 +46,14 @@ func TestQuerySimpleWithEmbeddedLatestCommit(t *testing.T) { "Age": uint64(21), "_version": []map[string]any{ { - "cid": "bafybeicloiyf5zl5k54cjuhfg6rpsj7rnhxswvnuizpagd2kwq4px6aqn4", + "cid": "bafybeigyyj2jvd4265aalvq6wctzz7jz36rluxrevlnx2hewvowlq672na", "links": []map[string]any{ { - "cid": "bafybeieqi3u6kdbsb76qrfziiiabs52ztptecry34lo46cwfbqmf3u4kwi", + "cid": 
"bafybeidp5im7msb7ztltaiwfmnprmiljnmkelkif65gurq7fe5hweltjda", "name": "Age", }, { - "cid": "bafybeidoaqrpud2z2d4jnjqqmo3kn5rakr7yh2d2cdmjkvk5fcisy54jam", + "cid": "bafybeicjhyipphwodecw6pucrleohxeny4mnikjs3h5d3jrvdfs3cep6wu", "name": "Name", }, }, @@ -90,7 +90,7 @@ func TestQuerySimpleWithEmbeddedLatestCommitWithSchemaVersionId(t *testing.T) { "Name": "John", "_version": []map[string]any{ { - "schemaVersionId": "bafkreicl3pjcorfcaexxmqcrilkhx7xl37o6b34nxgtiauygtl7hrqbhoq", + "schemaVersionId": "bafkreigrd4xdnprbzdh5bx3igtx2tayfbwp2f27pw3j3xjcbys7jekxfsm", }, }, }, @@ -171,14 +171,14 @@ func TestQuerySimpleWithMultipleAliasedEmbeddedLatestCommit(t *testing.T) { "Age": uint64(21), "_version": []map[string]any{ { - "cid": "bafybeicloiyf5zl5k54cjuhfg6rpsj7rnhxswvnuizpagd2kwq4px6aqn4", + "cid": "bafybeigyyj2jvd4265aalvq6wctzz7jz36rluxrevlnx2hewvowlq672na", "L1": []map[string]any{ { - "cid": "bafybeieqi3u6kdbsb76qrfziiiabs52ztptecry34lo46cwfbqmf3u4kwi", + "cid": "bafybeidp5im7msb7ztltaiwfmnprmiljnmkelkif65gurq7fe5hweltjda", "name": "Age", }, { - "cid": "bafybeidoaqrpud2z2d4jnjqqmo3kn5rakr7yh2d2cdmjkvk5fcisy54jam", + "cid": "bafybeicjhyipphwodecw6pucrleohxeny4mnikjs3h5d3jrvdfs3cep6wu", "name": "Name", }, }, diff --git a/tests/integration/schema/migrations/query/simple_test.go b/tests/integration/schema/migrations/query/simple_test.go index 56f94b2e6b..d6075f3496 100644 --- a/tests/integration/schema/migrations/query/simple_test.go +++ b/tests/integration/schema/migrations/query/simple_test.go @@ -45,8 +45,8 @@ func TestSchemaMigrationQuery(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -115,8 +115,8 @@ func TestSchemaMigrationQueryMultipleDocs(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -178,8 +178,8 @@ func TestSchemaMigrationQueryWithMigrationRegisteredBeforeSchemaPatch(t *testing }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -254,8 +254,8 @@ func TestSchemaMigrationQueryMigratesToIntermediaryVersion(t *testing.T) { // Register a migration from schema version 1 to schema version 2 **only** - // there should be no migration from version 2 to version 3. 
LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -325,8 +325,8 @@ func TestSchemaMigrationQueryMigratesFromIntermediaryVersion(t *testing.T) { // Register a migration from schema version 2 to schema version 3 **only** - // there should be no migration from version 1 to version 2. LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", - DestinationSchemaVersionID: "bafkreiadb2rps7a2zykywfxwfpgkvet5vmzaig4nvzl5sgfqquzr3qrvsq", + SourceSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", + DestinationSchemaVersionID: "bafkreigmkj5aa7qnqgszkzf5r5buqwmowtqu7m34ym5ycsozp7nh73aiwe", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -394,8 +394,8 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -411,8 +411,8 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", - DestinationSchemaVersionID: "bafkreiadb2rps7a2zykywfxwfpgkvet5vmzaig4nvzl5sgfqquzr3qrvsq", + SourceSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", + DestinationSchemaVersionID: "bafkreigmkj5aa7qnqgszkzf5r5buqwmowtqu7m34ym5ycsozp7nh73aiwe", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -539,8 +539,8 @@ func TestSchemaMigrationQueryMigrationMutatesExistingScalarField(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -600,8 +600,8 @@ func TestSchemaMigrationQueryMigrationMutatesExistingInlineArrayField(t *testing }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreic427cayffkscmp2ng224wpmsryzwz5aec6dhbfr2xoljb4xbugji", - DestinationSchemaVersionID: "bafkreidrmuahiz4qenylm247udlro732ip3adwv3dqpeds3s2kghwtfvt4", + SourceSchemaVersionID: "bafkreiegav3vo2lwbwivu63u7bdxkdutb6qemnl4uxipqq6nzeqyikbr2i", + DestinationSchemaVersionID: "bafkreig4mjggx4hqxxeqih3r6l3jrjuifxa3rcquio3hzfs6x635gakle4", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -663,8 +663,8 @@ func TestSchemaMigrationQueryMigrationRemovesExistingField(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: 
client.LensConfig{ - SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", - DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + SourceSchemaVersionID: "bafkreig2pfhv2b7htpvshcqftuj4yrfu5ptxulr6ul6pjcaweyz43yz5ay", + DestinationSchemaVersionID: "bafkreiat3mfdsoknsavvw3wbir4atbaswqbnnitn3ysswqih2g4zwbn62a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -724,8 +724,8 @@ func TestSchemaMigrationQueryMigrationPreservesExistingFieldWhenFieldNotRequeste }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", - DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + SourceSchemaVersionID: "bafkreig2pfhv2b7htpvshcqftuj4yrfu5ptxulr6ul6pjcaweyz43yz5ay", + DestinationSchemaVersionID: "bafkreiat3mfdsoknsavvw3wbir4atbaswqbnnitn3ysswqih2g4zwbn62a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -798,8 +798,8 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcFieldNotRequeste }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", - DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + SourceSchemaVersionID: "bafkreig2pfhv2b7htpvshcqftuj4yrfu5ptxulr6ul6pjcaweyz43yz5ay", + DestinationSchemaVersionID: "bafkreiat3mfdsoknsavvw3wbir4atbaswqbnnitn3ysswqih2g4zwbn62a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -860,8 +860,8 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcAndDstFieldNotRe }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", - DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + SourceSchemaVersionID: "bafkreig2pfhv2b7htpvshcqftuj4yrfu5ptxulr6ul6pjcaweyz43yz5ay", + DestinationSchemaVersionID: "bafkreiat3mfdsoknsavvw3wbir4atbaswqbnnitn3ysswqih2g4zwbn62a", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_dockey_test.go b/tests/integration/schema/migrations/query/with_dockey_test.go index db58c9f066..b4bc60aab8 100644 --- a/tests/integration/schema/migrations/query/with_dockey_test.go +++ b/tests/integration/schema/migrations/query/with_dockey_test.go @@ -52,8 +52,8 @@ func TestSchemaMigrationQueryByDocKey(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -158,8 +158,8 @@ func TestSchemaMigrationQueryMultipleQueriesByDocKey(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: 
[]model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_p2p_test.go b/tests/integration/schema/migrations/query/with_p2p_test.go index 7543b3275a..303ea82598 100644 --- a/tests/integration/schema/migrations/query/with_p2p_test.go +++ b/tests/integration/schema/migrations/query/with_p2p_test.go @@ -46,8 +46,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtOlderSchemaVersion(t *testing testUtils.ConfigureMigration{ // Register the migration on both nodes. LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreifmgqtwpvepenteuvj27u4ewix6nb7ypvyz6j555wsk5u2n7hrldm", - DestinationSchemaVersionID: "bafkreigfqdqnj5dunwgcsf2a6ht6q6m2yv3ys6byw5ifsmi5lfcpeh5t7e", + SourceSchemaVersionID: "bafkreicpuajqg5f4i3vkrdn5jhhcqbkxd5g4aaypkxbo52hfx53vuhjbhm", + DestinationSchemaVersionID: "bafkreig22ggpbxpf42hderhl36hb46ypovlljtkpgospl7dyaon4rdnida", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -136,8 +136,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtNewerSchemaVersion(t *testing testUtils.ConfigureMigration{ // Register the migration on both nodes. LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreifmgqtwpvepenteuvj27u4ewix6nb7ypvyz6j555wsk5u2n7hrldm", - DestinationSchemaVersionID: "bafkreigfqdqnj5dunwgcsf2a6ht6q6m2yv3ys6byw5ifsmi5lfcpeh5t7e", + SourceSchemaVersionID: "bafkreicpuajqg5f4i3vkrdn5jhhcqbkxd5g4aaypkxbo52hfx53vuhjbhm", + DestinationSchemaVersionID: "bafkreig22ggpbxpf42hderhl36hb46ypovlljtkpgospl7dyaon4rdnida", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -238,8 +238,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchNewerSchemaVersionWithSch // Register a migration from version 2 to version 3 on both nodes. // There is no migration from version 1 to 2, thus node 1 has no knowledge of schema version 2. 
LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", - DestinationSchemaVersionID: "bafkreiadb2rps7a2zykywfxwfpgkvet5vmzaig4nvzl5sgfqquzr3qrvsq", + SourceSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", + DestinationSchemaVersionID: "bafkreigmkj5aa7qnqgszkzf5r5buqwmowtqu7m34ym5ycsozp7nh73aiwe", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_restart_test.go b/tests/integration/schema/migrations/query/with_restart_test.go index 2c1253bfd0..a2ba505cb7 100644 --- a/tests/integration/schema/migrations/query/with_restart_test.go +++ b/tests/integration/schema/migrations/query/with_restart_test.go @@ -45,8 +45,8 @@ func TestSchemaMigrationQueryWithRestart(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_set_default_test.go b/tests/integration/schema/migrations/query/with_set_default_test.go index e276bcab24..3c1a873c3e 100644 --- a/tests/integration/schema/migrations/query/with_set_default_test.go +++ b/tests/integration/schema/migrations/query/with_set_default_test.go @@ -22,7 +22,7 @@ import ( ) func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t *testing.T) { - schemaVersionID2 := "bafkreigfqdqnj5dunwgcsf2a6ht6q6m2yv3ys6byw5ifsmi5lfcpeh5t7e" + schemaVersionID2 := "bafkreig22ggpbxpf42hderhl36hb46ypovlljtkpgospl7dyaon4rdnida" test := testUtils.TestCase{ Description: "Test schema migration", @@ -50,7 +50,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t * }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreifmgqtwpvepenteuvj27u4ewix6nb7ypvyz6j555wsk5u2n7hrldm", + SourceSchemaVersionID: "bafkreicpuajqg5f4i3vkrdn5jhhcqbkxd5g4aaypkxbo52hfx53vuhjbhm", DestinationSchemaVersionID: schemaVersionID2, Lens: model.Lens{ Lenses: []model.LensModule{ @@ -89,8 +89,8 @@ func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t * } func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t *testing.T) { - schemaVersionID1 := "bafkreifmgqtwpvepenteuvj27u4ewix6nb7ypvyz6j555wsk5u2n7hrldm" - schemaVersionID2 := "bafkreigfqdqnj5dunwgcsf2a6ht6q6m2yv3ys6byw5ifsmi5lfcpeh5t7e" + schemaVersionID1 := "bafkreicpuajqg5f4i3vkrdn5jhhcqbkxd5g4aaypkxbo52hfx53vuhjbhm" + schemaVersionID2 := "bafkreig22ggpbxpf42hderhl36hb46ypovlljtkpgospl7dyaon4rdnida" test := testUtils.TestCase{ Description: "Test schema migration", @@ -164,8 +164,8 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t } func TestSchemaMigrationQuery_WithSetDefaultToOriginalVersionThatDocWasCreatedAt_ClearsMigrations(t *testing.T) { - schemaVersionID1 := "bafkreifmgqtwpvepenteuvj27u4ewix6nb7ypvyz6j555wsk5u2n7hrldm" - schemaVersionID2 := "bafkreigfqdqnj5dunwgcsf2a6ht6q6m2yv3ys6byw5ifsmi5lfcpeh5t7e" + schemaVersionID1 := "bafkreicpuajqg5f4i3vkrdn5jhhcqbkxd5g4aaypkxbo52hfx53vuhjbhm" + schemaVersionID2 := 
"bafkreig22ggpbxpf42hderhl36hb46ypovlljtkpgospl7dyaon4rdnida" test := testUtils.TestCase{ Description: "Test schema migration", diff --git a/tests/integration/schema/migrations/query/with_txn_test.go b/tests/integration/schema/migrations/query/with_txn_test.go index 3c55fd7748..7d73288e01 100644 --- a/tests/integration/schema/migrations/query/with_txn_test.go +++ b/tests/integration/schema/migrations/query/with_txn_test.go @@ -47,8 +47,8 @@ func TestSchemaMigrationQueryWithTxn(t *testing.T) { testUtils.ConfigureMigration{ TransactionID: immutable.Some(0), LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -109,8 +109,8 @@ func TestSchemaMigrationQueryWithTxnAndCommit(t *testing.T) { testUtils.ConfigureMigration{ TransactionID: immutable.Some(0), LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_update_test.go b/tests/integration/schema/migrations/query/with_update_test.go index 35c6965ead..6ab0957634 100644 --- a/tests/integration/schema/migrations/query/with_update_test.go +++ b/tests/integration/schema/migrations/query/with_update_test.go @@ -45,8 +45,8 @@ func TestSchemaMigrationQueryWithUpdateRequest(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -129,8 +129,8 @@ func TestSchemaMigrationQueryWithMigrationRegisteredAfterUpdate(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/simple_test.go b/tests/integration/schema/migrations/simple_test.go index b63be03b5a..dcc8d98a65 100644 --- a/tests/integration/schema/migrations/simple_test.go +++ b/tests/integration/schema/migrations/simple_test.go @@ -91,8 +91,8 @@ func TestSchemaMigrationGetMigrationsReturnsMultiple(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - 
DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -124,8 +124,8 @@ func TestSchemaMigrationGetMigrationsReturnsMultiple(t *testing.T) { }, }, { - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", + DestinationSchemaVersionID: "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/updates/add/field/create_update_test.go b/tests/integration/schema/updates/add/field/create_update_test.go index 1722531568..b9c9fdf7a6 100644 --- a/tests/integration/schema/updates/add/field/create_update_test.go +++ b/tests/integration/schema/updates/add/field/create_update_test.go @@ -17,8 +17,8 @@ import ( ) func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoin(t *testing.T) { - initialSchemaVersionId := "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq" - updatedSchemaVersionId := "bafkreidejaxpsevyijnr4nah4e2l263emwhdaj57fwwv34eu5rea4ff54e" + initialSchemaVersionId := "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu" + updatedSchemaVersionId := "bafkreigbscmhyynybxtdvuszqvttgc425rwiy4uz4iiu4v7olrz5rg3oby" test := testUtils.TestCase{ Description: "Test schema update, add field with update after schema update, verison join", @@ -105,8 +105,8 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoi } func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndCommitQuery(t *testing.T) { - initialSchemaVersionId := "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq" - updatedSchemaVersionId := "bafkreidejaxpsevyijnr4nah4e2l263emwhdaj57fwwv34eu5rea4ff54e" + initialSchemaVersionId := "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu" + updatedSchemaVersionId := "bafkreigbscmhyynybxtdvuszqvttgc425rwiy4uz4iiu4v7olrz5rg3oby" test := testUtils.TestCase{ Description: "Test schema update, add field with update after schema update, commits query", diff --git a/tests/integration/schema/updates/move/simple_test.go b/tests/integration/schema/updates/move/simple_test.go index 60e0611746..8d60ffe1ab 100644 --- a/tests/integration/schema/updates/move/simple_test.go +++ b/tests/integration/schema/updates/move/simple_test.go @@ -17,7 +17,7 @@ import ( ) func TestSchemaUpdatesMoveCollectionDoesNothing(t *testing.T) { - schemaVersionID := "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq" + schemaVersionID := "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu" test := testUtils.TestCase{ Description: "Test schema update, move collection", diff --git a/tests/integration/schema/with_update_set_default_test.go b/tests/integration/schema/with_update_set_default_test.go index 3b365e0e5f..1551aff972 100644 --- a/tests/integration/schema/with_update_set_default_test.go +++ b/tests/integration/schema/with_update_set_default_test.go @@ -92,7 +92,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToOriginal_NewFieldIsNotQueriable( SetAsDefaultVersion: immutable.Some(false), }, testUtils.SetDefaultSchemaVersion{ - SchemaVersionID: 
"bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", + SchemaVersionID: "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", }, testUtils.Request{ Request: `query { @@ -129,7 +129,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToNew_AllowsQueryingOfNewField(t * SetAsDefaultVersion: immutable.Some(false), }, testUtils.SetDefaultSchemaVersion{ - SchemaVersionID: "bafkreidejaxpsevyijnr4nah4e2l263emwhdaj57fwwv34eu5rea4ff54e", + SchemaVersionID: "bafkreigbscmhyynybxtdvuszqvttgc425rwiy4uz4iiu4v7olrz5rg3oby", }, testUtils.Request{ Request: `query { From 4c3df487c134a29ebb48f0bdac9cc5cdacd8087c Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Mon, 2 Oct 2023 12:16:08 -0700 Subject: [PATCH 11/55] refactor: CLI client interface (#1839) ## Relevant issue(s) Closes #1472 Closes #1507 Closes #1860 ## Description This is a follow up to #1776 This PR adds a CLI implementation that implements the client.DB interface and runs through the existing integration test suite. - [x] Merge existing server config code - [x] Refactor CLI to use new HTTP client - [x] Remove `net/api` package - [x] Remove `api/http` package - [x] Lens tests are timing out in CI: fixed #1862 - [x] Code coverage is incorrectly reporting: fixed #1861 - [x] Flaky test causing failures: fixed #1912 Renamed Commands: - `peerid` to `client peer info` - `client p2pcollection` to `client p2p collection` - `client replicator` to `client p2p replicator` - `client schema list` to `client collection describe` Removed Commands: - `block get` - `ping` - `rpc` Added Commands: - `client collection create` - `client collection delete` - `client collection get` - `client collection keys` - `client collection update` - `client tx create` - `client tx discard` - `client tx commit` - `client schema migration up` - `client schema migration down` - `client schema migration reload` **Notes for reviewers**: - `.github` changes are merged from #1871 - `Makefile` most of these changes are also from #1871 - `docs/cli` ignore these changes, it will be updated next release - sorry for all of the merge commits, I am working on learning rebase flow ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? 
`make test`

Specify the platform(s) on which this was tested:
- MacOS
---
 .github/workflows/code-test-coverage.yml | 76 -
 .github/workflows/detect-change.yml | 55 -
 .../run-tests-and-upload-coverage.yml | 103 +
 .github/workflows/run-tests.yml | 50 -
 .github/workflows/test-collection-named.yml | 54 -
 .github/workflows/test-gql-mutations.yml | 48 -
 .gitignore | 1 +
 Makefile | 82 +-
 api/http/errors.go | 89 -
 api/http/errors_test.go | 169 --
 api/http/handler.go | 139 -
 api/http/handler_test.go | 312 ---
 api/http/handlerfuncs.go | 475 ----
 api/http/handlerfuncs_backup.go | 123 -
 api/http/handlerfuncs_backup_test.go | 623 -----
 api/http/handlerfuncs_index.go | 144 -
 api/http/handlerfuncs_index_test.go | 239 --
 api/http/handlerfuncs_test.go | 1184 ---------
 api/http/logger.go | 84 -
 api/http/logger_test.go | 124 -
 api/http/request_result.go | 31 -
 api/http/router.go | 83 -
 api/http/router_test.go | 50 -
 api/http/server.go | 322 ---
 cli/backup_export.go | 77 +-
 cli/backup_export_test.go | 300 ---
 cli/backup_import.go | 76 +-
 cli/backup_import_test.go | 129 -
 cli/blocks_get.go | 80 -
 cli/cli.go | 224 +-
 cli/cli_test.go | 59 -
 cli/client.go | 16 +-
 cli/collection.go | 77 +
 cli/collection_create.go | 102 +
 cli/collection_delete.go | 78 +
 cli/collection_describe.go | 57 +
 cli/collection_get.go | 53 +
 cli/collection_keys.go | 53 +
 cli/collection_update.go | 99 +
 cli/dump.go | 59 +-
 cli/errors.go | 131 +-
 cli/index_create.go | 92 +-
 cli/index_create_test.go | 244 --
 cli/index_drop.go | 90 +-
 cli/index_drop_test.go | 121 -
 cli/index_list.go | 87 +-
 cli/index_list_test.go | 145 --
 cli/{blocks.go => p2p.go} | 8 +-
 cli/p2p_collection.go | 2 +-
 cli/p2p_collection_add.go | 40 +-
 cli/p2p_collection_getall.go | 48 +-
 cli/p2p_collection_remove.go | 40 +-
 cli/p2p_info.go | 35 +
 cli/{replicator.go => p2p_replicator.go} | 2 +-
 cli/p2p_replicator_delete.go | 37 +
 cli/p2p_replicator_getall.go | 34 +
 cli/p2p_replicator_set.go | 47 +
 cli/peerid.go | 101 -
 cli/peerid_test.go | 100 -
 cli/ping.go | 79 -
 cli/replicator_delete.go | 81 -
 cli/replicator_getall.go | 82 -
 cli/replicator_set.go | 86 -
 cli/request.go | 115 +-
 cli/root.go | 25 +-
 cli/rpc.go | 36 -
 cli/schema_add.go | 130 +-
 cli/schema_list.go | 89 -
 cli/schema_migration_down.go | 91 +
 cli/schema_migration_get.go | 72 +-
 cli/schema_migration_reload.go | 35 +
 cli/schema_migration_set.go | 132 +-
 cli/schema_migration_up.go | 91 +
 cli/schema_patch.go | 125 +-
 cli/schema_set_default.go | 29 +
 cli/{serverdump.go => server_dump.go} | 0
 cli/start.go | 37 +-
 api/http/http.go => cli/tx.go | 21 +-
 cli/tx_commit.go | 41 +
 cli/tx_create.go | 46 +
 cli/tx_discard.go | 42 +
 cli/utils.go | 112 +
 cli/version.go | 32 +-
 client/document.go | 20 +
 cmd/defradb/main.go | 12 +-
 cmd/genclidocs/{genclidocs.go => main.go} | 27 +-
 cmd/genmanpages/main.go | 37 +-
 docs/cli/defradb_client.md | 11 +-
 docs/cli/defradb_client_backup.md | 1 +
 docs/cli/defradb_client_backup_export.md | 1 +
 docs/cli/defradb_client_backup_import.md | 1 +
 docs/cli/defradb_client_collection.md | 52 +
 docs/cli/defradb_client_document.md | 38 +
 docs/cli/defradb_client_document_create.md | 44 +
 docs/cli/defradb_client_document_delete.md | 46 +
 docs/cli/defradb_client_document_get.md | 42 +
 docs/cli/defradb_client_document_keys.md | 41 +
 docs/cli/defradb_client_document_save.md | 42 +
 docs/cli/defradb_client_document_update.md | 52 +
 docs/cli/defradb_client_dump.md | 1 +
 docs/cli/defradb_client_index.md | 1 +
 docs/cli/defradb_client_index_create.md | 3 +-
 docs/cli/defradb_client_index_drop.md | 1 +
 docs/cli/defradb_client_index_list.md | 1 +
 ...db_client_rpc.md => defradb_client_p2p.md} | 14 +-
 ...on.md => defradb_client_p2p_collection.md} | 14 +-
 ...d => defradb_client_p2p_collection_add.md} | 8 +-
 ...> defradb_client_p2p_collection_getall.md} | 8 +-
 ...> defradb_client_p2p_collection_remove.md} | 8 +-
 ...or.md => defradb_client_p2p_replicator.md} | 12 +-
 ...> defradb_client_p2p_replicator_delete.md} | 15 +-
 ...> defradb_client_p2p_replicator_getall.md} | 8 +-
 ...d => defradb_client_p2p_replicator_set.md} | 13 +-
 docs/cli/defradb_client_query.md | 1 +
 docs/cli/defradb_client_rpc_addreplicator.md | 37 -
 .../defradb_client_rpc_replicator_delete.md | 38 -
 docs/cli/defradb_client_schema.md | 2 +-
 docs/cli/defradb_client_schema_add.md | 1 +
 docs/cli/defradb_client_schema_migration.md | 4 +
 .../defradb_client_schema_migration_down.md | 37 +
 .../defradb_client_schema_migration_get.md | 1 +
 ...defradb_client_schema_migration_reload.md} | 15 +-
 .../defradb_client_schema_migration_set.md | 1 +
 .../cli/defradb_client_schema_migration_up.md | 37 +
 docs/cli/defradb_client_schema_patch.md | 1 +
 ..._client_blocks.md => defradb_client_tx.md} | 15 +-
 ..._peerid.md => defradb_client_tx_commit.md} | 13 +-
 docs/cli/defradb_client_tx_create.md | 38 +
 ...ks_get.md => defradb_client_tx_discard.md} | 15 +-
 go.mod | 5 +-
 go.sum | 2 -
 http/client.go | 17 +-
 http/client_collection.go | 11 +-
 http/client_tx.go | 8 +
 http/errors.go | 36 +-
 http/handler.go | 138 +
 http/handler_ccip_test.go | 12 +-
 http/handler_collection.go | 9 +-
 http/handler_lens.go | 20 +-
 .../handler_playground.go | 0
 http/handler_store.go | 12 +
 http/http_client.go | 14 +-
 http/middleware.go | 27 +-
 http/server.go | 397 ++-
 {api/http => http}/server_test.go | 24 +-
 http/utils.go | 19 -
 logging/registry.go | 3 +
 net/api/client/client.go | 169 --
 net/api/pb/Makefile | 18 -
 net/api/pb/api.pb.go | 1100 --------
 net/api/pb/api.proto | 82 -
 net/api/pb/api_grpc.pb.go | 300 ---
 net/api/pb/api_vtproto.pb.go | 2316 -----------------
 tests/clients/cli/wrapper.go | 419 +++
 tests/clients/cli/wrapper_cli.go | 85 +
 tests/clients/cli/wrapper_collection.go | 405 +++
 tests/clients/cli/wrapper_lens.go | 145 ++
 tests/clients/cli/wrapper_tx.go | 76 +
 {http => tests/clients/http}/wrapper.go | 30 +-
 {http => tests/clients/http}/wrapper_tx.go | 0
 .../cli/client_backup_export_test.go | 118 -
 .../cli/client_backup_import_test.go | 109 -
 tests/integration/cli/client_blocks_test.go | 41 -
 .../cli/client_index_create_test.go | 102 -
 .../integration/cli/client_index_drop_test.go | 118 -
 .../integration/cli/client_index_list_test.go | 96 -
 tests/integration/cli/client_peerid_test.go | 34 -
 tests/integration/cli/client_ping_test.go | 63 -
 tests/integration/cli/client_query_test.go | 102 -
 .../cli/client_rpc_p2p_collection_test.go | 13 -
 .../cli/client_rpc_replicator_test.go | 35 -
 .../integration/cli/client_schema_add_test.go | 53 -
 .../cli/client_schema_migration_get_test.go | 110 -
 .../cli/client_schema_migration_set_test.go | 244 --
 .../cli/client_schema_patch_test.go | 53 -
 tests/integration/cli/init_test.go | 51 -
 tests/integration/cli/log_config_test.go | 116 -
 tests/integration/cli/root_test.go | 43 -
 tests/integration/cli/serverdump_test.go | 28 -
 tests/integration/cli/start_test.go | 90 -
 tests/integration/cli/utils.go | 263 --
 tests/integration/cli/version_test.go | 46 -
 tests/integration/results.go | 4 +-
 tests/integration/utils2.go | 21 +-
 version/version.go | 2 +-
 185 files changed, 4000 insertions(+), 13916 deletions(-)
 delete mode 100644 .github/workflows/code-test-coverage.yml
 delete mode 100644 .github/workflows/detect-change.yml
 create mode 100644 .github/workflows/run-tests-and-upload-coverage.yml
 delete mode 100644 .github/workflows/run-tests.yml
 delete mode 100644 .github/workflows/test-collection-named.yml
 delete mode 100644 .github/workflows/test-gql-mutations.yml
 delete mode 100644 api/http/errors.go
 delete mode 100644 api/http/errors_test.go
 delete mode 100644 api/http/handler.go
 delete mode 100644 api/http/handler_test.go
 delete mode 100644 api/http/handlerfuncs.go
 delete mode 100644 api/http/handlerfuncs_backup.go
 delete mode 100644 api/http/handlerfuncs_backup_test.go
 delete mode 100644 api/http/handlerfuncs_index.go
 delete mode 100644 api/http/handlerfuncs_index_test.go
 delete mode 100644 api/http/handlerfuncs_test.go
 delete mode 100644 api/http/logger.go
 delete mode 100644 api/http/logger_test.go
 delete mode 100644 api/http/request_result.go
 delete mode 100644 api/http/router.go
 delete mode 100644 api/http/router_test.go
 delete mode 100644 api/http/server.go
 delete mode 100644 cli/backup_export_test.go
 delete mode 100644 cli/backup_import_test.go
 delete mode 100644 cli/blocks_get.go
 delete mode 100644 cli/cli_test.go
 create mode 100644 cli/collection.go
 create mode 100644 cli/collection_create.go
 create mode 100644 cli/collection_delete.go
 create mode 100644 cli/collection_describe.go
 create mode 100644 cli/collection_get.go
 create mode 100644 cli/collection_keys.go
 create mode 100644 cli/collection_update.go
 delete mode 100644 cli/index_create_test.go
 delete mode 100644 cli/index_drop_test.go
 delete mode 100644 cli/index_list_test.go
 rename cli/{blocks.go => p2p.go} (75%)
 create mode 100644 cli/p2p_info.go
 rename cli/{replicator.go => p2p_replicator.go} (93%)
 create mode 100644 cli/p2p_replicator_delete.go
 create mode 100644 cli/p2p_replicator_getall.go
 create mode 100644 cli/p2p_replicator_set.go
 delete mode 100644 cli/peerid.go
 delete mode 100644 cli/peerid_test.go
 delete mode 100644 cli/ping.go
 delete mode 100644 cli/replicator_delete.go
 delete mode 100644 cli/replicator_getall.go
 delete mode 100644 cli/replicator_set.go
 delete mode 100644 cli/rpc.go
 delete mode 100644 cli/schema_list.go
 create mode 100644 cli/schema_migration_down.go
 create mode 100644 cli/schema_migration_reload.go
 create mode 100644 cli/schema_migration_up.go
 create mode 100644 cli/schema_set_default.go
 rename cli/{serverdump.go => server_dump.go} (100%)
 rename api/http/http.go => cli/tx.go (51%)
 create mode 100644 cli/tx_commit.go
 create mode 100644 cli/tx_create.go
 create mode 100644 cli/tx_discard.go
 create mode 100644 cli/utils.go
 rename cmd/genclidocs/{genclidocs.go => main.go} (59%)
 create mode 100644 docs/cli/defradb_client_collection.md
 create mode 100644 docs/cli/defradb_client_document.md
 create mode 100644 docs/cli/defradb_client_document_create.md
 create mode 100644 docs/cli/defradb_client_document_delete.md
 create mode 100644 docs/cli/defradb_client_document_get.md
 create mode 100644 docs/cli/defradb_client_document_keys.md
 create mode 100644 docs/cli/defradb_client_document_save.md
 create mode 100644 docs/cli/defradb_client_document_update.md
 rename docs/cli/{defradb_client_rpc.md => defradb_client_p2p.md} (70%)
 rename docs/cli/{defradb_client_rpc_p2pcollection.md => defradb_client_p2p_collection.md} (62%)
 rename docs/cli/{defradb_client_rpc_p2pcollection_add.md => defradb_client_p2p_collection_add.md} (77%)
 rename docs/cli/{defradb_client_rpc_p2pcollection_getall.md => defradb_client_p2p_collection_getall.md} (78%)
 rename docs/cli/{defradb_client_rpc_p2pcollection_remove.md => defradb_client_p2p_collection_remove.md} (77%)
 rename docs/cli/{defradb_client_rpc_replicator.md => defradb_client_p2p_replicator.md} (75%)
 rename docs/cli/{defradb_client_ping.md => defradb_client_p2p_replicator_delete.md} (67%)
 rename docs/cli/{defradb_client_rpc_replicator_getall.md => defradb_client_p2p_replicator_getall.md} (82%)
 rename docs/cli/{defradb_client_rpc_replicator_set.md => defradb_client_p2p_replicator_set.md} (68%)
 delete mode 100644 docs/cli/defradb_client_rpc_addreplicator.md
 delete mode 100644 docs/cli/defradb_client_rpc_replicator_delete.md
 create mode 100644 docs/cli/defradb_client_schema_migration_down.md
 rename docs/cli/{defradb_client_schema_list.md => defradb_client_schema_migration_reload.md} (65%)
 create mode 100644 docs/cli/defradb_client_schema_migration_up.md
 rename docs/cli/{defradb_client_blocks.md => defradb_client_tx.md} (63%)
 rename docs/cli/{defradb_client_peerid.md => defradb_client_tx_commit.md} (73%)
 create mode 100644 docs/cli/defradb_client_tx_create.md
 rename docs/cli/{defradb_client_blocks_get.md => defradb_client_tx_discard.md} (71%)
 create mode 100644 http/handler.go
 rename api/http/playground.go => http/handler_playground.go (100%)
 rename {api/http => http}/server_test.go (92%)
 delete mode 100644 net/api/client/client.go
 delete mode 100644 net/api/pb/Makefile
 delete mode 100644 net/api/pb/api.pb.go
 delete mode 100644 net/api/pb/api.proto
 delete mode 100644 net/api/pb/api_grpc.pb.go
 delete mode 100644 net/api/pb/api_vtproto.pb.go
 create mode 100644 tests/clients/cli/wrapper.go
 create mode 100644 tests/clients/cli/wrapper_cli.go
 create mode 100644 tests/clients/cli/wrapper_collection.go
 create mode 100644 tests/clients/cli/wrapper_lens.go
 create mode 100644 tests/clients/cli/wrapper_tx.go
 rename {http => tests/clients/http}/wrapper.go (90%)
 rename {http => tests/clients/http}/wrapper_tx.go (100%)
 delete mode 100644 tests/integration/cli/client_backup_export_test.go
 delete mode 100644 tests/integration/cli/client_backup_import_test.go
 delete mode 100644 tests/integration/cli/client_blocks_test.go
 delete mode 100644 tests/integration/cli/client_index_create_test.go
 delete mode 100644 tests/integration/cli/client_index_drop_test.go
 delete mode 100644 tests/integration/cli/client_index_list_test.go
 delete mode 100644 tests/integration/cli/client_peerid_test.go
 delete mode 100644 tests/integration/cli/client_ping_test.go
 delete mode 100644 tests/integration/cli/client_query_test.go
 delete mode 100644 tests/integration/cli/client_rpc_p2p_collection_test.go
 delete mode 100644 tests/integration/cli/client_rpc_replicator_test.go
 delete mode 100644 tests/integration/cli/client_schema_add_test.go
 delete mode 100644 tests/integration/cli/client_schema_migration_get_test.go
 delete mode 100644 tests/integration/cli/client_schema_migration_set_test.go
 delete mode 100644 tests/integration/cli/client_schema_patch_test.go
 delete mode 100644 tests/integration/cli/init_test.go
 delete mode 100644 tests/integration/cli/log_config_test.go
 delete mode 100644 tests/integration/cli/root_test.go
 delete mode 100644 tests/integration/cli/serverdump_test.go
 delete mode 100644 tests/integration/cli/start_test.go
 delete mode 100644 tests/integration/cli/utils.go
 delete mode 100644 tests/integration/cli/version_test.go

diff --git a/.github/workflows/code-test-coverage.yml b/.github/workflows/code-test-coverage.yml
deleted file mode 100644
index 65c0a92f1f..0000000000
--- a/.github/workflows/code-test-coverage.yml
+++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2022 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -name: Code Test Coverage Workflow - -on: - pull_request: - branches: - - master - - develop - - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - branches: - - master - - develop - -jobs: - code-test-coverage: - name: Code test coverage job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.20" - check-latest: true - - - name: Generate full test coverage report using go-acc - run: make test:coverage - - - name: Upload coverage to Codecov without token, retry on failure - env: - codecov_secret: ${{ secrets.CODECOV_TOKEN }} - if: env.codecov_secret == '' - uses: Wandalen/wretry.action@v1.0.36 - with: - attempt_limit: 5 - attempt_delay: 10000 - action: codecov/codecov-action@v3 - with: | - name: defradb-codecov - files: ./coverage.txt - flags: all-tests - os: 'linux' - fail_ci_if_error: true - verbose: true - - - name: Upload coverage to Codecov with token - env: - codecov_secret: ${{ secrets.CODECOV_TOKEN }} - if: env.codecov_secret != '' - uses: codecov/codecov-action@v3 - with: - token: ${{ env.codecov_secret }} - name: defradb-codecov - files: ./coverage.txt - flags: all-tests - os: 'linux' - fail_ci_if_error: true - verbose: true - # path_to_write_report: ./coverage/codecov_report.txt - # directory: ./coverage/reports/ diff --git a/.github/workflows/detect-change.yml b/.github/workflows/detect-change.yml deleted file mode 100644 index b6272c21cd..0000000000 --- a/.github/workflows/detect-change.yml +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2022 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -name: Detect Change Workflow - -on: - pull_request: - branches: - - master - - develop - - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - branches: - - master - - develop - -jobs: - detect-change: - name: Detect change job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.20" - check-latest: true - - - name: Build dependencies - run: | - make deps:modules - make deps:test - - - name: Run detection for changes - run: make test:changes - - ## Uncomment to enable ability to SSH into the runner. 
- #- name: Setup upterm ssh session for debugging - # uses: lhotari/action-upterm@v1 - # with: - # limit-access-to-actor: true - # limit-access-to-users: shahzadlone diff --git a/.github/workflows/run-tests-and-upload-coverage.yml b/.github/workflows/run-tests-and-upload-coverage.yml new file mode 100644 index 0000000000..f1f8724ced --- /dev/null +++ b/.github/workflows/run-tests-and-upload-coverage.yml @@ -0,0 +1,103 @@ +# Copyright 2022 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +name: Run Tests And Upload Coverage Workflow + +on: + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + branches: + - master + - develop + +jobs: + run-tests: + name: Run tests matrix job + + runs-on: ubuntu-latest + + strategy: + matrix: + client-type: [go, http, cli] + database-type: [badger-file, badger-memory] + mutation-type: [gql, collection-named, collection-save] + detect-changes: [false] + include: + - client-type: go + database-type: badger-memory + mutation-type: collection-save + detect-changes: true + + env: + DEFRA_CLIENT_GO: ${{ matrix.client-type == 'go' }} + DEFRA_CLIENT_HTTP: ${{ matrix.client-type == 'http' }} + DEFRA_CLIENT_CLI: ${{ matrix.client-type == 'cli' }} + DEFRA_BADGER_MEMORY: ${{ matrix.database-type == 'badger-memory' }} + DEFRA_BADGER_FILE: ${{ matrix.database-type == 'badger-file' }} + DEFRA_MUTATION_TYPE: ${{ matrix.mutation-type }} + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.20" + check-latest: true + + - name: Build dependencies + run: | + make deps:modules + make deps:test + + - name: Run integration tests + if: ${{ !matrix.detect-changes }} + run: make test:coverage + + - name: Run change detector tests + if: ${{ matrix.detect-changes }} + run: make test:changes + + - name: Upload coverage artifact + if: ${{ !matrix.detect-changes }} + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.client-type }}_${{ matrix.database-type }}_${{ matrix.mutation-type }} + path: coverage.txt + if-no-files-found: error + retention-days: 1 + + upload-coverage: + name: Upload test code coverage job + + runs-on: ubuntu-latest + + needs: run-tests + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Download coverage reports + uses: actions/download-artifact@v3 + with: + path: coverage_reports + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} + name: defradb-codecov + flags: all-tests + os: 'linux' + fail_ci_if_error: true + verbose: true diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml deleted file mode 100644 index bfa696a283..0000000000 --- a/.github/workflows/run-tests.yml +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2022 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. 
- -name: Run Tests Workflow - -on: - pull_request: - branches: - - master - - develop - - push: - -jobs: - run-tests: - name: Run tests job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.20" - check-latest: true - - - name: Build dependencies - run: | - make deps:modules - make deps:test - - - name: Build binary - run: make build - - # This is to ensure tests pass with a running server. - - name: Start server from binary - run: ./build/defradb start & - - - name: Run the tests, showing name of each test - run: make test:ci diff --git a/.github/workflows/test-collection-named.yml b/.github/workflows/test-collection-named.yml deleted file mode 100644 index 5adabe4fdf..0000000000 --- a/.github/workflows/test-collection-named.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2023 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -name: Run Collection Named Mutations Tests Workflow - -# This workflow runs the test suite with any supporting mutation test actions -# running their mutations via their corresponding named [Collection] call. -# -# For example, CreateDoc will call [Collection.Create], and -# UpdateDoc will call [Collection.Update]. - -on: - pull_request: - branches: - - master - - develop - - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - branches: - - master - - develop - -jobs: - test-collection-named-mutations: - name: Test Collection Named Mutations job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.20" - check-latest: true - - - name: Build dependencies - run: | - make deps:modules - make deps:test - - - name: Run tests with Collection Named mutations - run: make test:ci-col-named-mutations diff --git a/.github/workflows/test-gql-mutations.yml b/.github/workflows/test-gql-mutations.yml deleted file mode 100644 index 827dd22098..0000000000 --- a/.github/workflows/test-gql-mutations.yml +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2022 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. 
- -name: Run GQL Mutations Tests Workflow - -on: - pull_request: - branches: - - master - - develop - - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - branches: - - master - - develop - -jobs: - test-gql-mutations: - name: Test GQL mutations job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.20" - check-latest: true - - - name: Build dependencies - run: | - make deps:modules - make deps:test - - - name: Run tests with gql mutations - run: make test:ci-gql-mutations diff --git a/.gitignore b/.gitignore index b19a6d9259..81c1a16d62 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ cmd/defradb/defradb cmd/genclidocs/genclidocs cmd/genmanpages/genmanpages coverage.txt +coverage tests/bench/*.log tests/bench/*.svg diff --git a/Makefile b/Makefile index 60350a6046..7268834d5a 100644 --- a/Makefile +++ b/Makefile @@ -29,13 +29,16 @@ ifdef BUILD_TAGS BUILD_FLAGS+=-tags $(BUILD_TAGS) endif -TEST_FLAGS=-race -shuffle=on -timeout 300s +TEST_FLAGS=-race -shuffle=on -timeout 5m + +COVERAGE_DIRECTORY=$(PWD)/coverage +COVERAGE_FILE=coverage.txt +COVERAGE_FLAGS=-covermode=atomic -coverpkg=./... -args -test.gocoverdir=$(COVERAGE_DIRECTORY) PLAYGROUND_DIRECTORY=playground LENS_TEST_DIRECTORY=tests/integration/schema/migrations -CLI_TEST_DIRECTORY=tests/integration/cli CHANGE_DETECTOR_TEST_DIRECTORY=tests/change_detector -DEFAULT_TEST_DIRECTORIES=$$(go list ./... | grep -v -e $(LENS_TEST_DIRECTORY) -e $(CLI_TEST_DIRECTORY)) +DEFAULT_TEST_DIRECTORIES=$$(go list ./... | grep -v -e $(LENS_TEST_DIRECTORY)) default: @go run $(BUILD_FLAGS) cmd/defradb/main.go @@ -88,11 +91,6 @@ deps\:lens: rustup target add wasm32-unknown-unknown @$(MAKE) -C ./tests/lenses build -.PHONY: deps\:coverage -deps\:coverage: - go install github.com/ory/go-acc@latest - @$(MAKE) deps:lens - .PHONY: deps\:bench deps\:bench: go install golang.org/x/perf/cmd/benchstat@latest @@ -118,7 +116,6 @@ deps: @$(MAKE) deps:modules && \ $(MAKE) deps:bench && \ $(MAKE) deps:chglog && \ - $(MAKE) deps:coverage && \ $(MAKE) deps:lint && \ $(MAKE) deps:test && \ $(MAKE) deps:mock @@ -161,6 +158,11 @@ clean: clean\:test: go clean -testcache +.PHONY: clean\:coverage +clean\:coverage: + rm -rf $(COVERAGE_DIRECTORY) + rm -f $(COVERAGE_FILE) + # Example: `make tls-certs path="~/.defradb/certs"` .PHONY: tls-certs tls-certs: @@ -186,18 +188,6 @@ test\:quick: test\:build: gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) -run=nope -.PHONY: test\:ci -test\:ci: - DEFRA_BADGER_MEMORY=true DEFRA_BADGER_FILE=true \ - DEFRA_CLIENT_GO=true DEFRA_CLIENT_HTTP=true \ - $(MAKE) test:all - -.PHONY: test\:ci-gql-mutations -test\:ci-gql-mutations: - DEFRA_MUTATION_TYPE=gql DEFRA_BADGER_MEMORY=true \ - DEFRA_CLIENT_GO=true DEFRA_CLIENT_HTTP=true \ - $(MAKE) test:all - .PHONY: test\:gql-mutations test\:gql-mutations: DEFRA_MUTATION_TYPE=gql DEFRA_BADGER_MEMORY=true gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) @@ -207,12 +197,6 @@ test\:gql-mutations: # # For example, CreateDoc will call [Collection.Create], and # UpdateDoc will call [Collection.Update]. 
-.PHONY: test\:ci-col-named-mutations -test\:ci-col-named-mutations: - DEFRA_MUTATION_TYPE=collection-named DEFRA_BADGER_MEMORY=true \ - DEFRA_CLIENT_GO=true DEFRA_CLIENT_HTTP=true \ - $(MAKE) test:all - .PHONY: test\:col-named-mutations test\:col-named-mutations: DEFRA_MUTATION_TYPE=collection-named DEFRA_BADGER_MEMORY=true gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) @@ -225,6 +209,10 @@ test\:go: test\:http: DEFRA_CLIENT_HTTP=true go test $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) +.PHONY: test\:cli +test\:cli: + DEFRA_CLIENT_CLI=true go test $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) + .PHONY: test\:names test\:names: gotestsum --format testname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) @@ -233,7 +221,6 @@ test\:names: test\:all: @$(MAKE) test:names @$(MAKE) test:lens - @$(MAKE) test:cli .PHONY: test\:verbose test\:verbose: @@ -264,38 +251,27 @@ test\:lens: @$(MAKE) deps:lens gotestsum --format testname -- ./$(LENS_TEST_DIRECTORY)/... $(TEST_FLAGS) -.PHONY: test\:cli -test\:cli: - @$(MAKE) deps:lens - gotestsum --format testname -- ./$(CLI_TEST_DIRECTORY)/... $(TEST_FLAGS) - -# Using go-acc to ensure integration tests are included. -# Usage: `make test:coverage` or `make test:coverage path="{pathToPackage}"` -# Example: `make test:coverage path="./api/..."` .PHONY: test\:coverage test\:coverage: - @$(MAKE) deps:coverage -ifeq ($(path),) - go-acc ./... --output=coverage.txt --covermode=atomic -- -failfast -coverpkg=./... - @echo "Show coverage information for each function in ./..." -else - go-acc $(path) --output=coverage.txt --covermode=atomic -- -failfast -coverpkg=$(path) - @echo "Show coverage information for each function in" path=$(path) -endif - go tool cover -func coverage.txt | grep total | awk '{print $$3}' + @$(MAKE) deps:lens + @$(MAKE) clean:coverage + mkdir $(COVERAGE_DIRECTORY) + gotestsum --format testname -- ./... $(TEST_FLAGS) $(COVERAGE_FLAGS) + go tool covdata textfmt -i=$(COVERAGE_DIRECTORY) -o $(COVERAGE_FILE) + +.PHONY: test\:coverage-func +test\:coverage-func: + @$(MAKE) test:coverage + go tool cover -func=$(COVERAGE_FILE) -# Usage: `make test:coverage-html` or `make test:coverage-html path="{pathToPackage}"` -# Example: `make test:coverage-html path="./api/..."` .PHONY: test\:coverage-html test\:coverage-html: - @$(MAKE) test:coverage path=$(path) - @echo "Generate coverage information in HTML" - go tool cover -html=coverage.txt - rm ./coverage.txt + @$(MAKE) test:coverage + go tool cover -html=$(COVERAGE_FILE) .PHONY: test\:changes test\:changes: - gotestsum --format testname -- ./$(CHANGE_DETECTOR_TEST_DIRECTORY)/... --tags change_detector + gotestsum --format testname -- ./$(CHANGE_DETECTOR_TEST_DIRECTORY)/... -timeout 15m --tags change_detector .PHONY: validate\:codecov validate\:codecov: @@ -332,7 +308,7 @@ docs: .PHONY: docs\:cli docs\:cli: - go run cmd/genclidocs/genclidocs.go -o docs/cli/ + go run cmd/genclidocs/main.go -o docs/cli/ .PHONY: docs\:manpages docs\:manpages: diff --git a/api/http/errors.go b/api/http/errors.go deleted file mode 100644 index 4acf9abd25..0000000000 --- a/api/http/errors.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package http - -import ( - "context" - "fmt" - "net/http" - "os" - "strings" - - "github.com/sourcenetwork/defradb/errors" -) - -var env = os.Getenv("DEFRA_ENV") - -// Errors returnable from this package. -// -// This list is incomplete. Undefined errors may also be returned. -// Errors returned from this package may be tested against these errors with errors.Is. -var ( - ErrNoListener = errors.New("cannot serve with no listener") - ErrSchema = errors.New("base must start with the http or https scheme") - ErrDatabaseNotAvailable = errors.New("no database available") - ErrFormNotSupported = errors.New("content type application/x-www-form-urlencoded not yet supported") - ErrBodyEmpty = errors.New("body cannot be empty") - ErrMissingGQLRequest = errors.New("missing GraphQL request") - ErrPeerIdUnavailable = errors.New("no PeerID available. P2P might be disabled") - ErrStreamingUnsupported = errors.New("streaming unsupported") - ErrNoEmail = errors.New("email address must be specified for tls with autocert") - ErrPayloadFormat = errors.New("invalid payload format") - ErrMissingNewKey = errors.New("missing _newKey for imported doc") -) - -// ErrorResponse is the GQL top level object holding error items for the response payload. -type ErrorResponse struct { - Errors []ErrorItem `json:"errors"` -} - -// ErrorItem hold an error message and extensions that might be pertinent to the request. -type ErrorItem struct { - Message string `json:"message"` - Extensions extensions `json:"extensions,omitempty"` -} - -type extensions struct { - Status int `json:"status"` - HTTPError string `json:"httpError"` - Stack string `json:"stack,omitempty"` -} - -func handleErr(ctx context.Context, rw http.ResponseWriter, err error, status int) { - if status == http.StatusInternalServerError { - log.ErrorE(ctx, http.StatusText(status), err) - } - - sendJSON( - ctx, - rw, - ErrorResponse{ - Errors: []ErrorItem{ - { - Message: err.Error(), - Extensions: extensions{ - Status: status, - HTTPError: http.StatusText(status), - Stack: formatError(err), - }, - }, - }, - }, - status, - ) -} - -func formatError(err error) string { - if strings.ToLower(env) == "dev" || strings.ToLower(env) == "development" { - return fmt.Sprintf("[DEV] %+v\n", err) - } - return "" -} diff --git a/api/http/errors_test.go b/api/http/errors_test.go deleted file mode 100644 index 9e4a5885c8..0000000000 --- a/api/http/errors_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package http - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" -) - -func CleanupEnv() { - env = "" -} - -func TestFormatError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "prod" - s := formatError(errors.New("test error")) - assert.Equal(t, "", s) - - env = "dev" - s = formatError(errors.New("test error")) - lines := strings.Split(s, "\n") - assert.Equal(t, "[DEV] test error", lines[0]) -} - -func TestHandleErrOnBadRequest(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - f := func(rw http.ResponseWriter, req *http.Request) { - handleErr(req.Context(), rw, errors.New("test error"), http.StatusBadRequest) - } - req, err := http.NewRequest("GET", "/test", nil) - if err != nil { - t.Fatal(err) - } - - rec := httptest.NewRecorder() - - f(rec, req) - - resp := rec.Result() - - errResponse := ErrorResponse{} - err = json.NewDecoder(resp.Body).Decode(&errResponse) - if err != nil { - t.Fatal(err) - } - - if len(errResponse.Errors) != 1 { - t.Fatal("expecting exactly one error") - } - - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, http.StatusText(http.StatusBadRequest), errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "test error", errResponse.Errors[0].Message) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "[DEV] test error") -} - -func TestHandleErrOnInternalServerError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - f := func(rw http.ResponseWriter, req *http.Request) { - handleErr(req.Context(), rw, errors.New("test error"), http.StatusInternalServerError) - } - req, err := http.NewRequest("GET", "/test", nil) - if err != nil { - t.Fatal(err) - } - - rec := httptest.NewRecorder() - - f(rec, req) - - resp := rec.Result() - - errResponse := ErrorResponse{} - err = json.NewDecoder(resp.Body).Decode(&errResponse) - if err != nil { - t.Fatal(err) - } - - if len(errResponse.Errors) != 1 { - t.Fatal("expecting exactly one error") - } - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, http.StatusText(http.StatusInternalServerError), errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "test error", errResponse.Errors[0].Message) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "[DEV] test error") -} - -func TestHandleErrOnNotFound(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - f := func(rw http.ResponseWriter, req *http.Request) { - handleErr(req.Context(), rw, errors.New("test error"), http.StatusNotFound) - } - req, err := http.NewRequest("GET", "/test", nil) - if err != nil { - t.Fatal(err) - } - - rec := httptest.NewRecorder() - - f(rec, req) - - resp := rec.Result() - - errResponse := ErrorResponse{} - err = json.NewDecoder(resp.Body).Decode(&errResponse) - if err != nil { - t.Fatal(err) - } - - if len(errResponse.Errors) != 1 { - t.Fatal("expecting exactly one error") - } - - assert.Equal(t, http.StatusNotFound, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, http.StatusText(http.StatusNotFound), errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "test error", errResponse.Errors[0].Message) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "[DEV] test error") -} - -func TestHandleErrOnDefault(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - f := func(rw http.ResponseWriter, req *http.Request) { - handleErr(req.Context(), rw, errors.New("unauthorized"), 
http.StatusUnauthorized) - } - req, err := http.NewRequest("GET", "/test", nil) - if err != nil { - t.Fatal(err) - } - - rec := httptest.NewRecorder() - - f(rec, req) - - resp := rec.Result() - - errResponse := ErrorResponse{} - err = json.NewDecoder(resp.Body).Decode(&errResponse) - if err != nil { - t.Fatal(err) - } - - if len(errResponse.Errors) != 1 { - t.Fatal("expecting exactly one error") - } - - assert.Equal(t, http.StatusUnauthorized, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, http.StatusText(http.StatusUnauthorized), errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "unauthorized", errResponse.Errors[0].Message) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "[DEV] unauthorized") -} diff --git a/api/http/handler.go b/api/http/handler.go deleted file mode 100644 index aa7b828f29..0000000000 --- a/api/http/handler.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/go-chi/chi/v5" - "github.com/go-chi/cors" - "github.com/pkg/errors" - - "github.com/sourcenetwork/defradb/client" -) - -type handler struct { - db client.DB - *chi.Mux - - // user configurable options - options serverOptions -} - -// context variables -type ( - ctxDB struct{} - ctxPeerID struct{} -) - -// DataResponse is the GQL top level object holding data for the response payload. -type DataResponse struct { - Data any `json:"data"` -} - -// simpleDataResponse is a helper function that returns a DataResponse struct. -// Odd arguments are the keys and must be strings otherwise they are ignored. -// Even arguments are the values associated with the previous key. -// Odd arguments are also ignored if there are no following arguments. -func simpleDataResponse(args ...any) DataResponse { - data := make(map[string]any) - - for i := 0; i < len(args); i += 2 { - if len(args) >= i+2 { - switch a := args[i].(type) { - case string: - data[a] = args[i+1] - - default: - continue - } - } - } - - return DataResponse{ - Data: data, - } -} - -// newHandler returns a handler with the router instantiated. 
-func newHandler(db client.DB, opts serverOptions) *handler { - mux := chi.NewRouter() - mux.Use(loggerMiddleware) - - if len(opts.allowedOrigins) != 0 { - mux.Use(cors.Handler(cors.Options{ - AllowedOrigins: opts.allowedOrigins, - AllowedMethods: []string{"GET", "POST", "PATCH", "OPTIONS"}, - AllowedHeaders: []string{"Content-Type"}, - MaxAge: 300, - })) - } - - mux.Use(func(next http.Handler) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - if opts.tls.HasValue() { - rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains") - } - ctx := context.WithValue(req.Context(), ctxDB{}, db) - if opts.peerID != "" { - ctx = context.WithValue(ctx, ctxPeerID{}, opts.peerID) - } - next.ServeHTTP(rw, req.WithContext(ctx)) - }) - }) - - return setRoutes(&handler{ - Mux: mux, - db: db, - options: opts, - }) -} - -func getJSON(req *http.Request, v any) error { - err := json.NewDecoder(req.Body).Decode(v) - if err != nil { - return errors.Wrap(err, "unmarshal error") - } - return nil -} - -func sendJSON(ctx context.Context, rw http.ResponseWriter, v any, code int) { - rw.Header().Set("Content-Type", "application/json") - - b, err := json.Marshal(v) - if err != nil { - log.Error(ctx, fmt.Sprintf("Error while encoding JSON: %v", err)) - rw.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(rw, `{"error": "Internal server error"}`); err != nil { - log.Error(ctx, err.Error()) - } - return - } - - rw.WriteHeader(code) - if _, err = rw.Write(b); err != nil { - rw.WriteHeader(http.StatusInternalServerError) - log.Error(ctx, err.Error()) - } -} - -func dbFromContext(ctx context.Context) (client.DB, error) { - db, ok := ctx.Value(ctxDB{}).(client.DB) - if !ok { - return nil, ErrDatabaseNotAvailable - } - - return db, nil -} diff --git a/api/http/handler_test.go b/api/http/handler_test.go deleted file mode 100644 index 2015c7a0ba..0000000000 --- a/api/http/handler_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
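The handler above threads the database through the request context under an unexported struct key, and dbFromContext recovers it with a checked type assertion. A self-contained sketch of that pattern under stated assumptions (fakeDB is a stand-in, not client.DB):

package main

import (
	"context"
	"errors"
	"fmt"
)

type fakeDB struct{ name string }

// ctxDB is an unexported struct type used as a context key; such keys
// cannot collide with keys defined by other packages.
type ctxDB struct{}

var errNoDB = errors.New("no database available")

func dbFromContext(ctx context.Context) (*fakeDB, error) {
	db, ok := ctx.Value(ctxDB{}).(*fakeDB)
	if !ok {
		return nil, errNoDB
	}
	return db, nil
}

func main() {
	ctx := context.WithValue(context.Background(), ctxDB{}, &fakeDB{name: "defra"})
	db, err := dbFromContext(ctx)
	fmt.Println(db, err) // &{defra} <nil>

	_, err = dbFromContext(context.Background())
	fmt.Println(err) // no database available
}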
- -package http - -import ( - "bytes" - "context" - "io" - "math" - "net/http" - "net/http/httptest" - "path" - "testing" - - badger "github.com/dgraph-io/badger/v4" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/db" - "github.com/sourcenetwork/defradb/logging" -) - -func TestSimpleDataResponse(t *testing.T) { - resp := simpleDataResponse("key", "value", "key2", "value2") - switch v := resp.Data.(type) { - case map[string]any: - assert.Equal(t, "value", v["key"]) - assert.Equal(t, "value2", v["key2"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } - - resp2 := simpleDataResponse("key", "value", "key2") - switch v := resp2.Data.(type) { - case map[string]any: - assert.Equal(t, "value", v["key"]) - assert.Equal(t, nil, v["key2"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } - - resp3 := simpleDataResponse("key", "value", 2, "value2") - switch v := resp3.Data.(type) { - case map[string]any: - assert.Equal(t, "value", v["key"]) - assert.Equal(t, nil, v["2"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } -} - -func TestNewHandlerWithLogger(t *testing.T) { - h := newHandler(nil, serverOptions{}) - - dir := t.TempDir() - - // send logs to temp file so we can inspect it - logFile := path.Join(dir, "http_test.log") - log.ApplyConfig(logging.Config{ - EncoderFormat: logging.NewEncoderFormatOption(logging.JSON), - OutputPaths: []string{logFile}, - }) - - req, err := http.NewRequest("GET", PingPath, nil) - if err != nil { - t.Fatal(err) - } - - rec := httptest.NewRecorder() - lrw := newLoggingResponseWriter(rec) - h.ServeHTTP(lrw, req) - assert.Equal(t, 200, rec.Result().StatusCode) - - // inspect the log file - kv, err := readLog(logFile) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "http", kv["logger"]) -} - -func TestGetJSON(t *testing.T) { - var obj struct { - Name string - } - - jsonStr := ` -{ - "Name": "John Doe" -}` - - req, err := http.NewRequest("POST", "/ping", bytes.NewBuffer([]byte(jsonStr))) - if err != nil { - t.Fatal(err) - } - - err = getJSON(req, &obj) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "John Doe", obj.Name) -} - -func TestGetJSONWithError(t *testing.T) { - var obj struct { - Name string - } - - jsonStr := ` -{ - "Name": 10 -}` - - req, err := http.NewRequest("POST", "/ping", bytes.NewBuffer([]byte(jsonStr))) - if err != nil { - t.Fatal(err) - } - - err = getJSON(req, &obj) - assert.Error(t, err) -} - -func TestSendJSONWithNoErrors(t *testing.T) { - obj := struct { - Name string - }{ - Name: "John Doe", - } - - rec := httptest.NewRecorder() - - sendJSON(context.Background(), rec, obj, 200) - - body, err := io.ReadAll(rec.Result().Body) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, []byte("{\"Name\":\"John Doe\"}"), body) -} - -func TestSendJSONWithMarshallFailure(t *testing.T) { - rec := httptest.NewRecorder() - - sendJSON(context.Background(), rec, math.Inf(1), 200) - - assert.Equal(t, http.StatusInternalServerError, rec.Result().StatusCode) -} - -type loggerTest struct { - loggingResponseWriter -} - -func (lt *loggerTest) Write(b []byte) (int, error) { - return 0, errors.New("this write will fail") -} - -func TestSendJSONWithMarshallFailureAndWriteFailer(t *testing.T) { - rec := httptest.NewRecorder() - lrw := loggerTest{} - lrw.ResponseWriter = rec - - sendJSON(context.Background(), &lrw, 
math.Inf(1), 200) - - assert.Equal(t, http.StatusInternalServerError, rec.Result().StatusCode) -} - -func TestSendJSONWithWriteFailure(t *testing.T) { - obj := struct { - Name string - }{ - Name: "John Doe", - } - - rec := httptest.NewRecorder() - lrw := loggerTest{} - lrw.ResponseWriter = rec - - sendJSON(context.Background(), &lrw, obj, 200) - - assert.Equal(t, http.StatusInternalServerError, lrw.statusCode) -} - -func TestDbFromContext(t *testing.T) { - _, err := dbFromContext(context.Background()) - assert.Error(t, err) - - opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} - rootstore, err := badgerds.NewDatastore("", &opts) - if err != nil { - t.Fatal(err) - } - - var options []db.Option - ctx := context.Background() - - defra, err := db.NewDB(ctx, rootstore, options...) - if err != nil { - t.Fatal(err) - } - - reqCtx := context.WithValue(ctx, ctxDB{}, defra) - - _, err = dbFromContext(reqCtx) - assert.NoError(t, err) -} - -func TestCORSRequest(t *testing.T) { - cases := []struct { - name string - method string - reqHeaders map[string]string - resHeaders map[string]string - }{ - { - "DisallowedOrigin", - "OPTIONS", - map[string]string{ - "Origin": "https://notsource.network", - }, - map[string]string{ - "Vary": "Origin", - }, - }, - { - "AllowedOrigin", - "OPTIONS", - map[string]string{ - "Origin": "https://source.network", - }, - map[string]string{ - "Access-Control-Allow-Origin": "https://source.network", - "Vary": "Origin", - }, - }, - } - - s := NewServer(nil, WithAllowedOrigins("https://source.network")) - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - req, err := http.NewRequest(c.method, PingPath, nil) - if err != nil { - t.Fatal(err) - } - - for header, value := range c.reqHeaders { - req.Header.Add(header, value) - } - - rec := httptest.NewRecorder() - - s.Handler.ServeHTTP(rec, req) - - for header, value := range c.resHeaders { - assert.Equal(t, value, rec.Result().Header.Get(header)) - } - }) - } -} - -func TestTLSRequestResponseHeader(t *testing.T) { - cases := []struct { - name string - method string - reqHeaders map[string]string - resHeaders map[string]string - }{ - { - "TLSHeader", - "GET", - map[string]string{}, - map[string]string{ - "Strict-Transport-Security": "max-age=63072000; includeSubDomains", - }, - }, - } - dir := t.TempDir() - - s := NewServer(nil, WithTLS(), WithAddress("example.com"), WithRootDir(dir)) - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - req, err := http.NewRequest(c.method, PingPath, nil) - if err != nil { - t.Fatal(err) - } - - for header, value := range c.reqHeaders { - req.Header.Add(header, value) - } - - rec := httptest.NewRecorder() - - s.Handler.ServeHTTP(rec, req) - - for header, value := range c.resHeaders { - assert.Equal(t, value, rec.Result().Header.Get(header)) - } - }) - } -} diff --git a/api/http/handlerfuncs.go b/api/http/handlerfuncs.go deleted file mode 100644 index 2a248d7d81..0000000000 --- a/api/http/handlerfuncs.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
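The tests above, and the remaining deleted test files below, all share one stdlib loop: build an *http.Request, serve it into an httptest.ResponseRecorder, then assert on the recorded result. A condensed, dependency-free version of that loop (the pong handler is illustrative):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// pong is a trivial stand-in for the removed ping handler.
func pong(rw http.ResponseWriter, req *http.Request) {
	rw.WriteHeader(http.StatusOK)
	io.WriteString(rw, `{"data":{"response":"pong"}}`)
}

func main() {
	// httptest.NewRequest never fails, unlike http.NewRequest.
	req := httptest.NewRequest(http.MethodGet, "/ping", nil)
	rec := httptest.NewRecorder()

	http.HandlerFunc(pong).ServeHTTP(rec, req)

	res := rec.Result()
	body, _ := io.ReadAll(res.Body)
	fmt.Println(res.StatusCode, string(body)) // 200 {"data":{"response":"pong"}}
}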
- -package http - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "mime" - "net/http" - - "github.com/go-chi/chi/v5" - dshelp "github.com/ipfs/boxo/datastore/dshelp" - dag "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - "github.com/multiformats/go-multihash" - - "github.com/sourcenetwork/defradb/client" - corecrdt "github.com/sourcenetwork/defradb/core/crdt" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/events" -) - -const ( - contentTypeJSON = "application/json" - contentTypeGraphQL = "application/graphql" - contentTypeFormURLEncoded = "application/x-www-form-urlencoded" -) - -func rootHandler(rw http.ResponseWriter, req *http.Request) { - sendJSON( - req.Context(), - rw, - simpleDataResponse( - "response", "Welcome to the DefraDB HTTP API. Use /graphql to send queries to the database."+ - " Read the documentation at https://docs.source.network/.", - ), - http.StatusOK, - ) -} - -func pingHandler(rw http.ResponseWriter, req *http.Request) { - sendJSON( - req.Context(), - rw, - simpleDataResponse("response", "pong"), - http.StatusOK, - ) -} - -func dumpHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - err = db.PrintDump(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("response", "ok"), - http.StatusOK, - ) -} - -type gqlRequest struct { - Request string `json:"query"` -} - -func execGQLHandler(rw http.ResponseWriter, req *http.Request) { - request := req.URL.Query().Get("query") - if request == "" { - // extract the media type from the content-type header - contentType, _, err := mime.ParseMediaType(req.Header.Get("Content-Type")) - // mime.ParseMediaType will return an error (mime: no media type) - // if there is no media type set (i.e. application/json). - // This however is not a failing condition as not setting the content-type header - // should still make for a valid request and hit our default switch case. 
- if err != nil && err.Error() != "mime: no media type" { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - switch contentType { - case contentTypeJSON: - gqlReq := gqlRequest{} - - err := getJSON(req, &gqlReq) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - request = gqlReq.Request - - case contentTypeFormURLEncoded: - handleErr( - req.Context(), - rw, - ErrFormNotSupported, - http.StatusBadRequest, - ) - return - - case contentTypeGraphQL: - fallthrough - - default: - if req.Body == nil { - handleErr(req.Context(), rw, ErrBodyEmpty, http.StatusBadRequest) - return - } - body, err := readWithLimit(req.Body, rw) - if err != nil { - handleErr(req.Context(), rw, errors.WithStack(err), http.StatusInternalServerError) - return - } - request = string(body) - } - } - - // if at this point request is still empty, return an error - if request == "" { - handleErr(req.Context(), rw, ErrMissingGQLRequest, http.StatusBadRequest) - return - } - - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - result := db.ExecRequest(req.Context(), request) - - if result.Pub != nil { - subscriptionHandler(result.Pub, rw, req) - return - } - - sendJSON(req.Context(), rw, newGQLResult(result.GQL), http.StatusOK) -} - -type fieldResponse struct { - ID string `json:"id"` - Name string `json:"name"` - Kind string `json:"kind"` - Internal bool `json:"internal"` -} - -type collectionResponse struct { - Name string `json:"name"` - ID string `json:"id"` - VersionID string `json:"version_id"` - Fields []fieldResponse `json:"fields,omitempty"` -} - -func listSchemaHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - cols, err := db.GetAllCollections(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - colResp := make([]collectionResponse, len(cols)) - for i, col := range cols { - var fields []fieldResponse - for _, field := range col.Schema().Fields { - fieldRes := fieldResponse{ - ID: field.ID.String(), - Name: field.Name, - Internal: field.IsInternal(), - } - if field.IsObjectArray() { - fieldRes.Kind = fmt.Sprintf("[%s]", field.Schema) - } else if field.IsObject() { - fieldRes.Kind = field.Schema - } else { - fieldRes.Kind = field.Kind.String() - } - fields = append(fields, fieldRes) - } - colResp[i] = collectionResponse{ - Name: col.Name(), - ID: col.SchemaID(), - VersionID: col.Schema().VersionID, - Fields: fields, - } - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("collections", colResp), - http.StatusOK, - ) -} - -func loadSchemaHandler(rw http.ResponseWriter, req *http.Request) { - sdl, err := readWithLimit(req.Body, rw) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - colDescs, err := db.AddSchema(req.Context(), string(sdl)) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - colResp := make([]collectionResponse, len(colDescs)) - for i, desc := range colDescs { - col, err := db.GetCollectionByName(req.Context(), desc.Name) - if err != nil { - handleErr(req.Context(), rw, err, 
http.StatusInternalServerError) - return - } - colResp[i] = collectionResponse{ - Name: col.Name(), - ID: col.SchemaID(), - VersionID: col.Schema().VersionID, - } - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success", "collections", colResp), - http.StatusOK, - ) -} - -func patchSchemaHandler(rw http.ResponseWriter, req *http.Request) { - patch, err := readWithLimit(req.Body, rw) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - // Hardcode setDefault to true here, as that preserves the existing behaviour. - // This function will be ripped out very shortly and I don't think it is worth - // spending time/thought here. The new http api handles this correctly. - err = db.PatchSchema(req.Context(), string(patch), true) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success"), - http.StatusOK, - ) -} - -func setMigrationHandler(rw http.ResponseWriter, req *http.Request) { - cfgStr, err := readWithLimit(req.Body, rw) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - var cfg client.LensConfig - err = json.Unmarshal(cfgStr, &cfg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - err = db.LensRegistry().SetMigration(req.Context(), cfg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success"), - http.StatusOK, - ) -} - -func getMigrationHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - cfgs, err := db.LensRegistry().Config(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("configuration", cfgs), - http.StatusOK, - ) -} - -func getBlockHandler(rw http.ResponseWriter, req *http.Request) { - cidStr := chi.URLParam(req, "cid") - - // try to parse CID - cID, err := cid.Decode(cidStr) - if err != nil { - // If we can't try to parse DSKeyToCID - // return error if we still can't - key := ds.NewKey(cidStr) - var hash multihash.Multihash - hash, err = dshelp.DsKeyToMultihash(key) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - cID = cid.NewCidV1(cid.Raw, hash) - } - - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - block, err := db.Blockstore().Get(req.Context(), cID) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - nd, err := dag.DecodeProtobuf(block.RawData()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - buf, err := nd.MarshalJSON() - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - reg := corecrdt.LWWRegister{} - delta, err := 
reg.DeltaDecode(nd) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - data, err := delta.Marshal() - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse( - "block", string(buf), - "delta", string(data), - "val", delta.Value(), - ), - http.StatusOK, - ) -} - -func peerIDHandler(rw http.ResponseWriter, req *http.Request) { - peerID, ok := req.Context().Value(ctxPeerID{}).(string) - if !ok || peerID == "" { - handleErr(req.Context(), rw, ErrPeerIdUnavailable, http.StatusNotFound) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse( - "peerID", peerID, - ), - http.StatusOK, - ) -} - -func subscriptionHandler(pub *events.Publisher[events.Update], rw http.ResponseWriter, req *http.Request) { - flusher, ok := rw.(http.Flusher) - if !ok { - handleErr(req.Context(), rw, ErrStreamingUnsupported, http.StatusInternalServerError) - return - } - - rw.Header().Set("Content-Type", "text/event-stream") - rw.Header().Set("Cache-Control", "no-cache") - rw.Header().Set("Connection", "keep-alive") - - for { - select { - case <-req.Context().Done(): - pub.Unsubscribe() - return - case s, open := <-pub.Stream(): - if !open { - return - } - b, err := json.Marshal(s) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - fmt.Fprintf(rw, "data: %s\n\n", b) - flusher.Flush() - } - } -} - -// maxBytes is an arbitrary limit to prevent unbounded message bodies being sent and read. -const maxBytes int64 = 100 * (1 << (10 * 2)) // 100MB - -// readWithLimit reads from the reader until either EOF or the maximum number of bytes have been read. -func readWithLimit(reader io.ReadCloser, rw http.ResponseWriter) ([]byte, error) { - reader = http.MaxBytesReader(rw, reader, maxBytes) - - var buf bytes.Buffer - _, err := io.Copy(&buf, reader) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} diff --git a/api/http/handlerfuncs_backup.go b/api/http/handlerfuncs_backup.go deleted file mode 100644 index 3961263995..0000000000 --- a/api/http/handlerfuncs_backup.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt.
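On the removed size limit: 1 << (10 * 2) is 2^20, so maxBytes works out to 100 * 1048576 = 104857600 bytes (100 MiB). A small sketch of how http.MaxBytesReader enforces such a cap by failing the read once the limit is crossed (the tiny 4-byte limit here is just to make the failure visible):

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

// Same arithmetic as the removed constant: 1<<(10*2) is 2^20 (one MiB),
// so the cap is 100 MiB.
const maxBytes int64 = 100 * (1 << (10 * 2))

func main() {
	fmt.Println(maxBytes) // 104857600

	// MaxBytesReader makes Read return an error once the limit is
	// exceeded, instead of letting a client stream an unbounded body.
	rw := httptest.NewRecorder()
	body := io.NopCloser(strings.NewReader("0123456789"))
	limited := http.MaxBytesReader(rw, body, 4)

	_, err := io.ReadAll(limited)
	fmt.Println(err) // http: request body too large
}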
- -package http - -import ( - "context" - "net/http" - "os" - "strings" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/errors" -) - -func exportHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - cfg := &client.BackupConfig{} - err = getJSON(req, cfg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - err = validateBackupConfig(req.Context(), cfg, db) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - err = db.BasicExport(req.Context(), cfg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success"), - http.StatusOK, - ) -} - -func importHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - cfg := &client.BackupConfig{} - err = getJSON(req, cfg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - err = validateBackupConfig(req.Context(), cfg, db) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - err = db.BasicImport(req.Context(), cfg.Filepath) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success"), - http.StatusOK, - ) -} - -func validateBackupConfig(ctx context.Context, cfg *client.BackupConfig, db client.DB) error { - if !isValidPath(cfg.Filepath) { - return errors.New("invalid file path") - } - - if cfg.Format != "" && strings.ToLower(cfg.Format) != "json" { - return errors.New("only JSON format is supported at the moment") - } - for _, colName := range cfg.Collections { - _, err := db.GetCollectionByName(ctx, colName) - if err != nil { - return errors.Wrap("collection does not exist", err) - } - } - return nil -} - -func isValidPath(filepath string) bool { - // if a file exists, return true - if _, err := os.Stat(filepath); err == nil { - return true - } - - // if not, attempt to write to the path and if successful, - // remove the file and return true - var d []byte - if err := os.WriteFile(filepath, d, 0o644); err == nil { - _ = os.Remove(filepath) - return true - } - - return false -} diff --git a/api/http/handlerfuncs_backup_test.go b/api/http/handlerfuncs_backup_test.go deleted file mode 100644 index 67af6015a1..0000000000 --- a/api/http/handlerfuncs_backup_test.go +++ /dev/null @@ -1,623 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
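Both removed handlers above decode a client.BackupConfig from the request body before validating it. A sketch of what a caller would post; backupConfig is a local stand-in, and its JSON tags, the endpoint path, and the port are assumptions for illustration only:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// backupConfig is a hypothetical stand-in for client.BackupConfig with the
// three fields the deleted handlers consult; the JSON tags are assumed.
type backupConfig struct {
	Filepath    string   `json:"filepath"`
	Format      string   `json:"format,omitempty"`
	Collections []string `json:"collections,omitempty"`
}

func main() {
	cfg := backupConfig{
		Filepath:    "/tmp/backup.json",
		Format:      "json", // validateBackupConfig rejects anything but JSON
		Collections: []string{"User"},
	}

	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}

	// Hypothetical endpoint; the real path constant (ExportPath) lived in
	// the removed router setup.
	req, err := http.NewRequest(http.MethodPost, "http://localhost:9181/export", bytes.NewReader(b))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	fmt.Println(req.Method, req.URL, string(b))
}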
- -package http - -import ( - "bytes" - "context" - "encoding/json" - "net/http" - "os" - "testing" - - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/client/mocks" - "github.com/sourcenetwork/defradb/errors" -) - -func TestExportHandler_WithNoDB_NoDatabaseAvailableError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: ExportPath, - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestExportHandler_WithWrongPayload_ReturnError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - buf := bytes.NewBuffer([]byte("[]")) - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "json: cannot unmarshal array into Go value of type client.BackupConfig") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "unmarshal error: json: cannot unmarshal array into Go value of type client.BackupConfig", errResponse.Errors[0].Message) -} - -func TestExportHandler_WithInvalidFilePath_ReturnError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - filepath := t.TempDir() + "/some/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid file path") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "invalid file path", errResponse.Errors[0].Message) -} - -func TestExportHandler_WithInvalidFomat_ReturnError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - Format: "csv", - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "only JSON format is supported at the moment") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - 
require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "only JSON format is supported at the moment", errResponse.Errors[0].Message) -} - -func TestExportHandler_WithInvalidCollection_ReturnError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - Format: "json", - Collections: []string{"invalid"}, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "collection does not exist: datastore: key not found") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "collection does not exist: datastore: key not found", errResponse.Errors[0].Message) -} - -func TestExportHandler_WithBasicExportError_ReturnError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - db := mocks.NewDB(t) - testError := errors.New("test error") - db.EXPECT().BasicExport(mock.Anything, mock.Anything).Return(testError) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: db, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "test error") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "test error", errResponse.Errors[0].Message) -} - -func TestExportHandler_AllCollections_NoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - testLoadSchema(t, ctx, defra) - - col, err := defra.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - respBody := testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 200, - }) - - b, err = os.ReadFile(filepath) - require.NoError(t, err) - - require.Equal( - t, - `{"data":{"result":"success"}}`, - string(respBody), - ) - - require.Equal( - t, - `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`, - string(b), - ) -} - -func TestExportHandler_UserCollection_NoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - testLoadSchema(t, ctx, defra) - - col, err := defra.GetCollectionByName(ctx, "User") - 
require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - Collections: []string{"User"}, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - respBody := testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 200, - }) - - b, err = os.ReadFile(filepath) - require.NoError(t, err) - - require.Equal( - t, - `{"data":{"result":"success"}}`, - string(respBody), - ) - - require.Equal( - t, - `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`, - string(b), - ) -} - -func TestExportHandler_UserCollectionWithModifiedDoc_NoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - testLoadSchema(t, ctx, defra) - - col, err := defra.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - err = doc.Set("points", 1000) - require.NoError(t, err) - - err = col.Update(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - Collections: []string{"User"}, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - respBody := testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 200, - }) - - b, err = os.ReadFile(filepath) - require.NoError(t, err) - - require.Equal( - t, - `{"data":{"result":"success"}}`, - string(respBody), - ) - - require.Equal( - t, - `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-36697142-d46a-57b1-b25e-6336706854ea","age":31,"name":"Bob","points":1000,"verified":true}]}`, - string(b), - ) -} - -func TestImportHandler_WithNoDB_NoDatabaseAvailableError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: ImportPath, - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestImportHandler_WithWrongPayloadFormat_UnmarshalError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - buf := bytes.NewBuffer([]byte(`[]`)) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - Body: buf, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - require.Contains( - t, - errResponse.Errors[0].Extensions.Stack, - "json: cannot unmarshal array into Go value of type client.BackupConfig", - ) - require.Equal(t, http.StatusBadRequest, 
errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal( - t, - "unmarshal error: json: cannot unmarshal array into Go value of type client.BackupConfig", - errResponse.Errors[0].Message, - ) -} - -func TestImportHandler_WithInvalidFilepath_ReturnError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - filepath := t.TempDir() + "/some/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - Body: buf, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid file path") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "invalid file path", errResponse.Errors[0].Message) -} - -func TestImportHandler_WithDBClosed_DatastoreClosedError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defra.Close(ctx) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "datastore closed") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "datastore closed", errResponse.Errors[0].Message) -} - -func TestImportHandler_WithUnknownCollection_KeyNotFoundError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - filepath := t.TempDir() + "/test.json" - err := os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), - 0644, - ) - require.NoError(t, err) - - cfg := client.BackupConfig{ - Filepath: filepath, - } - - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "failed to get collection: datastore: key not found. Name: User") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "failed to get collection: datastore: key not found. 
Name: User", errResponse.Errors[0].Message) -} - -func TestImportHandler_UserCollection_NoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - testLoadSchema(t, ctx, defra) - - filepath := t.TempDir() + "/test.json" - err := os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), - 0644, - ) - require.NoError(t, err) - - cfg := client.BackupConfig{ - Filepath: filepath, - } - - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - resp := DataResponse{} - _ = testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - Body: buf, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, "success", v["result"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } - - doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) - require.NoError(t, err) - - col, err := defra.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - importedDoc, err := col.Get(ctx, doc.Key(), false) - require.NoError(t, err) - - require.Equal(t, doc.Key().String(), importedDoc.Key().String()) -} - -func TestImportHandler_WithExistingDoc_DocumentExistError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - testLoadSchema(t, ctx, defra) - - col, err := defra.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - err = os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), - 0644, - ) - require.NoError(t, err) - - cfg := client.BackupConfig{ - Filepath: filepath, - } - - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - _ = testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - QueryParams: map[string]string{"collections": "User"}, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains( - t, - errResponse.Errors[0].Extensions.Stack, - "failed to save a new doc to collection: a document with the given dockey already exists", - ) - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal( - t, - "failed to save a new doc to collection: a document with the given dockey already exists. DocKey: bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab", - errResponse.Errors[0].Message, - ) -} diff --git a/api/http/handlerfuncs_index.go b/api/http/handlerfuncs_index.go deleted file mode 100644 index e8d10d900e..0000000000 --- a/api/http/handlerfuncs_index.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "net/http" - "strings" - - "github.com/sourcenetwork/defradb/client" -) - -func createIndexHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - var data map[string]string - err = getJSON(req, &data) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - colNameArg := data["collection"] - fieldsArg := data["fields"] - indexNameArg := data["name"] - - col, err := db.GetCollectionByName(req.Context(), colNameArg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - fields := strings.Split(fieldsArg, ",") - fieldDescriptions := make([]client.IndexedFieldDescription, 0, len(fields)) - for _, field := range fields { - fieldDescriptions = append(fieldDescriptions, client.IndexedFieldDescription{Name: field}) - } - indexDesc := client.IndexDescription{ - Name: indexNameArg, - Fields: fieldDescriptions, - } - indexDesc, err = col.CreateIndex(req.Context(), indexDesc) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("index", indexDesc), - http.StatusOK, - ) -} - -func dropIndexHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - var data map[string]string - err = getJSON(req, &data) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - colNameArg := data["collection"] - indexNameArg := data["name"] - - col, err := db.GetCollectionByName(req.Context(), colNameArg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - err = col.DropIndex(req.Context(), indexNameArg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success"), - http.StatusOK, - ) -} - -func listIndexHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - queryParams := req.URL.Query() - collectionParam := queryParams.Get("collection") - - if collectionParam == "" { - indexesPerCol, err := db.GetAllIndexes(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - sendJSON( - req.Context(), - rw, - simpleDataResponse("collections", indexesPerCol), - http.StatusOK, - ) - } else { - col, err := db.GetCollectionByName(req.Context(), collectionParam) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - indexes, err := col.GetIndexes(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - sendJSON( - req.Context(), - rw, - simpleDataResponse("indexes", indexes), - http.StatusOK, - ) - } -} diff --git a/api/http/handlerfuncs_index_test.go b/api/http/handlerfuncs_index_test.go deleted file mode 100644 index 
3e82249ef8..0000000000 --- a/api/http/handlerfuncs_index_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "bytes" - "context" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/client/mocks" - "github.com/sourcenetwork/defradb/errors" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -func addDBToContext(t *testing.T, req *http.Request, db *mocks.DB) *http.Request { - if db == nil { - db = mocks.NewDB(t) - } - ctx := context.WithValue(req.Context(), ctxDB{}, db) - return req.WithContext(ctx) -} - -func TestCreateIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { - handler := http.HandlerFunc(createIndexHandler) - assert.HTTPBodyContains(t, handler, "POST", IndexPath, nil, "no database available") -} - -func TestCreateIndexHandler_IfFailsToParseParams_ReturnError(t *testing.T) { - req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte("invalid map"))) - if err != nil { - t.Fatal(err) - } - req = addDBToContext(t, req, nil) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(createIndexHandler) - - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusBadRequest, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), "invalid character", "handler returned unexpected body") -} - -func TestCreateIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { - testError := errors.New("test error") - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) - - req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte(`{}`))) - if err != nil { - t.Fatal(err) - } - - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(createIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} - -func TestCreateIndexHandler_IfFailsToCreateIndex_ReturnError(t *testing.T) { - testError := errors.New("test error") - col := mocks.NewCollection(t) - col.EXPECT().CreateIndex(mock.Anything, mock.Anything). 
- Return(client.IndexDescription{}, testError) - - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) - - req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte(`{}`))) - if err != nil { - t.Fatal(err) - } - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(createIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} - -func TestDropIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { - handler := http.HandlerFunc(dropIndexHandler) - assert.HTTPBodyContains(t, handler, "DELETE", IndexPath, nil, "no database available") -} - -func TestDropIndexHandler_IfFailsToParseParams_ReturnError(t *testing.T) { - req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte("invalid map"))) - if err != nil { - t.Fatal(err) - } - req = addDBToContext(t, req, nil) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(dropIndexHandler) - - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusBadRequest, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), "invalid character", "handler returned unexpected body") -} - -func TestDropIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { - testError := errors.New("test error") - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) - - req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte(`{}`))) - if err != nil { - t.Fatal(err) - } - - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(dropIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} - -func TestDropIndexHandler_IfFailsToDropIndex_ReturnError(t *testing.T) { - testError := errors.New("test error") - col := mocks.NewCollection(t) - col.EXPECT().DropIndex(mock.Anything, mock.Anything).Return(testError) - - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) - - req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte(`{}`))) - if err != nil { - t.Fatal(err) - } - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(dropIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} - -func TestListIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { - handler := http.HandlerFunc(listIndexHandler) - assert.HTTPBodyContains(t, handler, "GET", IndexPath, nil, "no database available") -} - -func TestListIndexHandler_IfFailsToGetAllIndexes_ReturnError(t *testing.T) { - testError := errors.New("test error") - db := mocks.NewDB(t) - db.EXPECT().GetAllIndexes(mock.Anything).Return(nil, testError) - - req, err := http.NewRequest("GET", IndexPath, bytes.NewBuffer([]byte(`{}`))) - if err != nil { - t.Fatal(err) - } - - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(listIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, 
rr.Body.String(), testError.Error()) -} - -func TestListIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { - testError := errors.New("test error") - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) - - u, _ := url.Parse("http://defradb.com" + IndexPath) - params := url.Values{} - params.Add("collection", "testCollection") - u.RawQuery = params.Encode() - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - t.Fatal(err) - } - - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(listIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} - -func TestListIndexHandler_IfFailsToCollectionGetIndexes_ReturnError(t *testing.T) { - testError := errors.New("test error") - col := mocks.NewCollection(t) - col.EXPECT().GetIndexes(mock.Anything).Return(nil, testError) - - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) - - u, _ := url.Parse("http://defradb.com" + IndexPath) - params := url.Values{} - params.Add("collection", "testCollection") - u.RawQuery = params.Encode() - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - t.Fatal(err) - } - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(listIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} diff --git a/api/http/handlerfuncs_test.go b/api/http/handlerfuncs_test.go deleted file mode 100644 index ee7389a250..0000000000 --- a/api/http/handlerfuncs_test.go +++ /dev/null @@ -1,1184 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
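For reference, the removed createIndexHandler further up consumed a flat JSON object of strings and split the fields value on commas. A sketch of both sides of that payload (the index name and field values are illustrative):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Payload shape consumed by the removed createIndexHandler: all three
	// values are plain strings, with multiple fields joined by commas.
	payload := map[string]string{
		"collection": "User",
		"fields":     "name,age",
		"name":       "user_name_age_idx",
	}

	b, err := json.Marshal(payload)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	// Handler-side parsing, as in the deleted code: decode the map, then
	// split the comma-separated field list.
	var data map[string]string
	if err := json.Unmarshal(b, &data); err != nil {
		panic(err)
	}
	fields := strings.Split(data["fields"], ",")
	fmt.Println(data["collection"], fields, data["name"]) // User [name age] user_name_age_idx
}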
- -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - badger "github.com/dgraph-io/badger/v4" - dshelp "github.com/ipfs/boxo/datastore/dshelp" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/db" - "github.com/sourcenetwork/defradb/errors" -) - -type testOptions struct { - Testing *testing.T - DB client.DB - Handlerfunc http.HandlerFunc - Method string - Path string - Body io.Reader - Headers map[string]string - QueryParams map[string]string - ExpectedStatus int - ResponseData any - ServerOptions serverOptions -} - -type testUser struct { - Key string `json:"_key"` - Versions []testVersion `json:"_version"` -} - -type testVersion struct { - CID string `json:"cid"` -} - -func TestRootHandler(t *testing.T) { - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: RootPath, - Body: nil, - ExpectedStatus: 200, - ResponseData: &resp, - }) - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, "Welcome to the DefraDB HTTP API. Use /graphql to send queries to the database. Read the documentation at https://docs.source.network/.", v["response"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } -} - -func TestPingHandler(t *testing.T) { - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: PingPath, - Body: nil, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, "pong", v["response"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } -} - -func TestDumpHandlerWithNoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "GET", - Path: DumpPath, - Body: nil, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, "ok", v["response"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } -} - -func TestDumpHandlerWithDBError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: DumpPath, - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestExecGQLWithNilBody(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: GraphQLPath, - Body: nil, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "body cannot be empty") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - 
require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "body cannot be empty", errResponse.Errors[0].Message) -} - -func TestExecGQLWithEmptyBody(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: GraphQLPath, - Body: bytes.NewBuffer([]byte("")), - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "missing GraphQL request") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "missing GraphQL request", errResponse.Errors[0].Message) -} - -type mockReadCloser struct { - mock.Mock -} - -func (m *mockReadCloser) Read(p []byte) (n int, err error) { - args := m.Called(p) - return args.Int(0), args.Error(1) -} - -func TestExecGQLWithMockBody(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - mockReadCloser := mockReadCloser{} - // if Read is called, it will return error - mockReadCloser.On("Read", mock.AnythingOfType("[]uint8")).Return(0, errors.New("error reading")) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: GraphQLPath, - Body: &mockReadCloser, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "error reading", errResponse.Errors[0].Message) -} - -func TestExecGQLWithInvalidContentType(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - stmt := ` -mutation { - create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { - _key - } -}` - - buf := bytes.NewBuffer([]byte(stmt)) - testRequest(testOptions{ - Testing: t, - Method: "POST", - Path: GraphQLPath, - Body: buf, - ExpectedStatus: 500, - Headers: map[string]string{"Content-Type": contentTypeJSON + "; this-is-wrong"}, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "mime: invalid media parameter") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "mime: invalid media parameter", errResponse.Errors[0].Message) -} - -func TestExecGQLWithNoDB(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - stmt := ` -mutation { - create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { - _key - } -}` - - buf := bytes.NewBuffer([]byte(stmt)) - testRequest(testOptions{ - Testing: t, - Method: "POST", - Path: GraphQLPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestExecGQLHandlerContentTypeJSONWithJSONError(t *testing.T) { - t.Cleanup(CleanupEnv) - 
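The `mockReadCloser` above injects a read failure through testify's mock package; since Go 1.16 the standard library offers a lighter way to exercise the same failure path. A sketch, not the deleted code:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"testing/iotest"
)

func main() {
	// iotest.ErrReader always fails with the given error, matching the
	// mocked Read above that returns (0, "error reading").
	body := iotest.ErrReader(errors.New("error reading"))

	_, err := io.ReadAll(body)
	fmt.Println(err) // error reading
}
```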
env = "dev" - // statement with JSON formatting error - stmt := ` -[ - "query": "mutation { - create_User( - data: \"{ - \\\"age\\\": 31, - \\\"verified\\\": true, - \\\"points\\\": 90, - \\\"name\\\": \\\"Bob\\\" - }\" - ) {_key} - }" -]` - - buf := bytes.NewBuffer([]byte(stmt)) - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: GraphQLPath, - Body: buf, - Headers: map[string]string{"Content-Type": contentTypeJSON}, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid character") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "unmarshal error: invalid character ':' after array element", errResponse.Errors[0].Message) -} - -func TestExecGQLHandlerContentTypeJSON(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - // load schema - testLoadSchema(t, ctx, defra) - - // add document - stmt := ` -{ - "query": "mutation { - create_User( - data: \"{ - \\\"age\\\": 31, - \\\"verified\\\": true, - \\\"points\\\": 90, - \\\"name\\\": \\\"Bob\\\" - }\" - ) {_key} - }" -}` - // remove line returns and tabulation from formatted statement - stmt = strings.ReplaceAll(strings.ReplaceAll(stmt, "\t", ""), "\n", "") - - buf := bytes.NewBuffer([]byte(stmt)) - users := []testUser{} - resp := DataResponse{ - Data: &users, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - Headers: map[string]string{"Content-Type": contentTypeJSON}, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - require.Contains(t, users[0].Key, "bae-") -} - -func TestExecGQLHandlerContentTypeJSONWithError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - // load schema - testLoadSchema(t, ctx, defra) - - // add document - stmt := ` - { - "query": "mutation { - create_User( - data: \"{ - \\\"age\\\": 31, - \\\"notAField\\\": true - }\" - ) {_key} - }" - }` - - // remove line returns and tabulation from formatted statement - stmt = strings.ReplaceAll(strings.ReplaceAll(stmt, "\t", ""), "\n", "") - - buf := bytes.NewBuffer([]byte(stmt)) - resp := GQLResult{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - Headers: map[string]string{"Content-Type": contentTypeJSON}, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - require.Contains(t, resp.Errors, "The given field does not exist. 
Name: notAField") - require.Len(t, resp.Errors, 1) -} - -func TestExecGQLHandlerContentTypeJSONWithCharset(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - // load schema - testLoadSchema(t, ctx, defra) - - // add document - stmt := ` -{ - "query": "mutation { - create_User( - data: \"{ - \\\"age\\\": 31, - \\\"verified\\\": true, - \\\"points\\\": 90, - \\\"name\\\": \\\"Bob\\\" - }\" - ) {_key} - }" -}` - // remote line returns and tabulation from formatted statement - stmt = strings.ReplaceAll(strings.ReplaceAll(stmt, "\t", ""), "\n", "") - - buf := bytes.NewBuffer([]byte(stmt)) - users := []testUser{} - resp := DataResponse{ - Data: &users, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - Headers: map[string]string{"Content-Type": contentTypeJSON + "; charset=utf8"}, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - require.Contains(t, users[0].Key, "bae-") -} - -func TestExecGQLHandlerContentTypeFormURLEncoded(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: GraphQLPath, - Body: nil, - Headers: map[string]string{"Content-Type": contentTypeFormURLEncoded}, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "content type application/x-www-form-urlencoded not yet supported") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "content type application/x-www-form-urlencoded not yet supported", errResponse.Errors[0].Message) -} - -func TestExecGQLHandlerContentTypeGraphQL(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - // load schema - testLoadSchema(t, ctx, defra) - - // add document - stmt := ` -mutation { - create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { - _key - } -}` - - buf := bytes.NewBuffer([]byte(stmt)) - users := []testUser{} - resp := DataResponse{ - Data: &users, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - Headers: map[string]string{"Content-Type": contentTypeGraphQL}, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - require.Contains(t, users[0].Key, "bae-") -} - -func TestExecGQLHandlerContentTypeText(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - // load schema - testLoadSchema(t, ctx, defra) - - // add document - stmt := ` -mutation { - create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { - _key - } -}` - - buf := bytes.NewBuffer([]byte(stmt)) - users := []testUser{} - resp := DataResponse{ - Data: &users, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - require.Contains(t, users[0].Key, "bae-") -} - -func TestExecGQLHandlerWithSubsctiption(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - // load schema - testLoadSchema(t, ctx, defra) - - stmt := ` -subscription { - User { - _key - age - name - } -}` - - buf := bytes.NewBuffer([]byte(stmt)) - - ch := make(chan []byte) - errCh := 
make(chan error) - - // We need to set a timeout otherwise the testSubscriptionRequest function will block until the - // http.ServeHTTP call returns, which in this case will only happen with a timeout. - ctxTimeout, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - - go testSubscriptionRequest(ctxTimeout, testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - Headers: map[string]string{"Content-Type": contentTypeGraphQL}, - ExpectedStatus: 200, - }, ch, errCh) - - // We wait to ensure the subscription requests can subscribe to the event channel. - time.Sleep(time.Second / 2) - - // add document - stmt2 := ` -mutation { - create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { - _key - } -}` - - buf2 := bytes.NewBuffer([]byte(stmt2)) - users := []testUser{} - resp := DataResponse{ - Data: &users, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf2, - ExpectedStatus: 200, - ResponseData: &resp, - }) - select { - case data := <-ch: - require.Contains(t, string(data), users[0].Key) - case err := <-errCh: - t.Fatal(err) - } -} - -func TestListSchemaHandlerWithoutDB(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: SchemaPath, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestListSchemaHandlerWitNoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - stmt := ` -type user { - name: String - age: Int - verified: Boolean - points: Float -} -type group { - owner: user - members: [user] -}` - - _, err := defra.AddSchema(ctx, stmt) - if err != nil { - t.Fatal(err) - } - - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "GET", - Path: SchemaPath, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - switch v := resp.Data.(type) { - case map[string]any: - assert.Equal(t, map[string]any{ - "collections": []any{ - map[string]any{ - "name": "group", - "id": "bafkreicdtcgmgjjjao4zzaoacy26cl7xtnnev4qotvflellmdrzi57m5re", - "version_id": "bafkreicdtcgmgjjjao4zzaoacy26cl7xtnnev4qotvflellmdrzi57m5re", - "fields": []any{ - map[string]any{ - "id": "0", - "kind": "ID", - "name": "_key", - "internal": true, - }, - map[string]any{ - "id": "1", - "kind": "[user]", - "name": "members", - "internal": false, - }, - map[string]any{ - "id": "2", - "kind": "user", - "name": "owner", - "internal": false, - }, - map[string]any{ - "id": "3", - "kind": "ID", - "name": "owner_id", - "internal": true, - }, - }, - }, - map[string]any{ - "name": "user", - "id": "bafkreigl2v5trzfznb7dm3dubmsbzkw73s3phjm6laegswzl4625wc2grm", - "version_id": "bafkreigl2v5trzfznb7dm3dubmsbzkw73s3phjm6laegswzl4625wc2grm", - "fields": []any{ - map[string]any{ - "id": "0", - "kind": "ID", - "name": "_key", - "internal": true, - }, - map[string]any{ - "id": "1", - "kind": "Int", - "name": "age", - "internal": false, - }, - map[string]any{ - "id": "2", - "kind": "String", - "name": "name", - "internal": false, - }, - map[string]any{ - 
"id": "3", - "kind": "Float", - "name": "points", - "internal": false, - }, - map[string]any{ - "id": "4", - "kind": "Boolean", - "name": "verified", - "internal": false, - }, - }, - }, - }, - }, v) - - default: - t.Fatalf("data should be of type map[string]any but got %T\n%v", resp.Data, v) - } -} - -func TestLoadSchemaHandlerWithReadBodyError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - mockReadCloser := mockReadCloser{} - // if Read is called, it will return error - mockReadCloser.On("Read", mock.AnythingOfType("[]uint8")).Return(0, errors.New("error reading")) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: SchemaPath, - Body: &mockReadCloser, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "error reading", errResponse.Errors[0].Message) -} - -func TestLoadSchemaHandlerWithoutDB(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - stmt := ` -type User { - name: String - age: Int - verified: Boolean - points: Float -}` - - buf := bytes.NewBuffer([]byte(stmt)) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: SchemaPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestLoadSchemaHandlerWithAddSchemaError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - // statement with types instead of type - stmt := ` -types User { - name: String - age: Int - verified: Boolean - points: Float -}` - - buf := bytes.NewBuffer([]byte(stmt)) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: SchemaPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "Syntax Error GraphQL (2:1) Unexpected Name") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal( - t, - "Syntax Error GraphQL (2:1) Unexpected Name \"types\"\n\n1: \n2: types User {\n ^\n3: \\u0009name: String\n", - errResponse.Errors[0].Message, - ) -} - -func TestLoadSchemaHandlerWitNoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - stmt := ` -type User { - name: String - age: Int - verified: Boolean - points: Float -}` - - buf := bytes.NewBuffer([]byte(stmt)) - - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: SchemaPath, - Body: buf, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, map[string]any{ - "result": "success", - "collections": []any{ - map[string]any{ - "name": 
"User", - "id": "bafkreiet7xqehjsjsthy6nafvtbz4el376uudhkjyeifuvvsr64se33swm", - "version_id": "bafkreiet7xqehjsjsthy6nafvtbz4el376uudhkjyeifuvvsr64se33swm", - }, - }, - }, v) - - default: - t.Fatalf("data should be of type map[string]any but got %T\n%v", resp.Data, v) - } -} - -func TestGetBlockHandlerWithMultihashError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: BlocksPath + "/1234", - Body: nil, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "illegal base32 data at input byte 0") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "illegal base32 data at input byte 0", errResponse.Errors[0].Message) -} - -func TestGetBlockHandlerWithDSKeyWithNoDB(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - cID, err := cid.Parse("bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm") - if err != nil { - t.Fatal(err) - } - dsKey := dshelp.MultihashToDsKey(cID.Hash()) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: BlocksPath + dsKey.String(), - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestGetBlockHandlerWithNoDB(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: BlocksPath + "/bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm", - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestGetBlockHandlerWithGetBlockstoreError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "GET", - Path: BlocksPath + "/bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm", - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "ipld: could not find bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "ipld: could not find bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm", errResponse.Errors[0].Message) -} - -func TestGetBlockHandlerWithValidBlockstore(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - 
testLoadSchema(t, ctx, defra) - - // add document - stmt := ` -mutation { - create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { - _key - } -}` - - buf := bytes.NewBuffer([]byte(stmt)) - - users := []testUser{} - resp := DataResponse{ - Data: &users, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - if !strings.Contains(users[0].Key, "bae-") { - t.Fatal("expected valid document key") - } - - // get document cid - stmt2 := ` -query { - User (dockey: "%s") { - _version { - cid - } - } -}` - buf2 := bytes.NewBuffer([]byte(fmt.Sprintf(stmt2, users[0].Key))) - - users2 := []testUser{} - resp2 := DataResponse{ - Data: &users2, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf2, - ExpectedStatus: 200, - ResponseData: &resp2, - }) - - _, err := cid.Decode(users2[0].Versions[0].CID) - if err != nil { - t.Fatal(err) - } - - resp3 := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "GET", - Path: BlocksPath + "/" + users2[0].Versions[0].CID, - Body: buf, - ExpectedStatus: 200, - ResponseData: &resp3, - }) - - switch d := resp3.Data.(type) { - case map[string]any: - switch val := d["val"].(type) { - case string: - require.Equal(t, "pGNhZ2UYH2RuYW1lY0JvYmZwb2ludHMYWmh2ZXJpZmllZPU=", val) - default: - t.Fatalf("expecting string but got %T", val) - } - default: - t.Fatalf("expecting map[string]any but got %T", d) - } -} - -func TestPeerIDHandler(t *testing.T) { - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: PeerIDPath, - Body: nil, - ExpectedStatus: 200, - ResponseData: &resp, - ServerOptions: serverOptions{ - peerID: "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", - }, - }) - - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", v["peerID"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } -} - -func TestPeerIDHandlerWithNoPeerIDInContext(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: PeerIDPath, - Body: nil, - ExpectedStatus: 404, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no PeerID available. P2P might be disabled") - require.Equal(t, http.StatusNotFound, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Not Found", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no PeerID available. 
P2P might be disabled", errResponse.Errors[0].Message) -} - -func testRequest(opt testOptions) []byte { - req, err := http.NewRequest(opt.Method, opt.Path, opt.Body) - if err != nil { - opt.Testing.Fatal(err) - } - - for k, v := range opt.Headers { - req.Header.Set(k, v) - } - - q := req.URL.Query() - for k, v := range opt.QueryParams { - q.Add(k, v) - } - req.URL.RawQuery = q.Encode() - - h := newHandler(opt.DB, opt.ServerOptions) - rec := httptest.NewRecorder() - h.ServeHTTP(rec, req) - assert.Equal(opt.Testing, opt.ExpectedStatus, rec.Result().StatusCode) - - resBody, err := io.ReadAll(rec.Result().Body) - if err != nil { - opt.Testing.Fatal(err) - } - - if opt.ResponseData != nil { - err = json.Unmarshal(resBody, &opt.ResponseData) - if err != nil { - opt.Testing.Fatal(err) - } - } - - return resBody -} - -func testSubscriptionRequest(ctx context.Context, opt testOptions, ch chan []byte, errCh chan error) { - req, err := http.NewRequest(opt.Method, opt.Path, opt.Body) - if err != nil { - errCh <- err - return - } - - req = req.WithContext(ctx) - - for k, v := range opt.Headers { - req.Header.Set(k, v) - } - - h := newHandler(opt.DB, opt.ServerOptions) - rec := httptest.NewRecorder() - h.ServeHTTP(rec, req) - require.Equal(opt.Testing, opt.ExpectedStatus, rec.Result().StatusCode) - - respBody, err := io.ReadAll(rec.Result().Body) - if err != nil { - errCh <- err - return - } - - ch <- respBody -} - -func testNewInMemoryDB(t *testing.T, ctx context.Context) client.DB { - // init in memory DB - opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} - rootstore, err := badgerds.NewDatastore("", &opts) - if err != nil { - t.Fatal(err) - } - - options := []db.Option{ - db.WithUpdateEvents(), - } - - defra, err := db.NewDB(ctx, rootstore, options...) - if err != nil { - t.Fatal(err) - } - - return defra -} - -func testLoadSchema(t *testing.T, ctx context.Context, db client.DB) { - stmt := ` -type User { - name: String - age: Int - verified: Boolean - points: Float -}` - _, err := db.AddSchema(ctx, stmt) - if err != nil { - t.Fatal(err) - } -} diff --git a/api/http/logger.go b/api/http/logger.go deleted file mode 100644 index 2a91a271c2..0000000000 --- a/api/http/logger.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
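The `logger.go` file removed below wraps `http.ResponseWriter` so middleware can record the status code and byte count a handler writes. The wrapping technique in isolation, with hypothetical names:

```go
package main

import (
	"log"
	"net/http"
	"time"
)

// statusRecorder embeds the real writer and shadows WriteHeader and Write,
// the same technique loggingResponseWriter below uses.
type statusRecorder struct {
	http.ResponseWriter
	status int
	bytes  int
}

func (r *statusRecorder) WriteHeader(code int) {
	r.status = code
	r.ResponseWriter.WriteHeader(code)
}

func (r *statusRecorder) Write(b []byte) (int, error) {
	r.bytes += len(b)
	return r.ResponseWriter.Write(b)
}

func logware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		start := time.Now()
		rec := &statusRecorder{ResponseWriter: w, status: http.StatusOK}
		next.ServeHTTP(rec, req)
		log.Printf("%s %s -> %d (%dB, %s)",
			req.Method, req.URL.Path, rec.status, rec.bytes, time.Since(start))
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("pong"))
	})
	log.Fatal(http.ListenAndServe("localhost:8080", logware(mux)))
}
```

One behavioural difference worth noting: the sketch accumulates bytes across chunked writes, whereas the deleted `Write` assigns `len(b)` on each call, so its `contentLength` reflects only the most recent chunk.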
- -package http - -import ( - "net/http" - "time" - - "github.com/sourcenetwork/defradb/logging" -) - -type loggingResponseWriter struct { - statusCode int - contentLength int - - http.ResponseWriter -} - -func newLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter { - return &loggingResponseWriter{ - statusCode: http.StatusOK, - contentLength: 0, - ResponseWriter: w, - } -} - -func (lrw *loggingResponseWriter) Flush() { - lrw.ResponseWriter.(http.Flusher).Flush() -} - -func (lrw *loggingResponseWriter) Header() http.Header { - return lrw.ResponseWriter.Header() -} - -func (lrw *loggingResponseWriter) WriteHeader(code int) { - lrw.statusCode = code - lrw.ResponseWriter.WriteHeader(code) -} - -func (lrw *loggingResponseWriter) Write(b []byte) (int, error) { - lrw.contentLength = len(b) - return lrw.ResponseWriter.Write(b) -} - -func loggerMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - start := time.Now() - lrw := newLoggingResponseWriter(rw) - next.ServeHTTP(lrw, req) - elapsed := time.Since(start) - log.Info( - req.Context(), - "Request", - logging.NewKV( - "Method", - req.Method, - ), - logging.NewKV( - "Path", - req.URL.Path, - ), - logging.NewKV( - "Status", - lrw.statusCode, - ), - logging.NewKV( - "LengthBytes", - lrw.contentLength, - ), - logging.NewKV( - "ElapsedTime", - elapsed.String(), - ), - ) - }) -} diff --git a/api/http/logger_test.go b/api/http/logger_test.go deleted file mode 100644 index 9c2791d9df..0000000000 --- a/api/http/logger_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "bufio" - "encoding/json" - "net/http" - "net/http/httptest" - "os" - "path" - "strconv" - "testing" - - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/logging" -) - -func TestNewLoggingResponseWriterLogger(t *testing.T) { - rec := httptest.NewRecorder() - lrw := newLoggingResponseWriter(rec) - - lrw.WriteHeader(400) - assert.Equal(t, 400, lrw.statusCode) - - content := "Hello world!" - - length, err := lrw.Write([]byte(content)) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, length, lrw.contentLength) - assert.Equal(t, rec.Body.String(), content) -} - -func TestLogginResponseWriterWriteWithChunks(t *testing.T) { - rec := httptest.NewRecorder() - lrw := newLoggingResponseWriter(rec) - - content := "Hello world!" 
- contentLength := len(content) - - lrw.Header().Set("Content-Length", strconv.Itoa(contentLength)) - - length1, err := lrw.Write([]byte(content[:contentLength/2])) - if err != nil { - t.Fatal(err) - } - - length2, err := lrw.Write([]byte(content[contentLength/2:])) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, contentLength, length1+length2) - assert.Equal(t, rec.Body.String(), content) -} - -func TestLoggerKeyValueOutput(t *testing.T) { - dir := t.TempDir() - - // send logs to temp file so we can inspect it - logFile := path.Join(dir, "http_test.log") - - req, err := http.NewRequest("GET", "/ping", nil) - if err != nil { - t.Fatal(err) - } - - rec2 := httptest.NewRecorder() - - log.ApplyConfig(logging.Config{ - EncoderFormat: logging.NewEncoderFormatOption(logging.JSON), - OutputPaths: []string{logFile}, - }) - loggerMiddleware(http.HandlerFunc(pingHandler)).ServeHTTP(rec2, req) - assert.Equal(t, 200, rec2.Result().StatusCode) - - // inspect the log file - kv, err := readLog(logFile) - if err != nil { - t.Fatal(err) - } - - // check that everything is as expected - assert.Equal(t, "{\"data\":{\"response\":\"pong\"}}", rec2.Body.String()) - assert.Equal(t, "INFO", kv["level"]) - assert.Equal(t, "http", kv["logger"]) - assert.Equal(t, "Request", kv["msg"]) - assert.Equal(t, "GET", kv["Method"]) - assert.Equal(t, "/ping", kv["Path"]) - assert.Equal(t, float64(200), kv["Status"]) - assert.Equal(t, float64(28), kv["LengthBytes"]) -} - -func readLog(path string) (map[string]any, error) { - // inspect the log file - f, err := os.Open(path) - if err != nil { - return nil, errors.WithStack(err) - } - - scanner := bufio.NewScanner(f) - scanner.Scan() - logLine := scanner.Text() - - kv := make(map[string]any) - err = json.Unmarshal([]byte(logLine), &kv) - if err != nil { - return nil, errors.WithStack(err) - } - - return kv, nil -} diff --git a/api/http/request_result.go b/api/http/request_result.go deleted file mode 100644 index f5bf7912e9..0000000000 --- a/api/http/request_result.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import "github.com/sourcenetwork/defradb/client" - -type GQLResult struct { - Errors []string `json:"errors,omitempty"` - - Data any `json:"data"` -} - -func newGQLResult(r client.GQLResult) *GQLResult { - errors := make([]string, len(r.Errors)) - for i := range r.Errors { - errors[i] = r.Errors[i].Error() - } - - return &GQLResult{ - Errors: errors, - Data: r.Data, - } -} diff --git a/api/http/router.go b/api/http/router.go deleted file mode 100644 index 2d54a16560..0000000000 --- a/api/http/router.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
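A detail from `TestLoggerKeyValueOutput` above that is easy to trip on: the assertions compare `Status` and `LengthBytes` against `float64` values. That is a property of `encoding/json`, shown in isolation:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// encoding/json decodes every JSON number into float64 when the
	// destination is map[string]any, hence float64(200) in the test.
	kv := map[string]any{}
	if err := json.Unmarshal([]byte(`{"Status": 200, "LengthBytes": 28}`), &kv); err != nil {
		panic(err)
	}
	fmt.Printf("%T %T\n", kv["Status"], kv["LengthBytes"]) // float64 float64
}
```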
- -package http - -import ( - "net/http" - "net/url" - "path" - "strings" - - "github.com/pkg/errors" -) - -const ( - // Version is the current version of the HTTP API. - Version string = "v0" - versionedAPIPath string = "/api/" + Version - - RootPath string = versionedAPIPath + "" - PingPath string = versionedAPIPath + "/ping" - DumpPath string = versionedAPIPath + "/debug/dump" - BlocksPath string = versionedAPIPath + "/blocks" - GraphQLPath string = versionedAPIPath + "/graphql" - SchemaPath string = versionedAPIPath + "/schema" - SchemaMigrationPath string = SchemaPath + "/migration" - IndexPath string = versionedAPIPath + "/index" - PeerIDPath string = versionedAPIPath + "/peerid" - BackupPath string = versionedAPIPath + "/backup" - ExportPath string = BackupPath + "/export" - ImportPath string = BackupPath + "/import" -) - -// playgroundHandler is set when building with the playground build tag -var playgroundHandler http.Handler - -func setRoutes(h *handler) *handler { - h.Get(RootPath, rootHandler) - h.Get(PingPath, pingHandler) - h.Get(DumpPath, dumpHandler) - h.Get(BlocksPath+"/{cid}", getBlockHandler) - h.Get(GraphQLPath, execGQLHandler) - h.Post(GraphQLPath, execGQLHandler) - h.Get(SchemaPath, listSchemaHandler) - h.Post(SchemaPath, loadSchemaHandler) - h.Patch(SchemaPath, patchSchemaHandler) - h.Post(SchemaMigrationPath, setMigrationHandler) - h.Get(SchemaMigrationPath, getMigrationHandler) - h.Post(IndexPath, createIndexHandler) - h.Delete(IndexPath, dropIndexHandler) - h.Get(IndexPath, listIndexHandler) - h.Get(PeerIDPath, peerIDHandler) - h.Post(ExportPath, exportHandler) - h.Post(ImportPath, importHandler) - h.Handle("/*", playgroundHandler) - - return h -} - -// JoinPaths takes a base path and any number of additional paths -// and combines them safely to form a full URL path. -// The base must start with a http or https. -func JoinPaths(base string, paths ...string) (*url.URL, error) { - if !strings.HasPrefix(base, "http") { - return nil, ErrSchema - } - - u, err := url.Parse(base) - if err != nil { - return nil, errors.WithStack(err) - } - - u.Path = path.Join(u.Path, strings.Join(paths, "/")) - - return u, nil -} diff --git a/api/http/router_test.go b/api/http/router_test.go deleted file mode 100644 index e43260ef43..0000000000 --- a/api/http/router_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
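`JoinPaths` above rejects bases without an `http` prefix and then defers to `url.Parse` plus `path.Join`. Worth knowing: `path.Join` also cleans the joined path, so duplicate slashes and `.` segments collapse. A standalone illustration, not DefraDB code:

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	u, err := url.Parse("http://localhost:9181")
	if err != nil {
		panic(err)
	}
	// path.Join normalizes as it joins; "//blocks/./abc" collapses too.
	u.Path = path.Join(u.Path, "api/v0", "blocks", "some-cid")
	fmt.Println(u.String()) // http://localhost:9181/api/v0/blocks/some-cid
}
```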
- -package http - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestJoinPathsWithBase(t *testing.T) { - path, err := JoinPaths("http://localhost:9181", BlocksPath, "cid_of_some_sort") - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "http://localhost:9181"+BlocksPath+"/cid_of_some_sort", path.String()) -} - -func TestJoinPathsWithNoBase(t *testing.T) { - _, err := JoinPaths("", BlocksPath, "cid_of_some_sort") - assert.ErrorIs(t, ErrSchema, err) -} - -func TestJoinPathsWithBaseWithoutHttpPrefix(t *testing.T) { - _, err := JoinPaths("localhost:9181", BlocksPath, "cid_of_some_sort") - assert.ErrorIs(t, ErrSchema, err) -} - -func TestJoinPathsWithNoPaths(t *testing.T) { - path, err := JoinPaths("http://localhost:9181") - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "http://localhost:9181", path.String()) -} - -func TestJoinPathsWithInvalidCharacter(t *testing.T) { - _, err := JoinPaths("https://%gh&%ij") - assert.Error(t, err) -} diff --git a/api/http/server.go b/api/http/server.go deleted file mode 100644 index a71dccb0ec..0000000000 --- a/api/http/server.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "net/http" - "path" - "strings" - - "github.com/sourcenetwork/immutable" - "golang.org/x/crypto/acme/autocert" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" -) - -const ( - // These constants are best effort durations that fit our current API - // and possibly prevent from running out of file descriptors. - // readTimeout = 5 * time.Second - // writeTimeout = 10 * time.Second - // idleTimeout = 120 * time.Second - - // Temparily disabling timeouts until [this proposal](https://github.com/golang/go/issues/54136) is merged. - // https://github.com/sourcenetwork/defradb/issues/927 - readTimeout = 0 - writeTimeout = 0 - idleTimeout = 0 -) - -const ( - httpPort = ":80" - httpsPort = ":443" -) - -// Server struct holds the Handler for the HTTP API. -type Server struct { - options serverOptions - listener net.Listener - certManager *autocert.Manager - // address that is assigned to the server on listen - address string - - http.Server -} - -type serverOptions struct { - // list of allowed origins for CORS. - allowedOrigins []string - // ID of the server node. - peerID string - // when the value is present, the server will run with tls - tls immutable.Option[tlsOptions] - // root directory for the node config. - rootDir string - // The domain for the API (optional). - domain immutable.Option[string] -} - -type tlsOptions struct { - // Public key for TLS. Ignored if domain is set. - pubKey string - // Private key for TLS. Ignored if domain is set. - privKey string - // email address for the CA to send problem notifications (optional) - email string - // specify the tls port - port string -} - -// NewServer instantiates a new server with the given http.Handler. 
-func NewServer(db client.DB, options ...func(*Server)) *Server { - srv := &Server{ - Server: http.Server{ - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - IdleTimeout: idleTimeout, - }, - } - - for _, opt := range append(options, DefaultOpts()) { - opt(srv) - } - - srv.Handler = newHandler(db, srv.options) - - return srv -} - -func newHTTPRedirServer(m *autocert.Manager) *Server { - srv := &Server{ - Server: http.Server{ - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - IdleTimeout: idleTimeout, - }, - } - - srv.Addr = httpPort - srv.Handler = m.HTTPHandler(nil) - - return srv -} - -// DefaultOpts returns the default options for the server. -func DefaultOpts() func(*Server) { - return func(s *Server) { - if s.Addr == "" { - s.Addr = "localhost:9181" - } - } -} - -// WithAllowedOrigins returns an option to set the allowed origins for CORS. -func WithAllowedOrigins(origins ...string) func(*Server) { - return func(s *Server) { - s.options.allowedOrigins = append(s.options.allowedOrigins, origins...) - } -} - -// WithAddress returns an option to set the address for the server. -func WithAddress(addr string) func(*Server) { - return func(s *Server) { - s.Addr = addr - - // If the address is not localhost, we check to see if it's a valid IP address. - // If it's not a valid IP, we assume that it's a domain name to be used with TLS. - if !strings.HasPrefix(addr, "localhost:") && !strings.HasPrefix(addr, ":") { - host, _, err := net.SplitHostPort(addr) - if err != nil { - host = addr - } - ip := net.ParseIP(host) - if ip == nil { - s.Addr = httpPort - s.options.domain = immutable.Some(host) - } - } - } -} - -// WithCAEmail returns an option to set the email address for the CA to send problem notifications. -func WithCAEmail(email string) func(*Server) { - return func(s *Server) { - tlsOpt := s.options.tls.Value() - tlsOpt.email = email - s.options.tls = immutable.Some(tlsOpt) - } -} - -// WithPeerID returns an option to set the identifier of the server node. -func WithPeerID(id string) func(*Server) { - return func(s *Server) { - s.options.peerID = id - } -} - -// WithRootDir returns an option to set the root directory for the node config. -func WithRootDir(rootDir string) func(*Server) { - return func(s *Server) { - s.options.rootDir = rootDir - } -} - -// WithSelfSignedCert returns an option to set the public and private keys for TLS. -func WithSelfSignedCert(pubKey, privKey string) func(*Server) { - return func(s *Server) { - tlsOpt := s.options.tls.Value() - tlsOpt.pubKey = pubKey - tlsOpt.privKey = privKey - s.options.tls = immutable.Some(tlsOpt) - } -} - -// WithTLS returns an option to enable TLS. -func WithTLS() func(*Server) { - return func(s *Server) { - tlsOpt := s.options.tls.Value() - tlsOpt.port = httpsPort - s.options.tls = immutable.Some(tlsOpt) - } -} - -// WithTLSPort returns an option to set the port for TLS. -func WithTLSPort(port int) func(*Server) { - return func(s *Server) { - tlsOpt := s.options.tls.Value() - tlsOpt.port = fmt.Sprintf(":%d", port) - s.options.tls = immutable.Some(tlsOpt) - } -} - -// Listen creates a new net.Listener and saves it on the receiver. -func (s *Server) Listen(ctx context.Context) error { - var err error - if s.options.tls.HasValue() { - return s.listenWithTLS(ctx) - } - - lc := net.ListenConfig{} - s.listener, err = lc.Listen(ctx, "tcp", s.Addr) - if err != nil { - return errors.WithStack(err) - } - - // Save the address on the server in case the port was set to random - // and that we want to see what was assigned. 
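All the `With...` constructors above follow Go's functional-options pattern. Condensed to its skeleton, with hypothetical names rather than the DefraDB API:

```go
package main

import "fmt"

type server struct{ addr string }

type option func(*server)

func withAddress(addr string) option {
	return func(s *server) { s.addr = addr }
}

func newServer(opts ...option) *server {
	s := &server{}
	for _, opt := range opts {
		opt(s) // each option mutates the server in declaration order
	}
	if s.addr == "" {
		s.addr = "localhost:9181" // default fills only unset fields
	}
	return s
}

func main() {
	fmt.Println(newServer(withAddress(":8080")).addr) // :8080
}
```

Note the design choice in the deleted `NewServer`: it appends `DefaultOpts()` after the caller's options, and `DefaultOpts` only fills fields that are still empty, so defaults can never clobber an explicit setting.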
- s.address = s.listener.Addr().String() - - return nil -} - -func (s *Server) listenWithTLS(ctx context.Context) error { - cfg := &tls.Config{ - MinVersion: tls.VersionTLS12, - // We only allow cipher suites that are marked secure - // by ssllabs - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - }, - ServerName: "DefraDB", - } - - if s.options.domain.HasValue() && s.options.domain.Value() != "" { - s.Addr = s.options.tls.Value().port - - if s.options.tls.Value().email == "" || s.options.tls.Value().email == config.DefaultAPIEmail { - return ErrNoEmail - } - - certCache := path.Join(s.options.rootDir, "autocerts") - - log.FeedbackInfo( - ctx, - "Generating auto certificate", - logging.NewKV("Domain", s.options.domain.Value()), - logging.NewKV("Certificate cache", certCache), - ) - - m := &autocert.Manager{ - Cache: autocert.DirCache(certCache), - Prompt: autocert.AcceptTOS, - Email: s.options.tls.Value().email, - HostPolicy: autocert.HostWhitelist(s.options.domain.Value()), - } - - cfg.GetCertificate = m.GetCertificate - - // We set manager on the server instance to later start - // a redirection server. - s.certManager = m - } else { - // When not using auto cert, we create a self signed certificate - // with the provided public and prive keys. - log.FeedbackInfo(ctx, "Generating self signed certificate") - - cert, err := tls.LoadX509KeyPair( - s.options.tls.Value().privKey, - s.options.tls.Value().pubKey, - ) - if err != nil { - return errors.WithStack(err) - } - - cfg.Certificates = []tls.Certificate{cert} - } - - var err error - s.listener, err = tls.Listen("tcp", s.Addr, cfg) - if err != nil { - return errors.WithStack(err) - } - - // Save the address on the server in case the port was set to random - // and that we want to see what was assigned. - s.address = s.listener.Addr().String() - - return nil -} - -// Run calls Serve with the receiver's listener. -func (s *Server) Run(ctx context.Context) error { - if s.listener == nil { - return ErrNoListener - } - - if s.certManager != nil { - // When using TLS it's important to redirect http requests to https - go func() { - srv := newHTTPRedirServer(s.certManager) - err := srv.ListenAndServe() - if err != nil && !errors.Is(err, http.ErrServerClosed) { - log.Info(ctx, "Something went wrong with the redirection server", logging.NewKV("Error", err)) - } - }() - } - return s.Serve(s.listener) -} - -// AssignedAddr returns the address that was assigned to the server on calls to listen. 
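`listenWithTLS` above wires `golang.org/x/crypto/acme/autocert` when a domain is configured. A minimal sketch of that flow in isolation; the domain and cache path here are placeholders:

```go
package main

import (
	"crypto/tls"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	m := &autocert.Manager{
		Cache:      autocert.DirCache("autocerts"), // on-disk certificate cache
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("db.example.com"), // placeholder domain
	}

	srv := &http.Server{
		Addr:      ":443",
		TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
	}

	// Port 80 answers ACME HTTP-01 challenges and redirects everything else,
	// the same job newHTTPRedirServer does above.
	go func() { _ = http.ListenAndServe(":80", m.HTTPHandler(nil)) }()

	_ = srv.ListenAndServeTLS("", "") // empty paths: certs come from GetCertificate
}
```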
-func (s *Server) AssignedAddr() string { - return s.address -} diff --git a/cli/backup_export.go b/cli/backup_export.go index 32184bfe35..9e8d1c056e 100644 --- a/cli/backup_export.go +++ b/cli/backup_export.go @@ -11,24 +11,16 @@ package cli import ( - "bytes" - "encoding/json" - "io" - "net/http" - "os" "strings" "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" ) const jsonFileType = "json" -func MakeBackupExportCommand(cfg *config.Config) *cobra.Command { +func MakeBackupExportCommand() *cobra.Command { var collections []string var pretty bool var format string @@ -44,21 +36,14 @@ If the --pretty flag is provided, the JSON will be pretty printed. Example: export data for the 'Users' collection: defradb client export --collection Users user_data.json`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.ExactArgs(1)(cmd, args); err != nil { - return NewErrInvalidArgumentLength(err, 1) - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) (err error) { + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + if !isValidExportFormat(format) { return ErrInvalidExportFormat } outputPath := args[0] - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.ExportPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } for i := range collections { collections[i] = strings.Trim(collections[i], " ") @@ -71,57 +56,7 @@ Example: export data for the 'Users' collection: Collections: collections, } - b, err := json.Marshal(data) - if err != nil { - return err - } - - res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(b)) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return err - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - type exportResponse struct { - Errors []struct { - Message string `json:"message"` - } `json:"errors"` - } - r := exportResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to export data", - logging.NewKV("Errors", r.Errors)) - } else if len(collections) == 1 { - log.FeedbackInfo(cmd.Context(), "Data exported for collection "+collections[0]) - } else if len(collections) > 1 { - log.FeedbackInfo(cmd.Context(), "Data exported for collections "+strings.Join(collections, ", ")) - } else { - log.FeedbackInfo(cmd.Context(), "Data exported for all collections") - } - } - return nil + return store.BasicExport(cmd.Context(), &data) }, } cmd.Flags().BoolVarP(&pretty, "pretty", "p", false, "Set the output JSON to be pretty printed") diff --git a/cli/backup_export_test.go b/cli/backup_export_test.go deleted file mode 100644 index 9539a1cdb1..0000000000 --- a/cli/backup_export_test.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - "encoding/json" - "os" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" -) - -func TestBackupExportCmd_WithNoArgument_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - - dbExportCmd := MakeBackupExportCommand(cfg) - err := dbExportCmd.ValidateArgs([]string{}) - require.ErrorIs(t, err, ErrInvalidArgumentLength) -} - -func TestBackupExportCmd_WithInvalidExportFormat_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - dbExportCmd := MakeBackupExportCommand(cfg) - - filepath := t.TempDir() + "/test.json" - - dbExportCmd.Flags().Set("format", "invalid") - err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.ErrorIs(t, err, ErrInvalidExportFormat) -} - -func TestBackupExportCmd_IfInvalidAddress_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - cfg.API.Address = "invalid address" - - filepath := t.TempDir() + "/test.json" - - dbExportCmd := MakeBackupExportCommand(cfg) - err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) -} - -func TestBackupExportCmd_WithEmptyDatastore_NoError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - - require.Len(t, b, 2) // file should be an empty json object -} - -func TestBackupExportCmd_WithInvalidCollection_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - dbExportCmd.Flags().Set("collections", "User") - err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Failed to export data")) -} - -func TestBackupExportCmd_WithAllCollection_NoError(t *testing.T) { - ctx := context.Background() - - cfg, di, close := startTestNode(t) - defer close() - - _, err := di.db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) - require.NoError(t, err) - - col, err := di.db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - - 
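The assertion that follows hard-codes a `bae-...` key. That works because DefraDB document keys are content-derived: the same document JSON always yields the same key. A small sketch, assuming the `Document` type exposes its key via `Key()` as this era's client API does:

```go
package main

import (
	"fmt"

	"github.com/sourcenetwork/defradb/client"
)

func main() {
	doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`))
	if err != nil {
		panic(err)
	}
	// The key is derived from the document content, so re-running this
	// prints the same value, which is what lets the tests hard-code it.
	fmt.Println(doc.Key().String())
}
```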
require.Equal( - t, - `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, - string(b), - ) -} - -func TestBackupExportCmd_WithAllCollectionAndPrettyFormating_NoError(t *testing.T) { - ctx := context.Background() - - cfg, di, close := startTestNode(t) - defer close() - - _, err := di.db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) - require.NoError(t, err) - - col, err := di.db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - dbExportCmd.Flags().Set("pretty", "true") - err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - - require.Equal( - t, - `{ - "User": [ - { - "_key": "bae-e933420a-988a-56f8-8952-6c245aebd519", - "_newKey": "bae-e933420a-988a-56f8-8952-6c245aebd519", - "age": 30, - "name": "John" - } - ] -}`, - string(b), - ) -} - -func TestBackupExportCmd_WithSingleCollection_NoError(t *testing.T) { - ctx := context.Background() - - cfg, di, close := startTestNode(t) - defer close() - - _, err := di.db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) - require.NoError(t, err) - - col, err := di.db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - dbExportCmd.Flags().Set("collections", "User") - err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Data exported for collection User")) - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - - require.Equal( - t, - `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, - string(b), - ) -} - -func TestBackupExportCmd_WithMultipleCollections_NoError(t *testing.T) { - ctx := context.Background() - - cfg, di, close := startTestNode(t) - defer close() - - _, err := di.db.AddSchema(ctx, `type User { - name: String - age: Int - } - - type Address { - street: String - city: String - }`) - require.NoError(t, err) - - doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) - require.NoError(t, err) - - col1, err := di.db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - err = col1.Create(ctx, doc1) - require.NoError(t, err) - - doc2, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) - require.NoError(t, err) - - col2, err := di.db.GetCollectionByName(ctx, "Address") - require.NoError(t, err) - - err = col2.Create(ctx, doc2) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := 
simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - dbExportCmd.Flags().Set("collections", "User, Address") - err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Data exported for collections User, Address")) - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - fileMap := map[string]any{} - err = json.Unmarshal(b, &fileMap) - require.NoError(t, err) - - expectedMap := map[string]any{} - data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`) - err = json.Unmarshal(data, &expectedMap) - require.NoError(t, err) - - require.EqualValues(t, expectedMap, fileMap) -} diff --git a/cli/backup_import.go b/cli/backup_import.go index 6802230aa0..35af345a0a 100644 --- a/cli/backup_import.go +++ b/cli/backup_import.go @@ -11,20 +11,10 @@ package cli import ( - "bytes" - "encoding/json" - "io" - "net/http" - "os" - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" ) -func MakeBackupImportCommand(cfg *config.Config) *cobra.Command { +func MakeBackupImportCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "import ", Short: "Import a JSON data file to the database", @@ -32,66 +22,10 @@ func MakeBackupImportCommand(cfg *config.Config) *cobra.Command { Example: import data to the database: defradb client import user_data.json`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.ExactArgs(1)(cmd, args); err != nil { - return NewErrInvalidArgumentLength(err, 1) - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) (err error) { - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.ImportPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } - - inputPath := args[0] - data := map[string]string{ - "filepath": inputPath, - } - - b, err := json.Marshal(data) - if err != nil { - return err - } - - res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(b)) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return err - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - r := indexCreateResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to import data", - logging.NewKV("Errors", r.Errors)) - } else { - log.FeedbackInfo(cmd.Context(), "Successfully imported data from file", - logging.NewKV("File", inputPath)) - } - } - return nil + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + return store.BasicImport(cmd.Context(), args[0]) }, } return cmd diff --git a/cli/backup_import_test.go b/cli/backup_import_test.go deleted file mode 100644 index 
101792dd0c..0000000000 --- a/cli/backup_import_test.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - "os" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" -) - -func TestBackupImportCmd_WithNoArgument_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - - dbImportCmd := MakeBackupImportCommand(cfg) - err := dbImportCmd.ValidateArgs([]string{}) - require.ErrorIs(t, err, ErrInvalidArgumentLength) -} - -func TestBackupImportCmd_IfInvalidAddress_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - cfg.API.Address = "invalid address" - - filepath := t.TempDir() + "/test.json" - - dbImportCmd := MakeBackupImportCommand(cfg) - err := dbImportCmd.RunE(dbImportCmd, []string{filepath}) - require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) -} - -func TestBackupImportCmd_WithNonExistantFile_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbImportCmd := MakeBackupImportCommand(cfg) - err := dbImportCmd.RunE(dbImportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Failed to import data")) -} - -func TestBackupImportCmd_WithEmptyDatastore_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - filepath := t.TempDir() + "/test.json" - - err := os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), - 0664, - ) - require.NoError(t, err) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbImportCmd := MakeBackupImportCommand(cfg) - err = dbImportCmd.RunE(dbImportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Failed to import data")) -} - -func TestBackupImportCmd_WithExistingCollection_NoError(t *testing.T) { - ctx := context.Background() - - cfg, di, close := startTestNode(t) - defer close() - - _, err := di.db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - - err = os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), - 0664, - ) - require.NoError(t, err) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbImportCmd := MakeBackupImportCommand(cfg) - err = dbImportCmd.RunE(dbImportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Successfully imported data from file")) - - col, err := di.db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - key, err := 
client.NewDocKeyFromString("bae-e933420a-988a-56f8-8952-6c245aebd519") - require.NoError(t, err) - doc, err := col.Get(ctx, key, false) - require.NoError(t, err) - - val, err := doc.Get("name") - require.NoError(t, err) - - require.Equal(t, "John", val.(string)) -} diff --git a/cli/blocks_get.go b/cli/blocks_get.go deleted file mode 100644 index c3519f99e7..0000000000 --- a/cli/blocks_get.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "io" - "net/http" - "os" - - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" -) - -func MakeBlocksGetCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "get [CID]", - Short: "Get a block by its CID from the blockstore", - RunE: func(cmd *cobra.Command, args []string) (err error) { - if len(args) != 1 { - return NewErrMissingArg("CID") - } - cid := args[0] - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.BlocksPath, cid) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToReadResponseBody(err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return NewErrFailedToStatStdOut(err) - } - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - graphlErr, err := hasGraphQLErrors(response) - if err != nil { - return NewErrFailedToHandleGQLErrors(err) - } - indentedResult, err := indentJSON(response) - if err != nil { - return NewErrFailedToPrettyPrintResponse(err) - } - if graphlErr { - log.FeedbackError(cmd.Context(), indentedResult) - } else { - log.FeedbackInfo(cmd.Context(), indentedResult) - } - } - return nil - }, - } - return cmd -} diff --git a/cli/cli.go b/cli/cli.go index 707adbab7c..0cb9fbb5bc 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -14,176 +14,104 @@ Package cli provides the command-line interface. package cli import ( - "bufio" - "bytes" - "context" - "encoding/json" - "os" - "strings" - "github.com/spf13/cobra" "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" ) var log = logging.MustNewLogger("cli") -const badgerDatastoreName = "badger" - -// Errors with how the command is invoked by user -var usageErrors = []string{ - // cobra errors - subject to change with new versions of cobra - "flag needs an argument", - "invalid syntax", - "unknown flag", - "unknown shorthand flag", - "unknown command", - // custom defradb errors - errMissingArg, - errMissingArgs, - errTooManyArgs, -} - -type DefraCommand struct { - RootCmd *cobra.Command - Cfg *config.Config -} - // NewDefraCommand returns the root command instantiated with its tree of subcommands.
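// With the refactor below, NewDefraCommand returns a *cobra.Command
// directly instead of a DefraCommand wrapper, so an entry point shrinks to
// a few lines. A minimal sketch, assuming cfg still comes from
// config.DefaultConfig(); this hypothetical main is illustrative and not
// part of this patch:
package main

import (
	"context"
	"os"

	"github.com/sourcenetwork/defradb/cli"
	"github.com/sourcenetwork/defradb/config"
)

func main() {
	cfg := config.DefaultConfig()
	// cobra's ExecuteContext drives the whole command tree returned below.
	if err := cli.NewDefraCommand(cfg).ExecuteContext(context.Background()); err != nil {
		os.Exit(1)
	}
}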
-func NewDefraCommand(cfg *config.Config) DefraCommand { - rootCmd := MakeRootCommand(cfg) - rpcCmd := MakeRPCCommand(cfg) - blocksCmd := MakeBlocksCommand() - schemaCmd := MakeSchemaCommand() - schemaMigrationCmd := MakeSchemaMigrationCommand() - indexCmd := MakeIndexCommand() - clientCmd := MakeClientCommand() - backupCmd := MakeBackupCommand() - rpcReplicatorCmd := MakeReplicatorCommand() - p2pCollectionCmd := MakeP2PCollectionCommand() - p2pCollectionCmd.AddCommand( - MakeP2PCollectionAddCommand(cfg), - MakeP2PCollectionRemoveCommand(cfg), - MakeP2PCollectionGetallCommand(cfg), +func NewDefraCommand(cfg *config.Config) *cobra.Command { + p2p_collection := MakeP2PCollectionCommand() + p2p_collection.AddCommand( + MakeP2PCollectionAddCommand(), + MakeP2PCollectionRemoveCommand(), + MakeP2PCollectionGetAllCommand(), + ) + + p2p_replicator := MakeP2PReplicatorCommand() + p2p_replicator.AddCommand( + MakeP2PReplicatorGetAllCommand(), + MakeP2PReplicatorSetCommand(), + MakeP2PReplicatorDeleteCommand(), ) - rpcReplicatorCmd.AddCommand( - MakeReplicatorGetallCommand(cfg), - MakeReplicatorSetCommand(cfg), - MakeReplicatorDeleteCommand(cfg), + + p2p := MakeP2PCommand() + p2p.AddCommand( + p2p_replicator, + p2p_collection, + MakeP2PInfoCommand(), ) - rpcCmd.AddCommand( - rpcReplicatorCmd, - p2pCollectionCmd, + + schema_migrate := MakeSchemaMigrationCommand() + schema_migrate.AddCommand( + MakeSchemaMigrationSetCommand(), + MakeSchemaMigrationGetCommand(), + MakeSchemaMigrationReloadCommand(), + MakeSchemaMigrationUpCommand(), + MakeSchemaMigrationDownCommand(), ) - blocksCmd.AddCommand( - MakeBlocksGetCommand(cfg), + + schema := MakeSchemaCommand() + schema.AddCommand( + MakeSchemaAddCommand(), + MakeSchemaPatchCommand(), + MakeSchemaSetDefaultCommand(), + schema_migrate, ) - schemaMigrationCmd.AddCommand( - MakeSchemaMigrationSetCommand(cfg), - MakeSchemaMigrationGetCommand(cfg), + + index := MakeIndexCommand() + index.AddCommand( + MakeIndexCreateCommand(), + MakeIndexDropCommand(), + MakeIndexListCommand(), ) - schemaCmd.AddCommand( - MakeSchemaAddCommand(cfg), - MakeSchemaListCommand(cfg), - MakeSchemaPatchCommand(cfg), - schemaMigrationCmd, + + backup := MakeBackupCommand() + backup.AddCommand( + MakeBackupExportCommand(), + MakeBackupImportCommand(), ) - indexCmd.AddCommand( - MakeIndexCreateCommand(cfg), - MakeIndexDropCommand(cfg), - MakeIndexListCommand(cfg), + + tx := MakeTxCommand() + tx.AddCommand( + MakeTxCreateCommand(cfg), + MakeTxCommitCommand(cfg), + MakeTxDiscardCommand(cfg), ) - backupCmd.AddCommand( - MakeBackupExportCommand(cfg), - MakeBackupImportCommand(cfg), + + collection := MakeCollectionCommand(cfg) + collection.AddCommand( + MakeCollectionGetCommand(), + MakeCollectionKeysCommand(), + MakeCollectionDeleteCommand(), + MakeCollectionUpdateCommand(), + MakeCollectionCreateCommand(), + MakeCollectionDescribeCommand(), ) - clientCmd.AddCommand( - MakeDumpCommand(cfg), - MakePingCommand(cfg), - MakeRequestCommand(cfg), - MakePeerIDCommand(cfg), - schemaCmd, - indexCmd, - rpcCmd, - blocksCmd, - backupCmd, + + client := MakeClientCommand(cfg) + client.AddCommand( + MakeDumpCommand(), + MakeRequestCommand(), + schema, + index, + p2p, + backup, + tx, + collection, ) - rootCmd.AddCommand( - clientCmd, + + root := MakeRootCommand(cfg) + root.AddCommand( + client, MakeStartCommand(cfg), MakeServerDumpCmd(cfg), MakeVersionCommand(), MakeInitCommand(cfg), ) - return DefraCommand{rootCmd, cfg} -} - -func (defraCmd *DefraCommand) Execute(ctx context.Context) error { - // Silence 
cobra's default output to control usage and error display. - defraCmd.RootCmd.SilenceUsage = true - defraCmd.RootCmd.SilenceErrors = true - defraCmd.RootCmd.SetOut(os.Stdout) - cmd, err := defraCmd.RootCmd.ExecuteContextC(ctx) - if err != nil { - // Intentional cancellation. - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return nil - } - // User error. - for _, cobraError := range usageErrors { - if strings.HasPrefix(err.Error(), cobraError) { - log.FeedbackErrorE(ctx, "Usage error", err) - if usageErr := cmd.Usage(); usageErr != nil { - log.FeedbackFatalE(ctx, "error displaying usage help", usageErr) - } - return err - } - } - // Internal error. - log.FeedbackErrorE(ctx, "Execution error", err) - return err - } - return nil -} - -func isFileInfoPipe(fi os.FileInfo) bool { - return fi.Mode()&os.ModeNamedPipe != 0 -} - -func readStdin() (string, error) { - var s strings.Builder - scanner := bufio.NewScanner(os.Stdin) - for scanner.Scan() { - s.Write(scanner.Bytes()) - } - if err := scanner.Err(); err != nil { - return "", errors.Wrap("reading standard input", err) - } - return s.String(), nil -} - -func indentJSON(b []byte) (string, error) { - var indentedJSON bytes.Buffer - err := json.Indent(&indentedJSON, b, "", " ") - return indentedJSON.String(), err -} - -type graphqlErrors struct { - Errors any `json:"errors"` -} - -func hasGraphQLErrors(buf []byte) (bool, error) { - errs := graphqlErrors{} - err := json.Unmarshal(buf, &errs) - if err != nil { - return false, errors.Wrap("couldn't parse GraphQL response %w", err) - } - if errs.Errors != nil { - return true, nil - } else { - return false, nil - } + return root } diff --git a/cli/cli_test.go b/cli/cli_test.go deleted file mode 100644 index 877dd7b69f..0000000000 --- a/cli/cli_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "testing" - - "github.com/spf13/cobra" - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/config" -) - -// Verify that the top-level commands are registered, and if particular ones have subcommands. 
-func TestNewDefraCommand(t *testing.T) { - expectedCommandNames := []string{ - "client", - "init", - "server-dump", - "start", - "version", - } - actualCommandNames := []string{} - r := NewDefraCommand(config.DefaultConfig()) - for _, c := range r.RootCmd.Commands() { - actualCommandNames = append(actualCommandNames, c.Name()) - } - for _, expectedCommandName := range expectedCommandNames { - assert.Contains(t, actualCommandNames, expectedCommandName) - } - for _, c := range r.RootCmd.Commands() { - if c.Name() == "client" { - assert.NotEmpty(t, c.Commands()) - } - } -} - -func TestAllHaveUsage(t *testing.T) { - cfg := config.DefaultConfig() - defra := NewDefraCommand(cfg) - walkCommandTree(t, defra.RootCmd, func(c *cobra.Command) { - assert.NotEmpty(t, c.Use) - }) -} - -func walkCommandTree(t *testing.T, cmd *cobra.Command, f func(*cobra.Command)) { - f(cmd) - for _, c := range cmd.Commands() { - walkCommandTree(t, c, f) - } -} diff --git a/cli/client.go b/cli/client.go index 2456df8d43..8866294f69 100644 --- a/cli/client.go +++ b/cli/client.go @@ -12,15 +12,27 @@ package cli import ( "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/config" ) -func MakeClientCommand() *cobra.Command { +func MakeClientCommand(cfg *config.Config) *cobra.Command { + var txID uint64 var cmd = &cobra.Command{ Use: "client", Short: "Interact with a DefraDB node", Long: `Interact with a DefraDB node. Execute queries, add schema types, obtain node info, etc.`, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := loadConfig(cfg); err != nil { + return err + } + if err := setTransactionContext(cmd, cfg, txID); err != nil { + return err + } + return setStoreContext(cmd, cfg) + }, } - + cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID") return cmd } diff --git a/cli/collection.go b/cli/collection.go new file mode 100644 index 0000000000..e21c29283b --- /dev/null +++ b/cli/collection.go @@ -0,0 +1,77 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
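// The collection subcommands that follow all resolve shared state through
// cmd.Context(). A hypothetical sketch of the context-key plumbing they
// assume; the real definitions live elsewhere in this patch series and the
// names here are illustrative only:
package cli

import (
	"github.com/spf13/cobra"

	"github.com/sourcenetwork/defradb/client"
)

type contextKey string

const (
	// txContextKey holds a datastore.Txn when the --tx flag is set.
	txContextKey = contextKey("tx")
	// colContextKey holds the client.Collection selected by the
	// --name/--schema/--version flags below.
	colContextKey = contextKey("col")
)

// tryGetCollectionContext reports whether the parent command's
// PersistentPreRunE resolved a collection into the context.
func tryGetCollectionContext(cmd *cobra.Command) (client.Collection, bool) {
	col, ok := cmd.Context().Value(colContextKey).(client.Collection)
	return col, ok
}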
+ +package cli + +import ( + "context" + + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/datastore" +) + +func MakeCollectionCommand(cfg *config.Config) *cobra.Command { + var txID uint64 + var name string + var schemaID string + var versionID string + var cmd = &cobra.Command{ + Use: "collection [--name --schema --version ]", + Short: "Interact with a collection.", + Long: `Create, read, update, and delete documents within a collection.`, + PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { + // cobra does not chain pre run calls so we have to run them again here + if err := loadConfig(cfg); err != nil { + return err + } + if err := setTransactionContext(cmd, cfg, txID); err != nil { + return err + } + if err := setStoreContext(cmd, cfg); err != nil { + return err + } + store := mustGetStoreContext(cmd) + + var col client.Collection + switch { + case versionID != "": + col, err = store.GetCollectionByVersionID(cmd.Context(), versionID) + + case schemaID != "": + col, err = store.GetCollectionBySchemaID(cmd.Context(), schemaID) + + case name != "": + col, err = store.GetCollectionByName(cmd.Context(), name) + + default: + return nil + } + + if err != nil { + return err + } + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + col = col.WithTxn(tx) + } + + ctx := context.WithValue(cmd.Context(), colContextKey, col) + cmd.SetContext(ctx) + return nil + }, + } + cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID") + cmd.PersistentFlags().StringVar(&name, "name", "", "Collection name") + cmd.PersistentFlags().StringVar(&schemaID, "schema", "", "Collection schema ID") + cmd.PersistentFlags().StringVar(&versionID, "version", "", "Collection version ID") + return cmd +} diff --git a/cli/collection_create.go b/cli/collection_create.go new file mode 100644 index 0000000000..4dca9be33a --- /dev/null +++ b/cli/collection_create.go @@ -0,0 +1,102 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + "io" + "os" + + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeCollectionCreateCommand() *cobra.Command { + var file string + var cmd = &cobra.Command{ + Use: "create ", + Short: "Create a new document.", + Long: `Create a new document. 
+ +Example: create from string + defradb client collection create --name User '{ "name": "Bob" }' + +Example: create multiple from string + defradb client collection create --name User '[{ "name": "Alice" }, { "name": "Bob" }]' + +Example: create from file + defradb client collection create --name User -f document.json + +Example: create from stdin + cat document.json | defradb client collection create --name User - + `, + Args: cobra.RangeArgs(0, 1), + RunE: func(cmd *cobra.Command, args []string) error { + col, ok := tryGetCollectionContext(cmd) + if !ok { + return cmd.Usage() + } + + var docData []byte + switch { + case file != "": + data, err := os.ReadFile(file) + if err != nil { + return err + } + docData = data + case len(args) == 1 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) + if err != nil { + return err + } + docData = data + case len(args) == 1: + docData = []byte(args[0]) + default: + return ErrNoDocOrFile + } + + var docMap any + if err := json.Unmarshal(docData, &docMap); err != nil { + return err + } + + switch t := docMap.(type) { + case map[string]any: + doc, err := client.NewDocFromMap(t) + if err != nil { + return err + } + return col.Create(cmd.Context(), doc) + case []any: + docs := make([]*client.Document, len(t)) + for i, v := range t { + docMap, ok := v.(map[string]any) + if !ok { + return ErrInvalidDocument + } + doc, err := client.NewDocFromMap(docMap) + if err != nil { + return err + } + docs[i] = doc + } + return col.CreateMany(cmd.Context(), docs) + default: + return ErrInvalidDocument + } + }, + } + cmd.Flags().StringVarP(&file, "file", "f", "", "File containing document(s)") + return cmd +} diff --git a/cli/collection_delete.go b/cli/collection_delete.go new file mode 100644 index 0000000000..85539d5eb3 --- /dev/null +++ b/cli/collection_delete.go @@ -0,0 +1,78 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeCollectionDeleteCommand() *cobra.Command { + var keys []string + var filter string + var cmd = &cobra.Command{ + Use: "delete [--filter <filter> --key <key>]", + Short: "Delete documents by key or filter.", + Long: `Delete documents by key or filter and list the number of documents deleted.
+ +Example: delete by key(s) + defradb client collection delete --name User --key bae-123,bae-456 + +Example: delete by filter + defradb client collection delete --name User --filter '{ "_gte": { "points": 100 } }' + `, + RunE: func(cmd *cobra.Command, args []string) error { + col, ok := tryGetCollectionContext(cmd) + if !ok { + return cmd.Usage() + } + + switch { + case len(keys) == 1: + docKey, err := client.NewDocKeyFromString(keys[0]) + if err != nil { + return err + } + res, err := col.DeleteWithKey(cmd.Context(), docKey) + if err != nil { + return err + } + return writeJSON(cmd, res) + case len(keys) > 1: + docKeys := make([]client.DocKey, len(keys)) + for i, v := range keys { + docKey, err := client.NewDocKeyFromString(v) + if err != nil { + return err + } + docKeys[i] = docKey + } + res, err := col.DeleteWithKeys(cmd.Context(), docKeys) + if err != nil { + return err + } + return writeJSON(cmd, res) + case filter != "": + res, err := col.DeleteWithFilter(cmd.Context(), filter) + if err != nil { + return err + } + return writeJSON(cmd, res) + default: + return ErrNoDocKeyOrFilter + } + }, + } + cmd.Flags().StringSliceVar(&keys, "key", nil, "Document key") + cmd.Flags().StringVar(&filter, "filter", "", "Document filter") + return cmd +} diff --git a/cli/collection_describe.go b/cli/collection_describe.go new file mode 100644 index 0000000000..1d6ee55821 --- /dev/null +++ b/cli/collection_describe.go @@ -0,0 +1,57 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeCollectionDescribeCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "describe", + Short: "View collection description.", + Long: `Introspect collection types. + +Example: view all collections + defradb client collection describe + +Example: view collection by name + defradb client collection describe --name User + +Example: view collection by schema id + defradb client collection describe --schema bae123 + +Example: view collection by version id + defradb client collection describe --version bae123 + `, + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + + col, ok := tryGetCollectionContext(cmd) + if ok { + return writeJSON(cmd, col.Description()) + } + // if no collection specified list all collections + cols, err := store.GetAllCollections(cmd.Context()) + if err != nil { + return err + } + colDesc := make([]client.CollectionDescription, len(cols)) + for i, col := range cols { + colDesc[i] = col.Description() + } + return writeJSON(cmd, colDesc) + }, + } + return cmd +} diff --git a/cli/collection_get.go b/cli/collection_get.go new file mode 100644 index 0000000000..d908bbdb7a --- /dev/null +++ b/cli/collection_get.go @@ -0,0 +1,53 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
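// The commands above return their results through writeJSON. A minimal
// sketch of what such a helper could look like, assuming output goes to the
// command's stdout (hypothetical; the actual helper ships elsewhere in this
// patch series):
package cli

import (
	"encoding/json"

	"github.com/spf13/cobra"
)

func writeJSON(cmd *cobra.Command, out any) error {
	enc := json.NewEncoder(cmd.OutOrStdout())
	enc.SetIndent("", "  ")
	// Encode appends a trailing newline, keeping piped output line-oriented.
	return enc.Encode(out)
}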
+ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeCollectionGetCommand() *cobra.Command { + var showDeleted bool + var cmd = &cobra.Command{ + Use: "get [--show-deleted]", + Short: "View document fields.", + Long: `View document fields. + +Example: + defradb client collection get --name User bae-123 + `, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + col, ok := tryGetCollectionContext(cmd) + if !ok { + return cmd.Usage() + } + + docKey, err := client.NewDocKeyFromString(args[0]) + if err != nil { + return err + } + doc, err := col.Get(cmd.Context(), docKey, showDeleted) + if err != nil { + return err + } + docMap, err := doc.ToMap() + if err != nil { + return err + } + return writeJSON(cmd, docMap) + }, + } + cmd.Flags().BoolVar(&showDeleted, "show-deleted", false, "Show deleted documents") + return cmd +} diff --git a/cli/collection_keys.go b/cli/collection_keys.go new file mode 100644 index 0000000000..a453c16a86 --- /dev/null +++ b/cli/collection_keys.go @@ -0,0 +1,53 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/http" +) + +func MakeCollectionKeysCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "keys", + Short: "List all document keys.", + Long: `List all document keys. + +Example: + defradb client collection keys --name User + `, + RunE: func(cmd *cobra.Command, args []string) error { + col, ok := tryGetCollectionContext(cmd) + if !ok { + return cmd.Usage() + } + + docCh, err := col.GetAllDocKeys(cmd.Context()) + if err != nil { + return err + } + for docKey := range docCh { + results := &http.DocKeyResult{ + Key: docKey.Key.String(), + } + if docKey.Err != nil { + results.Error = docKey.Err.Error() + } + if err := writeJSON(cmd, results); err != nil { + return err + } + } + return nil + }, + } + return cmd +} diff --git a/cli/collection_update.go b/cli/collection_update.go new file mode 100644 index 0000000000..317a2e8119 --- /dev/null +++ b/cli/collection_update.go @@ -0,0 +1,99 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeCollectionUpdateCommand() *cobra.Command { + var keys []string + var filter string + var updater string + var cmd = &cobra.Command{ + Use: "update [--filter --key --updater ] ", + Short: "Update documents by key or filter.", + Long: `Update documents by key or filter. 
+ +Example: update from string + defradb client collection update --name User --key bae-123 '{ "name": "Bob" }' + +Example: update by filter + defradb client collection update --name User \ + --filter '{ "_gte": { "points": 100 } }' --updater '{ "verified": true }' + +Example: update by keys + defradb client collection update --name User \ + --key bae-123,bae-456 --updater '{ "verified": true }' + `, + Args: cobra.RangeArgs(0, 1), + RunE: func(cmd *cobra.Command, args []string) error { + col, ok := tryGetCollectionContext(cmd) + if !ok { + return cmd.Usage() + } + + switch { + case len(keys) == 1 && updater != "": + docKey, err := client.NewDocKeyFromString(keys[0]) + if err != nil { + return err + } + res, err := col.UpdateWithKey(cmd.Context(), docKey, updater) + if err != nil { + return err + } + return writeJSON(cmd, res) + case len(keys) > 1 && updater != "": + docKeys := make([]client.DocKey, len(keys)) + for i, v := range keys { + docKey, err := client.NewDocKeyFromString(v) + if err != nil { + return err + } + docKeys[i] = docKey + } + res, err := col.UpdateWithKeys(cmd.Context(), docKeys, updater) + if err != nil { + return err + } + return writeJSON(cmd, res) + case filter != "" && updater != "": + res, err := col.UpdateWithFilter(cmd.Context(), filter, updater) + if err != nil { + return err + } + return writeJSON(cmd, res) + case len(keys) == 1 && len(args) == 1: + docKey, err := client.NewDocKeyFromString(keys[0]) + if err != nil { + return err + } + doc, err := col.Get(cmd.Context(), docKey, true) + if err != nil { + return err + } + if err := doc.SetWithJSON([]byte(args[0])); err != nil { + return err + } + return col.Update(cmd.Context(), doc) + default: + return ErrNoDocKeyOrFilter + } + }, + } + cmd.Flags().StringSliceVar(&keys, "key", nil, "Document key") + cmd.Flags().StringVar(&filter, "filter", "", "Document filter") + cmd.Flags().StringVar(&updater, "updater", "", "Document updater") + return cmd +} diff --git a/cli/dump.go b/cli/dump.go index f35e9232b1..a3d155605b 100644 --- a/cli/dump.go +++ b/cli/dump.go @@ -11,69 +11,18 @@ package cli import ( - "encoding/json" - "io" - "net/http" - "os" - "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/client" ) -func MakeDumpCommand(cfg *config.Config) *cobra.Command { +func MakeDumpCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "dump", Short: "Dump the contents of DefraDB node-side", RunE: func(cmd *cobra.Command, _ []string) (err error) { - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if !isFileInfoPipe(stdout) { - log.FeedbackInfo(cmd.Context(), "Requesting the database to dump its state, server-side...") - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.DumpPath) - if err != nil { - return errors.Wrap("failed to join endpoint", err) - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return errors.Wrap("failed dump request", err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - // dumpResponse follows structure of HTTP API's response - type dumpResponse struct { - Data struct { - Response 
string `json:"response"` - } `json:"data"` - } - r := dumpResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return errors.Wrap("failed parsing of response", err) - } - log.FeedbackInfo(cmd.Context(), r.Data.Response) - } - return nil + db := cmd.Context().Value(dbContextKey).(client.DB) + return db.PrintDump(cmd.Context()) }, } return cmd diff --git a/cli/errors.go b/cli/errors.go index 17e4819a8b..a7d6cbd26b 100644 --- a/cli/errors.go +++ b/cli/errors.go @@ -11,133 +11,20 @@ package cli import ( - "strings" - "github.com/sourcenetwork/defradb/errors" ) -const ( - errMissingArg string = "missing argument" - errMissingArgs string = "missing arguments" - errTooManyArgs string = "too many arguments" - errEmptyStdin string = "empty stdin" - errEmptyFile string = "empty file" - errFailedToReadFile string = "failed to read file" - errFailedToReadStdin string = "failed to read stdin" - errFailedToCreateRPCClient string = "failed to create RPC client" - errFailedToAddReplicator string = "failed to add replicator, request failed" - errFailedToJoinEndpoint string = "failed to join endpoint" - errFailedToSendRequest string = "failed to send request" - errFailedToReadResponseBody string = "failed to read response body" - errFailedToCloseResponseBody string = "failed to close response body" - errFailedToStatStdOut string = "failed to stat stdout" - errFailedToHandleGQLErrors string = "failed to handle GraphQL errors" - errFailedToPrettyPrintResponse string = "failed to pretty print response" - errFailedToUnmarshalResponse string = "failed to unmarshal response" - errFailedParsePeerID string = "failed to parse PeerID" - errFailedToMarshalData string = "failed to marshal data" - errInvalidArgumentLength string = "invalid argument length" -) +const errInvalidLensConfig = "invalid lens configuration" -// Errors returnable from this package. -// -// This list is incomplete and undefined errors may also be returned. -// Errors returned from this package may be tested against these errors with errors.Is. 
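// The sentinel-error pattern survives the rewrite below, so callers can
// still branch on errors.Is. A minimal sketch against one of the new
// sentinels, assuming the usual module path and that these errors unwrap
// with the standard library (hypothetical caller, not part of this patch):
package main

import (
	"errors"
	"fmt"

	"github.com/sourcenetwork/defradb/cli"
	"github.com/sourcenetwork/defradb/config"
)

func main() {
	err := cli.NewDefraCommand(config.DefaultConfig()).Execute()
	if errors.Is(err, cli.ErrNoDocKeyOrFilter) {
		fmt.Println("specify --key or --filter and retry")
	}
}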
var ( - ErrMissingArg = errors.New(errMissingArg) - ErrMissingArgs = errors.New(errMissingArgs) - ErrTooManyArgs = errors.New(errTooManyArgs) - ErrEmptyFile = errors.New(errEmptyFile) - ErrEmptyStdin = errors.New(errEmptyStdin) - ErrFailedToReadFile = errors.New(errFailedToReadFile) - ErrFailedToReadStdin = errors.New(errFailedToReadStdin) - ErrFailedToCreateRPCClient = errors.New(errFailedToCreateRPCClient) - ErrFailedToAddReplicator = errors.New(errFailedToAddReplicator) - ErrFailedToJoinEndpoint = errors.New(errFailedToJoinEndpoint) - ErrFailedToSendRequest = errors.New(errFailedToSendRequest) - ErrFailedToReadResponseBody = errors.New(errFailedToReadResponseBody) - ErrFailedToStatStdOut = errors.New(errFailedToStatStdOut) - ErrFailedToHandleGQLErrors = errors.New(errFailedToHandleGQLErrors) - ErrFailedToPrettyPrintResponse = errors.New(errFailedToPrettyPrintResponse) - ErrFailedToUnmarshalResponse = errors.New(errFailedToUnmarshalResponse) - ErrFailedParsePeerID = errors.New(errFailedParsePeerID) - ErrInvalidExportFormat = errors.New("invalid export format") - ErrInvalidArgumentLength = errors.New(errInvalidArgumentLength) + ErrNoDocOrFile = errors.New("document or file must be defined") + ErrInvalidDocument = errors.New("invalid document") + ErrNoDocKeyOrFilter = errors.New("document key or filter must be defined") + ErrInvalidExportFormat = errors.New("invalid export format") + ErrNoLensConfig = errors.New("lens config cannot be empty") + ErrInvalidLensConfig = errors.New("invalid lens configuration") ) -func NewErrMissingArg(name string) error { - return errors.New(errMissingArg, errors.NewKV("Name", name)) -} - -func NewErrMissingArgs(names []string) error { - return errors.New(errMissingArgs, errors.NewKV("Required", strings.Join(names, ", "))) -} - -func NewErrTooManyArgs(max, actual int) error { - return errors.New(errTooManyArgs, errors.NewKV("Max", max), errors.NewKV("Actual", actual)) -} - -func NewFailedToReadFile(inner error) error { - return errors.Wrap(errFailedToReadFile, inner) -} - -func NewFailedToReadStdin(inner error) error { - return errors.Wrap(errFailedToReadStdin, inner) -} - -func NewErrFailedToCreateRPCClient(inner error) error { - return errors.Wrap(errFailedToCreateRPCClient, inner) -} - -func NewErrFailedToAddReplicator(inner error) error { - return errors.Wrap(errFailedToAddReplicator, inner) -} - -func NewErrFailedToJoinEndpoint(inner error) error { - return errors.Wrap(errFailedToJoinEndpoint, inner) -} - -func NewErrFailedToSendRequest(inner error) error { - return errors.Wrap(errFailedToSendRequest, inner) -} - -func NewErrFailedToReadResponseBody(inner error) error { - return errors.Wrap(errFailedToReadResponseBody, inner) -} - -func NewErrFailedToCloseResponseBody(closeErr, other error) error { - if other != nil { - return errors.Wrap(errFailedToCloseResponseBody, closeErr, errors.NewKV("Other error", other)) - } - return errors.Wrap(errFailedToCloseResponseBody, closeErr) -} - -func NewErrFailedToStatStdOut(inner error) error { - return errors.Wrap(errFailedToStatStdOut, inner) -} - -func NewErrFailedToHandleGQLErrors(inner error) error { - return errors.Wrap(errFailedToHandleGQLErrors, inner) -} - -func NewErrFailedToPrettyPrintResponse(inner error) error { - return errors.Wrap(errFailedToPrettyPrintResponse, inner) -} - -func NewErrFailedToUnmarshalResponse(inner error) error { - return errors.Wrap(errFailedToUnmarshalResponse, inner) -} - -func NewErrFailedParsePeerID(inner error) error { - return errors.Wrap(errFailedParsePeerID, inner) -} - -// 
NewFailedToMarshalData returns an error indicating that a there was a problem with mashalling. -func NewFailedToMarshalData(inner error) error { - return errors.Wrap(errFailedToMarshalData, inner) -} - -// NewErrInvalidArgumentLength returns an error indicating an incorrect number of arguments. -func NewErrInvalidArgumentLength(inner error, expected int) error { - return errors.Wrap(errInvalidArgumentLength, inner, errors.NewKV("Expected", expected)) +func NewErrInvalidLensConfig(inner error) error { + return errors.Wrap(errInvalidLensConfig, inner) } diff --git a/cli/index_create.go b/cli/index_create.go index a91a76d2d0..42866267fc 100644 --- a/cli/index_create.go +++ b/cli/index_create.go @@ -11,33 +11,16 @@ package cli import ( - "bytes" - "encoding/json" - "io" - "net/http" - "os" - "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/datastore" ) -type indexCreateResponse struct { - Data struct { - Index client.IndexDescription `json:"index"` - } `json:"data"` - Errors []struct { - Message string `json:"message"` - } `json:"errors"` -} - -func MakeIndexCreateCommand(cfg *config.Config) *cobra.Command { +func MakeIndexCreateCommand() *cobra.Command { var collectionArg string var nameArg string - var fieldsArg string + var fieldsArg []string var cmd = &cobra.Command{ Use: "create -c --collection --fields [-n --name ]", Short: "Creates a secondary index on a collection's field(s)", @@ -51,75 +34,34 @@ Example: create an index for 'Users' collection on 'name' field: Example: create a named index for 'Users' collection on 'name' field: defradb client index create --collection Users --fields name --name UsersByName`, ValidArgs: []string{"collection", "fields", "name"}, - RunE: func(cmd *cobra.Command, args []string) (err error) { - if collectionArg == "" || fieldsArg == "" { - if collectionArg == "" { - return NewErrMissingArg("collection") - } else { - return NewErrMissingArg("fields") - } - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } - - data := map[string]string{ - "collection": collectionArg, - "fields": fieldsArg, + var fields []client.IndexedFieldDescription + for _, name := range fieldsArg { + fields = append(fields, client.IndexedFieldDescription{Name: name}) } - if nameArg != "" { - data["name"] = nameArg + desc := client.IndexDescription{ + Name: nameArg, + Fields: fields, } - - jsonData, err := json.Marshal(data) + col, err := store.GetCollectionByName(cmd.Context(), collectionArg) if err != nil { return err } - - res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(jsonData)) - if err != nil { - return NewErrFailedToSendRequest(err) - } - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + col = col.WithTxn(tx) } - - stdout, err := os.Stdout.Stat() + desc, err = col.CreateIndex(cmd.Context(), desc) if err != nil { return err } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - r := indexCreateResponse{} - err = 
json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to create index", - logging.NewKV("Errors", r.Errors)) - } else { - log.FeedbackInfo(cmd.Context(), "Successfully created index", - logging.NewKV("Index", r.Data.Index)) - } - } - return nil + return writeJSON(cmd, desc) }, } cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") cmd.Flags().StringVarP(&nameArg, "name", "n", "", "Index name") - cmd.Flags().StringVar(&fieldsArg, "fields", "", "Fields to index") + cmd.Flags().StringSliceVar(&fieldsArg, "fields", []string{}, "Fields to index") return cmd } diff --git a/cli/index_create_test.go b/cli/index_create_test.go deleted file mode 100644 index ac75248c10..0000000000 --- a/cli/index_create_test.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "io" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" -) - -const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" - -func getTestConfig(t *testing.T) *config.Config { - cfg := config.DefaultConfig() - dir := t.TempDir() - cfg.Datastore.Store = "memory" - cfg.Datastore.Badger.Path = dir - cfg.Net.P2PDisabled = false - cfg.Net.P2PAddress = randomMultiaddr - cfg.Net.RPCAddress = "0.0.0.0:0" - cfg.Net.TCPAddress = randomMultiaddr - cfg.API.Address = "0.0.0.0:0" - return cfg -} - -func startTestNode(t *testing.T) (*config.Config, *defraInstance, func()) { - cfg := getTestConfig(t) - - ctx := context.Background() - di, err := start(ctx, cfg) - require.NoError(t, err) - return cfg, di, func() { di.close(ctx) } -} - -func parseLines(r io.Reader) ([]map[string]any, error) { - fileScanner := bufio.NewScanner(r) - - fileScanner.Split(bufio.ScanLines) - - logLines := []map[string]any{} - for fileScanner.Scan() { - loggedLine := make(map[string]any) - err := json.Unmarshal(fileScanner.Bytes(), &loggedLine) - if err != nil { - return nil, err - } - logLines = append(logLines, loggedLine) - } - - return logLines, nil -} - -func lineHas(lines []map[string]any, key, value string) bool { - for _, line := range lines { - if line[key] == value { - return true - } - } - return false -} - -func simulateConsoleOutput(t *testing.T) (*bytes.Buffer, func()) { - b := &bytes.Buffer{} - log.ApplyConfig(logging.Config{ - EncoderFormat: logging.NewEncoderFormatOption(logging.JSON), - Pipe: b, - }) - - f, err := os.CreateTemp(t.TempDir(), "tmpFile") - require.NoError(t, err) - originalStdout := os.Stdout - os.Stdout = f - - return b, func() { - os.Stdout = originalStdout - f.Close() - os.Remove(f.Name()) - } -} - -func execAddSchemaCmd(t *testing.T, cfg *config.Config, schema string) { - addSchemaCmd := MakeSchemaAddCommand(cfg) - err := addSchemaCmd.RunE(addSchemaCmd, []string{schema}) - require.NoError(t, err) -} - -func execCreateIndexCmd(t *testing.T, cfg *config.Config, collection, fields, name string) { - indexCreateCmd := MakeIndexCreateCommand(cfg) - 
indexCreateCmd.SetArgs([]string{ - "--collection", collection, - "--fields", fields, - "--name", name, - }) - err := indexCreateCmd.Execute() - require.NoError(t, err) -} - -func hasLogWithKey(logLines []map[string]any, key string) bool { - for _, logLine := range logLines { - if _, ok := logLine[key]; ok { - return true - } - } - return false -} - -func TestIndexCreateCmd_IfInvalidAddress_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - cfg.API.Address = "invalid address" - indexCreateCmd := MakeIndexCreateCommand(cfg) - - indexCreateCmd.SetArgs([]string{ - "--collection", "User", - "--fields", "Name", - "--name", "users_name_index", - }) - err := indexCreateCmd.Execute() - require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) -} - -func TestIndexCreateCmd_IfNoCollection_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - indexCreateCmd := MakeIndexCreateCommand(cfg) - - outputBuf := bytes.NewBufferString("") - indexCreateCmd.SetOut(outputBuf) - - indexCreateCmd.SetArgs([]string{ - "--collection", "User", - "--fields", "Name", - "--name", "users_name_index", - }) - err := indexCreateCmd.Execute() - require.NoError(t, err) - - out, err := io.ReadAll(outputBuf) - require.NoError(t, err) - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - require.NoError(t, err) - - _, hasErrors := r["errors"] - assert.True(t, hasErrors, "command should return error") -} - -func TestIndexCreateCmd_IfNoErrors_ReturnData(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - execAddSchemaCmd(t, cfg, `type User { name: String }`) - - indexCreateCmd := MakeIndexCreateCommand(cfg) - outputBuf := bytes.NewBufferString("") - indexCreateCmd.SetOut(outputBuf) - - indexCreateCmd.SetArgs([]string{ - "--collection", "User", - "--fields", "name", - "--name", "users_name_index", - }) - err := indexCreateCmd.Execute() - require.NoError(t, err) - - out, err := io.ReadAll(outputBuf) - require.NoError(t, err) - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - require.NoError(t, err) - - _, hasData := r["data"] - assert.True(t, hasData, "command should return data") -} - -func TestIndexCreateCmd_WithConsoleOutputIfNoCollection_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - indexCreateCmd := MakeIndexCreateCommand(cfg) - indexCreateCmd.SetArgs([]string{ - "--collection", "User", - "--fields", "Name", - "--name", "users_name_index", - }) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexCreateCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - assert.True(t, hasLogWithKey(logLines, "Errors")) -} - -func TestIndexCreateCmd_WithConsoleOutputIfNoErrors_ReturnData(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - execAddSchemaCmd(t, cfg, `type User { name: String }`) - - const indexName = "users_name_index" - indexCreateCmd := MakeIndexCreateCommand(cfg) - indexCreateCmd.SetArgs([]string{ - "--collection", "User", - "--fields", "name", - "--name", indexName, - }) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexCreateCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.Len(t, logLines, 1) - result, ok := logLines[0]["Index"].(map[string]any) - require.True(t, ok) - assert.Equal(t, indexName, result["Name"]) - - assert.False(t, hasLogWithKey(logLines, "Errors")) -} diff --git 
a/cli/index_drop.go b/cli/index_drop.go index ef0a37db0c..03639fb277 100644 --- a/cli/index_drop.go +++ b/cli/index_drop.go @@ -11,29 +11,12 @@ package cli import ( - "bytes" - "encoding/json" - "io" - "net/http" - "os" - "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/datastore" ) -type indexDropResponse struct { - Data struct { - Result string `json:"result"` - } `json:"data"` - Errors []struct { - Message string `json:"message"` - } `json:"errors"` -} - -func MakeIndexDropCommand(cfg *config.Config) *cobra.Command { +func MakeIndexDropCommand() *cobra.Command { var collectionArg string var nameArg string var cmd = &cobra.Command{ @@ -44,74 +27,17 @@ func MakeIndexDropCommand(cfg *config.Config) *cobra.Command { Example: drop the index 'UsersByName' for 'Users' collection: defradb client index drop --collection Users --name UsersByName`, ValidArgs: []string{"collection", "name"}, - RunE: func(cmd *cobra.Command, args []string) (err error) { - if collectionArg == "" || nameArg == "" { - if collectionArg == "" { - return NewErrMissingArg("collection") - } else { - return NewErrMissingArg("name") - } - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } - - data := map[string]string{ - "collection": collectionArg, - "name": nameArg, - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - jsonData, err := json.Marshal(data) + col, err := store.GetCollectionByName(cmd.Context(), collectionArg) if err != nil { return err } - - req, err := http.NewRequest("DELETE", endpoint.String(), bytes.NewBuffer(jsonData)) - if err != nil { - return NewErrFailedToSendRequest(err) - } - req.Header.Add("Content-Type", "application/json") - client := &http.Client{} - res, err := client.Do(req) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return err - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - r := indexDropResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to drop index", - logging.NewKV("Errors", r.Errors)) - } else { - log.FeedbackInfo(cmd.Context(), "Successfully dropped index", - logging.NewKV("Result", r.Data.Result)) - } + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + col = col.WithTxn(tx) } - return nil + return col.DropIndex(cmd.Context(), nameArg) }, } cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") diff --git a/cli/index_drop_test.go b/cli/index_drop_test.go deleted file mode 100644 index 7fa368a458..0000000000 --- a/cli/index_drop_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt.
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "bytes" - "encoding/json" - "io" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestIndexDropCmd_IfInvalidAddress_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - cfg.API.Address = "invalid address" - indexDropCmd := MakeIndexDropCommand(cfg) - - indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) - err := indexDropCmd.Execute() - require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) -} - -func TestIndexDropCmd_IfNoCollection_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - indexDropCmd := MakeIndexDropCommand(cfg) - - outputBuf := bytes.NewBufferString("") - indexDropCmd.SetOut(outputBuf) - - indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) - err := indexDropCmd.Execute() - require.NoError(t, err) - - out, err := io.ReadAll(outputBuf) - require.NoError(t, err) - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - require.NoError(t, err) - - _, hasErrors := r["errors"] - assert.True(t, hasErrors, "command should return error") -} - -func TestIndexDropCmd_IfNoErrors_ShouldReturnData(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - execAddSchemaCmd(t, cfg, `type User { name: String }`) - execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") - - indexDropCmd := MakeIndexDropCommand(cfg) - outputBuf := bytes.NewBufferString("") - indexDropCmd.SetOut(outputBuf) - - indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) - err := indexDropCmd.Execute() - require.NoError(t, err) - - out, err := io.ReadAll(outputBuf) - require.NoError(t, err) - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - require.NoError(t, err) - - _, hasData := r["data"] - assert.True(t, hasData, "command should return data") -} - -func TestIndexDropCmd_WithConsoleOutputIfNoCollection_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - indexDropCmd := MakeIndexDropCommand(cfg) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) - err := indexDropCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - assert.True(t, hasLogWithKey(logLines, "Errors")) -} - -func TestIndexDropCmd_WithConsoleOutputIfNoErrors_ShouldReturnData(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - execAddSchemaCmd(t, cfg, `type User { name: String }`) - execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") - - indexDropCmd := MakeIndexDropCommand(cfg) - indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexDropCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.Len(t, logLines, 1) - assert.Equal(t, "success", logLines[0]["Result"]) - - assert.False(t, hasLogWithKey(logLines, "Errors")) -} diff --git a/cli/index_list.go b/cli/index_list.go index 131782cfe5..92ada3e007 100644 --- a/cli/index_list.go +++ b/cli/index_list.go @@ -11,31 
+11,12 @@ package cli import ( - "encoding/json" - "io" - "net/http" - "net/url" - "os" - "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/datastore" ) -type indexListResponse struct { - Data struct { - Collections map[string][]client.IndexDescription `json:"collections"` - Indexes []client.IndexDescription `json:"indexes"` - } `json:"data"` - Errors []struct { - Message string `json:"message"` - } `json:"errors"` -} - -func MakeIndexListCommand(cfg *config.Config) *cobra.Command { +func MakeIndexListCommand() *cobra.Command { var collectionArg string var cmd = &cobra.Command{ Use: "list [-c --collection <collection>]", @@ -48,60 +29,30 @@ Otherwise, all indexes in the database will be shown. Example: show all indexes for 'Users' collection: defradb client index list --collection Users`, ValidArgs: []string{"collection"}, - RunE: func(cmd *cobra.Command, args []string) (err error) { - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - if collectionArg != "" { - values := url.Values{ - "collection": {collectionArg}, + switch { + case collectionArg != "": + col, err := store.GetCollectionByName(cmd.Context(), collectionArg) + if err != nil { + return err } - endpoint.RawQuery = values.Encode() - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - return NewErrFailedToCloseResponseBody(e, err) + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + col = col.WithTxn(tx) } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return err - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - r := indexListResponse{} - err = json.Unmarshal(response, &r) + indexes, err := col.GetIndexes(cmd.Context()) if err != nil { - return NewErrFailedToUnmarshalResponse(err) + return err } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to list index", - logging.NewKV("Errors", r.Errors)) - } else if collectionArg != "" { - log.FeedbackInfo(cmd.Context(), "Fetched indexes for collection "+collectionArg, - logging.NewKV("Indexes", r.Data.Indexes)) - } else { - log.FeedbackInfo(cmd.Context(), "Fetched all indexes", - logging.NewKV("Collections", r.Data.Collections)) + return writeJSON(cmd, indexes) + default: + indexes, err := store.GetAllIndexes(cmd.Context()) + if err != nil { + return err } + return writeJSON(cmd, indexes) } - return nil }, } cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") diff --git a/cli/index_list_test.go b/cli/index_list_test.go deleted file mode 100644 index 548d2af040..0000000000 --- a/cli/index_list_test.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt.
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "bytes" - "encoding/json" - "io" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestIndexListCmd_IfInvalidAddress_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - cfg.API.Address = "invalid address" - indexCreateCmd := MakeIndexListCommand(cfg) - - err := indexCreateCmd.RunE(indexCreateCmd, nil) - require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) -} - -func TestIndexListCmd_IfNoErrors_ShouldReturnData(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - execAddSchemaCmd(t, cfg, `type User { name: String }`) - execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") - - indexListCmd := MakeIndexListCommand(cfg) - outputBuf := bytes.NewBufferString("") - indexListCmd.SetOut(outputBuf) - - err := indexListCmd.Execute() - require.NoError(t, err) - - out, err := io.ReadAll(outputBuf) - require.NoError(t, err) - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - require.NoError(t, err) - - _, hasData := r["data"] - assert.True(t, hasData, "command should return data") -} - -func TestIndexListCmd_WithConsoleOutputIfCollectionDoesNotExist_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - indexListCmd := MakeIndexListCommand(cfg) - indexListCmd.SetArgs([]string{"--collection", "User"}) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexListCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, hasLogWithKey(logLines, "Errors")) -} - -func TestIndexListCmd_WithConsoleOutputIfCollectionIsGiven_ReturnCollectionList(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - const indexName = "users_name_index" - execAddSchemaCmd(t, cfg, `type User { name: String }`) - execCreateIndexCmd(t, cfg, "User", "name", indexName) - - indexListCmd := MakeIndexListCommand(cfg) - indexListCmd.SetArgs([]string{"--collection", "User"}) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexListCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.Len(t, logLines, 1) - resultList, ok := logLines[0]["Indexes"].([]any) - require.True(t, ok) - require.Len(t, resultList, 1) - result, ok := resultList[0].(map[string]any) - require.True(t, ok) - assert.Equal(t, indexName, result["Name"]) - - assert.False(t, hasLogWithKey(logLines, "Errors")) -} - -func TestIndexListCmd_WithConsoleOutputIfNoArgs_ReturnAllIndexes(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - const userIndexName = "users_name_index" - const productIndexName = "product_price_index" - execAddSchemaCmd(t, cfg, `type User { name: String }`) - execAddSchemaCmd(t, cfg, `type Product { price: Int }`) - execCreateIndexCmd(t, cfg, "User", "name", userIndexName) - execCreateIndexCmd(t, cfg, "Product", "price", productIndexName) - - indexListCmd := MakeIndexListCommand(cfg) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexListCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.Len(t, logLines, 1) - 
resultCollections, ok := logLines[0]["Collections"].(map[string]any) - require.True(t, ok) - - userCollection, ok := resultCollections["User"].([]any) - require.True(t, ok) - require.Len(t, userCollection, 1) - userIndex, ok := userCollection[0].(map[string]any) - require.True(t, ok) - require.Equal(t, userIndexName, userIndex["Name"]) - - productCollection, ok := resultCollections["Product"].([]any) - require.True(t, ok) - require.Len(t, productCollection, 1) - productIndex, ok := productCollection[0].(map[string]any) - require.True(t, ok) - require.Equal(t, productIndexName, productIndex["Name"]) - - assert.False(t, hasLogWithKey(logLines, "Errors")) -} diff --git a/cli/blocks.go b/cli/p2p.go similarity index 75% rename from cli/blocks.go rename to cli/p2p.go index 9e55c36d22..ee084cc67b 100644 --- a/cli/blocks.go +++ b/cli/p2p.go @@ -14,11 +14,11 @@ import ( "github.com/spf13/cobra" ) -func MakeBlocksCommand() *cobra.Command { +func MakeP2PCommand() *cobra.Command { var cmd = &cobra.Command{ - Use: "blocks", - Short: "Interact with the database's blockstore", + Use: "p2p", + Short: "Interact with the DefraDB P2P system", + Long: "Interact with the DefraDB P2P system", } - return cmd } diff --git a/cli/p2p_collection.go b/cli/p2p_collection.go index 6ce6d8e7c7..140ac4cc34 100644 --- a/cli/p2p_collection.go +++ b/cli/p2p_collection.go @@ -16,7 +16,7 @@ import ( func MakeP2PCollectionCommand() *cobra.Command { var cmd = &cobra.Command{ - Use: "p2pcollection", + Use: "collection", Short: "Configure the P2P collection system", Long: `Add, delete, or get the list of P2P collections. The selected collections synchronize their events on the pubsub network.`, diff --git a/cli/p2p_collection_add.go b/cli/p2p_collection_add.go index 46a4f171e1..6970e8daec 100644 --- a/cli/p2p_collection_add.go +++ b/cli/p2p_collection_add.go @@ -11,51 +11,19 @@ package cli import ( - "context" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" ) -func MakeP2PCollectionAddCommand(cfg *config.Config) *cobra.Command { +func MakeP2PCollectionAddCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "add [collectionID]", Short: "Add P2P collections", Long: `Add P2P collections to the synchronized pubsub topics. The collections are synchronized between nodes of a pubsub network.`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { - return errors.New("must specify at least one collectionID") - } - return nil - }, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return ErrFailedToCreateRPCClient - } - - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) - } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() - - err = client.AddP2PCollections(ctx, args...) 
- if err != nil { - return errors.Wrap("failed to add P2P collections, request failed", err) - } - log.FeedbackInfo(ctx, "Successfully added P2P collections", logging.NewKV("Collections", args)) - return nil + store := mustGetStoreContext(cmd) + return store.AddP2PCollection(cmd.Context(), args[0]) }, } return cmd diff --git a/cli/p2p_collection_getall.go b/cli/p2p_collection_getall.go index cb9c9f4025..c07a63f453 100644 --- a/cli/p2p_collection_getall.go +++ b/cli/p2p_collection_getall.go @@ -11,60 +11,24 @@ package cli import ( - "context" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" ) -func MakeP2PCollectionGetallCommand(cfg *config.Config) *cobra.Command { +func MakeP2PCollectionGetAllCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "getall", Short: "Get all P2P collections", Long: `Get all P2P collections in the pubsub topics. This is the list of collections of the node that are synchronized on the pubsub network.`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.NoArgs(cmd, args); err != nil { - return errors.New("must specify no argument") - } - return nil - }, + Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return ErrFailedToCreateRPCClient - } + store := mustGetStoreContext(cmd) - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() + cols, err := store.GetAllP2PCollections(cmd.Context()) if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) + return err } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() - - collections, err := client.GetAllP2PCollections(ctx) - if err != nil { - return errors.Wrap("failed to add P2P collections, request failed", err) - } - - if len(collections) > 0 { - log.FeedbackInfo(ctx, "Successfully got all P2P collections") - for _, col := range collections { - log.FeedbackInfo(ctx, col.Name, logging.NewKV("CollectionID", col.ID)) - } - } else { - log.FeedbackInfo(ctx, "No P2P collection found") - } - - return nil + return writeJSON(cmd, cols) }, } return cmd diff --git a/cli/p2p_collection_remove.go b/cli/p2p_collection_remove.go index 66dbd5fa16..ed67f5e7c6 100644 --- a/cli/p2p_collection_remove.go +++ b/cli/p2p_collection_remove.go @@ -11,51 +11,19 @@ package cli import ( - "context" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" ) -func MakeP2PCollectionRemoveCommand(cfg *config.Config) *cobra.Command { +func MakeP2PCollectionRemoveCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "remove [collectionID]", Short: "Remove P2P collections", Long: `Remove P2P collections from the followed pubsub topics. 
 The removed collections will no longer be synchronized between nodes.`,
-		Args: func(cmd *cobra.Command, args []string) error {
-			if err := cobra.MinimumNArgs(1)(cmd, args); err != nil {
-				return errors.New("must specify at least one collectionID")
-			}
-			return nil
-		},
+		Args: cobra.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
-			cred := insecure.NewCredentials()
-			client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred))
-			if err != nil {
-				return ErrFailedToCreateRPCClient
-			}
-
-			rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration()
-			if err != nil {
-				return errors.Wrap("failed to parse RPC timeout duration", err)
-			}
-
-			ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration)
-			defer cancel()
-
-			err = client.RemoveP2PCollections(ctx, args...)
-			if err != nil {
-				return errors.Wrap("failed to remove P2P collections, request failed", err)
-			}
-			log.FeedbackInfo(ctx, "Successfully removed P2P collections", logging.NewKV("Collections", args))
-			return nil
+			store := mustGetStoreContext(cmd)
+			return store.RemoveP2PCollection(cmd.Context(), args[0])
 		},
 	}
 	return cmd
diff --git a/cli/p2p_info.go b/cli/p2p_info.go
new file mode 100644
index 0000000000..1ddad18a52
--- /dev/null
+++ b/cli/p2p_info.go
@@ -0,0 +1,35 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+	"github.com/spf13/cobra"
+
+	"github.com/sourcenetwork/defradb/http"
+)
+
+func MakeP2PInfoCommand() *cobra.Command {
+	var cmd = &cobra.Command{
+		Use:   "info",
+		Short: "Get peer info from a DefraDB node",
+		Long:  `Get peer info from a DefraDB node`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			db := cmd.Context().Value(dbContextKey).(*http.Client)
+
+			res, err := db.PeerInfo(cmd.Context())
+			if err != nil {
+				return err
+			}
+			return writeJSON(cmd, res)
+		},
+	}
+	return cmd
+}
diff --git a/cli/replicator.go b/cli/p2p_replicator.go
similarity index 93%
rename from cli/replicator.go
rename to cli/p2p_replicator.go
index c7956c80a6..d12684be51 100644
--- a/cli/replicator.go
+++ b/cli/p2p_replicator.go
@@ -14,7 +14,7 @@ import (
 	"github.com/spf13/cobra"
 )
 
-func MakeReplicatorCommand() *cobra.Command {
+func MakeP2PReplicatorCommand() *cobra.Command {
 	var cmd = &cobra.Command{
 		Use:   "replicator",
 		Short: "Configure the replicator system",
diff --git a/cli/p2p_replicator_delete.go b/cli/p2p_replicator_delete.go
new file mode 100644
index 0000000000..7504d0c932
--- /dev/null
+++ b/cli/p2p_replicator_delete.go
@@ -0,0 +1,37 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/spf13/cobra"
+
+	"github.com/sourcenetwork/defradb/client"
+)
+
+func MakeP2PReplicatorDeleteCommand() *cobra.Command {
+	var cmd = &cobra.Command{
+		Use:   "delete <peer>",
+		Short: "Delete a replicator. It will stop synchronizing",
+		Long:  `Delete a replicator. It will stop synchronizing.`,
+		Args:  cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			store := mustGetStoreContext(cmd)
+
+			addr, err := peer.AddrInfoFromString(args[0])
+			if err != nil {
+				return err
+			}
+			return store.DeleteReplicator(cmd.Context(), client.Replicator{Info: *addr})
+		},
+	}
+	return cmd
+}
diff --git a/cli/p2p_replicator_getall.go b/cli/p2p_replicator_getall.go
new file mode 100644
index 0000000000..9192ed4d10
--- /dev/null
+++ b/cli/p2p_replicator_getall.go
@@ -0,0 +1,34 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+	"github.com/spf13/cobra"
+)
+
+func MakeP2PReplicatorGetAllCommand() *cobra.Command {
+	var cmd = &cobra.Command{
+		Use:   "getall",
+		Short: "Get all replicators",
+		Long: `Get all the replicators active in the P2P data sync system.
+These are the replicators that are currently replicating data from one node to another.`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			store := mustGetStoreContext(cmd)
+
+			reps, err := store.GetAllReplicators(cmd.Context())
+			if err != nil {
+				return err
+			}
+			return writeJSON(cmd, reps)
+		},
+	}
+	return cmd
+}
diff --git a/cli/p2p_replicator_set.go b/cli/p2p_replicator_set.go
new file mode 100644
index 0000000000..6b590b6ea7
--- /dev/null
+++ b/cli/p2p_replicator_set.go
@@ -0,0 +1,47 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package cli
+
+import (
+	"github.com/libp2p/go-libp2p/core/peer"
+	"github.com/spf13/cobra"
+
+	"github.com/sourcenetwork/defradb/client"
+)
+
+func MakeP2PReplicatorSetCommand() *cobra.Command {
+	var collections []string
+	var cmd = &cobra.Command{
+		Use:   "set [-c, --collection] <peer>",
+		Short: "Set a P2P replicator",
+		Long: `Add a new target replicator.
+A replicator replicates one or all collection(s) from this node to another.
+`,
+		Args: cobra.ExactArgs(1),
+		RunE: func(cmd *cobra.Command, args []string) error {
+			store := mustGetStoreContext(cmd)
+
+			addr, err := peer.AddrInfoFromString(args[0])
+			if err != nil {
+				return err
+			}
+			rep := client.Replicator{
+				Info:    *addr,
+				Schemas: collections,
+			}
+			return store.SetReplicator(cmd.Context(), rep)
+		},
+	}
+
+	cmd.Flags().StringSliceVarP(&collections, "collection", "c",
+		[]string{}, "Define the collection for the replicator")
+	return cmd
+}
diff --git a/cli/peerid.go b/cli/peerid.go
deleted file mode 100644
index a3d269fb2d..0000000000
--- a/cli/peerid.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "encoding/json" - "io" - "net/http" - "os" - - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" -) - -func MakePeerIDCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "peerid", - Short: "Get the PeerID of the node", - Long: `Get the PeerID of the node.`, - RunE: func(cmd *cobra.Command, _ []string) (err error) { - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if !isFileInfoPipe(stdout) { - log.FeedbackInfo(cmd.Context(), "Requesting PeerID...") - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.PeerIDPath) - if err != nil { - return errors.Wrap("failed to join endpoint", err) - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return errors.Wrap("failed to request PeerID", err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - if res.StatusCode == http.StatusNotFound { - r := httpapi.ErrorResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return errors.Wrap("parsing of response failed", err) - } - if len(r.Errors) > 0 { - if isFileInfoPipe(stdout) { - b, err := json.Marshal(r.Errors[0]) - if err != nil { - return errors.Wrap("mashalling error response failed", err) - } - cmd.Println(string(b)) - } else { - log.FeedbackInfo(cmd.Context(), r.Errors[0].Message) - } - return nil - } - return errors.New("no PeerID available. P2P might be disabled") - } - - r := httpapi.DataResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return errors.Wrap("parsing of response failed", err) - } - if isFileInfoPipe(stdout) { - b, err := json.Marshal(r.Data) - if err != nil { - return errors.Wrap("mashalling data response failed", err) - } - cmd.Println(string(b)) - } else if data, ok := r.Data.(map[string]any); ok { - log.FeedbackInfo(cmd.Context(), data["peerID"].(string)) - } - - return nil - }, - } - return cmd -} diff --git a/cli/peerid_test.go b/cli/peerid_test.go deleted file mode 100644 index 34874ef80d..0000000000 --- a/cli/peerid_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package cli - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/http" - "testing" - - "github.com/stretchr/testify/assert" - - httpapi "github.com/sourcenetwork/defradb/api/http" -) - -func TestGetPeerIDCmd(t *testing.T) { - cfg := getTestConfig(t) - peerIDCmd := MakePeerIDCommand(cfg) - dir := t.TempDir() - ctx := context.Background() - cfg.Datastore.Store = "memory" - cfg.Datastore.Badger.Path = dir - cfg.Net.P2PDisabled = false - - di, err := start(ctx, cfg) - if err != nil { - t.Fatal(err) - } - defer di.close(ctx) - - b := bytes.NewBufferString("") - peerIDCmd.SetOut(b) - - err = peerIDCmd.RunE(peerIDCmd, nil) - if err != nil { - t.Fatal(err) - } - - out, err := io.ReadAll(b) - if err != nil { - t.Fatal(err) - } - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, di.node.PeerID().String(), r["peerID"]) -} - -func TestGetPeerIDCmdWithNoP2P(t *testing.T) { - cfg := getTestConfig(t) - peerIDCmd := MakePeerIDCommand(cfg) - dir := t.TempDir() - ctx := context.Background() - cfg.Datastore.Store = "memory" - cfg.Datastore.Badger.Path = dir - cfg.Net.P2PDisabled = true - - di, err := start(ctx, cfg) - if err != nil { - t.Fatal(err) - } - defer di.close(ctx) - - b := bytes.NewBufferString("") - peerIDCmd.SetOut(b) - - err = peerIDCmd.RunE(peerIDCmd, nil) - if err != nil { - t.Fatal(err) - } - - out, err := io.ReadAll(b) - if err != nil { - t.Fatal(err) - } - - r := httpapi.ErrorItem{} - err = json.Unmarshal(out, &r) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, http.StatusNotFound, r.Extensions.Status) - assert.Equal(t, "Not Found", r.Extensions.HTTPError) - assert.Equal(t, "no PeerID available. P2P might be disabled", r.Message) -} diff --git a/cli/ping.go b/cli/ping.go deleted file mode 100644 index 210847dfcc..0000000000 --- a/cli/ping.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package cli - -import ( - "encoding/json" - "io" - "net/http" - "os" - - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" -) - -func MakePingCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "ping", - Short: "Ping to test connection with a node", - RunE: func(cmd *cobra.Command, _ []string) (err error) { - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if !isFileInfoPipe(stdout) { - log.FeedbackInfo(cmd.Context(), "Sending ping...") - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.PingPath) - if err != nil { - return errors.Wrap("failed to join endpoint", err) - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return errors.Wrap("failed to send ping", err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - type pingResponse struct { - Data struct { - Response string `json:"response"` - } `json:"data"` - } - r := pingResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return errors.Wrap("parsing of response failed", err) - } - log.FeedbackInfo(cmd.Context(), r.Data.Response) - } - return nil - }, - } - return cmd -} diff --git a/cli/replicator_delete.go b/cli/replicator_delete.go deleted file mode 100644 index eb7e580f12..0000000000 --- a/cli/replicator_delete.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - - "github.com/libp2p/go-libp2p/core/peer" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" -) - -func MakeReplicatorDeleteCommand(cfg *config.Config) *cobra.Command { - var ( - fullRep bool - col []string - ) - var cmd = &cobra.Command{ - Use: "delete [-f, --full | -c, --collection] ", - Short: "Delete a replicator. It will stop synchronizing", - Long: `Delete a replicator. 
It will stop synchronizing.`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.ExactArgs(1)(cmd, args); err != nil { - return errors.New("must specify one argument: PeerID") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - pidString := args[0] - - if len(col) == 0 && !fullRep { - return errors.New("must run with either --full or --collection") - } - - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return ErrFailedToCreateRPCClient - } - - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) - } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() - - pid, err := peer.Decode(pidString) - if err != nil { - return NewErrFailedParsePeerID(err) - } - - err = client.DeleteReplicator(ctx, pid) - if err != nil { - return errors.Wrap("failed to delete replicator, request failed", err) - } - log.FeedbackInfo(ctx, "Successfully deleted replicator", logging.NewKV("PeerID", pid.String())) - return nil - }, - } - cmd.Flags().BoolVarP(&fullRep, "full", "f", false, "Set the replicator to act on all collections") - cmd.Flags().StringArrayVarP(&col, "collection", "c", - []string{}, "Define the collection for the replicator") - cmd.MarkFlagsMutuallyExclusive("full", "collection") - return cmd -} diff --git a/cli/replicator_getall.go b/cli/replicator_getall.go deleted file mode 100644 index 63cd6533ba..0000000000 --- a/cli/replicator_getall.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" -) - -func MakeReplicatorGetallCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "getall", - Short: "Get all replicators", - Long: `Get all the replicators active in the P2P data sync system. 
-These are the replicators that are currently replicating data from one node to another.`, - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 0 { - if err := cmd.Usage(); err != nil { - return err - } - return errors.New("must specify no argument") - } - - log.FeedbackInfo( - cmd.Context(), - "Getting all replicators", - logging.NewKV("RPCAddress", cfg.Net.RPCAddress), - ) - - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return errors.Wrap("failed to create RPC client", err) - } - - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) - } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() - - reps, err := client.GetAllReplicators(ctx) - if err != nil { - return errors.Wrap("failed to get replicators, request failed", err) - } - if len(reps) > 0 { - log.FeedbackInfo(ctx, "Successfully got all replicators") - for _, rep := range reps { - log.FeedbackInfo( - ctx, - rep.Info.ID.String(), - logging.NewKV("Schemas", rep.Schemas), - logging.NewKV("Addrs", rep.Info.Addrs), - ) - } - } else { - log.FeedbackInfo(ctx, "No replicator found") - } - - return nil - }, - } - return cmd -} diff --git a/cli/replicator_set.go b/cli/replicator_set.go deleted file mode 100644 index acb70d0cfd..0000000000 --- a/cli/replicator_set.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - - ma "github.com/multiformats/go-multiaddr" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" -) - -func MakeReplicatorSetCommand(cfg *config.Config) *cobra.Command { - var ( - fullRep bool - col []string - ) - var cmd = &cobra.Command{ - Use: "set [-f, --full | -c, --collection] ", - Short: "Set a P2P replicator", - Long: `Add a new target replicator. -A replicator replicates one or all collection(s) from this node to another. 
-`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.ExactArgs(1)(cmd, args); err != nil { - return errors.New("must specify one argument: peer") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - peerAddr, err := ma.NewMultiaddr(args[0]) - if err != nil { - return NewErrFailedParsePeerID(err) - } - if len(col) == 0 && !fullRep { - return errors.New("must run with either --full or --collection") - } - - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return ErrFailedToCreateRPCClient - } - - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) - } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() - - pid, err := client.SetReplicator(ctx, peerAddr, col...) - if err != nil { - return errors.Wrap("failed to add replicator, request failed", err) - } - log.FeedbackInfo( - ctx, - "Successfully added replicator", - logging.NewKV("PeerID", pid), - logging.NewKV("Collections", col), - ) - return nil - }, - } - - cmd.Flags().BoolVarP(&fullRep, "full", "f", false, "Set the replicator to act on all collections") - cmd.Flags().StringArrayVarP(&col, "collection", "c", - []string{}, "Define the collection for the replicator") - cmd.MarkFlagsMutuallyExclusive("full", "collection") - return cmd -} diff --git a/cli/request.go b/cli/request.go index 1b8f86ced8..56e33d7c4a 100644 --- a/cli/request.go +++ b/cli/request.go @@ -12,18 +12,19 @@ package cli import ( "io" - "net/http" - "net/url" "os" "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/errors" ) -func MakeRequestCommand(cfg *config.Config) *cobra.Command { +const ( + REQ_RESULTS_HEADER = "------ Request Results ------\n" + SUB_RESULTS_HEADER = "------ Subscription Results ------\n" +) + +func MakeRequestCommand() *cobra.Command { var filePath string var cmd = &cobra.Command{ Use: "query [query request]", @@ -43,101 +44,43 @@ A GraphQL client such as GraphiQL (https://github.com/graphql/graphiql) can be u with the database more conveniently. To learn more about the DefraDB GraphQL Query Language, refer to https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) (err error) { - var request string + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - fi, err := os.Stdin.Stat() - if err != nil { - return err - } - - if filePath != "" { - bytes, err := os.ReadFile(filePath) + var request string + switch { + case filePath != "": + data, err := os.ReadFile(filePath) if err != nil { - return ErrFailedToReadFile - } - request = string(bytes) - } else if len(args) > 1 { - if err = cmd.Usage(); err != nil { return err } - return errors.New("too many arguments") - } else if isFileInfoPipe(fi) && (len(args) == 0 || args[0] != "-") { - log.FeedbackInfo( - cmd.Context(), - "Run 'defradb client query -' to read from stdin. 
Example: 'cat my.graphql | defradb client query -').", - ) - return nil - } else if len(args) == 0 { - err := cmd.Help() + request = string(data) + case len(args) > 0 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) if err != nil { - return errors.Wrap("failed to print help", err) - } - return nil - } else if args[0] == "-" { - stdin, err := readStdin() - if err != nil { - return errors.Wrap("failed to read stdin", err) - } - if len(stdin) == 0 { - return errors.New("no query request in stdin provided") - } else { - request = stdin + return err } - } else { - request = args[0] + request = string(data) + case len(args) > 0: + request = string(args[0]) } if request == "" { return errors.New("request cannot be empty") } + result := store.ExecRequest(cmd.Context(), request) - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.GraphQLPath) - if err != nil { - return errors.Wrap("joining paths failed", err) + var errors []string + for _, err := range result.GQL.Errors { + errors = append(errors, err.Error()) } - - p := url.Values{} - p.Add("query", request) - endpoint.RawQuery = p.Encode() - - res, err := http.Get(endpoint.String()) - if err != nil { - return errors.Wrap("failed request", err) + if result.Pub == nil { + cmd.Print(REQ_RESULTS_HEADER) + return writeJSON(cmd, map[string]any{"data": result.GQL.Data, "errors": errors}) } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - fi, err = os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - - if isFileInfoPipe(fi) { - cmd.Println(string(response)) - } else { - graphlErr, err := hasGraphQLErrors(response) - if err != nil { - return errors.Wrap("failed to handle GraphQL errors", err) - } - indentedResult, err := indentJSON(response) - if err != nil { - return errors.Wrap("failed to pretty print result", err) - } - if graphlErr { - log.FeedbackError(cmd.Context(), indentedResult) - } else { - log.FeedbackInfo(cmd.Context(), indentedResult) - } + cmd.Print(SUB_RESULTS_HEADER) + for item := range result.Pub.Stream() { + writeJSON(cmd, item) //nolint:errcheck } return nil }, diff --git a/cli/root.go b/cli/root.go index e639cde785..729b638f02 100644 --- a/cli/root.go +++ b/cli/root.go @@ -16,34 +16,19 @@ import ( "github.com/spf13/cobra" "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" ) func MakeRootCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ - Use: "defradb", - Short: "DefraDB Edge Database", + SilenceUsage: true, + Use: "defradb", + Short: "DefraDB Edge Database", Long: `DefraDB is the edge database to power the user-centric future. Start a DefraDB node, interact with a local or remote node, and much more. `, - // Runs on subcommands before their Run function, to handle configuration and top-level flags. - // Loads the rootDir containing the configuration file, otherwise warn about it and load a default configuration. - // This allows some subcommands (`init`, `start`) to override the PreRun to create a rootDir by default. 
- PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { - if err := cfg.LoadRootDirFromFlagOrDefault(); err != nil { - return err - } - if cfg.ConfigFileExists() { - if err := cfg.LoadWithRootdir(true); err != nil { - return errors.Wrap("failed to load config", err) - } - } else { - if err := cfg.LoadWithRootdir(false); err != nil { - return errors.Wrap("failed to load config", err) - } - } - return nil + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return loadConfig(cfg) }, } diff --git a/cli/rpc.go b/cli/rpc.go deleted file mode 100644 index afb1a007e2..0000000000 --- a/cli/rpc.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - - "github.com/spf13/cobra" - - "github.com/sourcenetwork/defradb/config" -) - -func MakeRPCCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "rpc", - Short: "Interact with a DefraDB node via RPC", - Long: "Interact with a DefraDB node via RPC.", - } - cmd.PersistentFlags().String( - "addr", cfg.Net.RPCAddress, - "RPC endpoint address", - ) - - if err := cfg.BindFlag("net.rpcaddress", cmd.PersistentFlags().Lookup("addr")); err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind net.rpcaddress", err) - } - return cmd -} diff --git a/cli/schema_add.go b/cli/schema_add.go index b5f28f15d3..b93427a883 100644 --- a/cli/schema_add.go +++ b/cli/schema_add.go @@ -11,21 +11,14 @@ package cli import ( - "encoding/json" + "fmt" "io" - "net/http" "os" - "strings" "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" ) -func MakeSchemaAddCommand(cfg *config.Config) *cobra.Command { +func MakeSchemaAddCommand() *cobra.Command { var schemaFile string var cmd = &cobra.Command{ Use: "add [schema]", @@ -42,117 +35,34 @@ Example: add from stdin: cat schema.graphql | defradb client schema add - Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) (err error) { - var schema string - fi, err := os.Stdin.Stat() - if err != nil { - return err - } - - if len(args) > 1 { - if err = cmd.Usage(); err != nil { - return err - } - return errors.New("too many arguments") - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - if schemaFile != "" { - buf, err := os.ReadFile(schemaFile) - if err != nil { - return errors.Wrap("failed to read schema file", err) - } - schema = string(buf) - } else if isFileInfoPipe(fi) && (len(args) == 0 || args[0] != "-") { - log.FeedbackInfo( - cmd.Context(), - "Run 'defradb client schema add -' to read from stdin."+ - " Example: 'cat schema.graphql | defradb client schema add -').", - ) - return nil - } else if len(args) == 0 { - err := cmd.Help() + var schema string + switch { + case schemaFile != "": + data, err := os.ReadFile(schemaFile) if err != nil { - return errors.Wrap("failed to print help", err) + return err } - return nil - } else if args[0] == "-" { - stdin, err := readStdin() + schema = 
string(data) + case len(args) > 0 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) if err != nil { - return errors.Wrap("failed to read stdin", err) - } - if len(stdin) == 0 { - return errors.New("no schema in stdin provided") - } else { - schema = stdin + return err } - } else { + schema = string(data) + case len(args) > 0: schema = args[0] + default: + return fmt.Errorf("schema cannot be empty") } - if schema == "" { - return errors.New("empty schema provided") - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) + cols, err := store.AddSchema(cmd.Context(), schema) if err != nil { - return errors.Wrap("join paths failed", err) - } - - res, err := http.Post(endpoint.String(), "text", strings.NewReader(schema)) - if err != nil { - return errors.Wrap("failed to post schema", err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - graphlErr, err := hasGraphQLErrors(response) - if err != nil { - return errors.Wrap("failed to handle GraphQL errors", err) - } - if graphlErr { - indentedResult, err := indentJSON(response) - if err != nil { - return errors.Wrap("failed to pretty print result", err) - } - log.FeedbackError(cmd.Context(), indentedResult) - } else { - type schemaResponse struct { - Data struct { - Result string `json:"result"` - Collections []struct { - Name string `json:"name"` - ID string `json:"id"` - } `json:"collections"` - } `json:"data"` - } - r := schemaResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return errors.Wrap("failed to unmarshal response", err) - } - if r.Data.Result == "success" { - log.FeedbackInfo(cmd.Context(), "Successfully added schema.", logging.NewKV("Collections", r.Data.Collections)) - } - log.FeedbackInfo(cmd.Context(), r.Data.Result) - } + return err } - return nil + return writeJSON(cmd, cols) }, } cmd.Flags().StringVarP(&schemaFile, "file", "f", "", "File to load a schema from") diff --git a/cli/schema_list.go b/cli/schema_list.go deleted file mode 100644 index 3a0e32bcce..0000000000 --- a/cli/schema_list.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package cli - -import ( - "encoding/json" - "io" - "net/http" - - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" -) - -type schemaListResponse struct { - Data struct { - Collections []struct { - Name string `json:"name"` - ID string `json:"id"` - VersionID string `json:"version_id"` - Fields []struct { - ID string `json:"id"` - Name string `json:"name"` - Kind string `json:"kind"` - Internal bool `json:"internal"` - } `json:"fields"` - } `json:"collections"` - } `json:"data"` - Errors []struct { - Message string `json:"message"` - } `json:"errors"` -} - -func MakeSchemaListCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "list", - Short: "List schema types with their respective fields", - RunE: func(cmd *cobra.Command, args []string) (err error) { - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return NewErrFailedToSendRequest(err) - } - defer res.Body.Close() //nolint:errcheck - - data, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - var r schemaListResponse - if err := json.Unmarshal(data, &r); err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - return errors.New("failed to list schemas", errors.NewKV("errors", r.Errors)) - } - - for _, c := range r.Data.Collections { - cmd.Printf("# Schema ID: %s\n", c.ID) - cmd.Printf("# Version ID: %s\n", c.VersionID) - cmd.Printf("type %s {\n", c.Name) - for _, f := range c.Fields { - if !f.Internal { - cmd.Printf("\t%s: %s\n", f.Name, f.Kind) - } - } - cmd.Printf("}\n\n") - } - - return nil - }, - } - return cmd -} diff --git a/cli/schema_migration_down.go b/cli/schema_migration_down.go new file mode 100644 index 0000000000..1dcb5e64da --- /dev/null +++ b/cli/schema_migration_down.go @@ -0,0 +1,91 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + "io" + "os" + + "github.com/sourcenetwork/immutable/enumerable" + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/datastore" +) + +func MakeSchemaMigrationDownCommand() *cobra.Command { + var file string + var schemaVersionID string + var cmd = &cobra.Command{ + Use: "down --version ", + Short: "Reverses the migration from the specified schema version.", + Long: `Reverses the migration from the specified schema version. +Documents is a list of documents to reverse the migration from. 
+ +Example: migrate from string + defradb client schema migration down --version bae123 '[{"name": "Bob"}]' + +Example: migrate from file + defradb client schema migration down --version bae123 -f documents.json + +Example: migrate from stdin + cat documents.json | defradb client schema migration down --version bae123 - + `, + Args: cobra.RangeArgs(0, 1), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + + var srcData []byte + switch { + case file != "": + data, err := os.ReadFile(file) + if err != nil { + return err + } + srcData = data + case len(args) == 1 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) + if err != nil { + return err + } + srcData = data + case len(args) == 1: + srcData = []byte(args[0]) + default: + return ErrNoDocOrFile + } + + var src []map[string]any + if err := json.Unmarshal(srcData, &src); err != nil { + return err + } + lens := store.LensRegistry() + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + lens = lens.WithTxn(tx) + } + out, err := lens.MigrateDown(cmd.Context(), enumerable.New(src), schemaVersionID) + if err != nil { + return err + } + var value []map[string]any + err = enumerable.ForEach(out, func(item map[string]any) { + value = append(value, item) + }) + if err != nil { + return err + } + return writeJSON(cmd, value) + }, + } + cmd.Flags().StringVarP(&file, "file", "f", "", "File containing document(s)") + cmd.Flags().StringVar(&schemaVersionID, "version", "", "Schema version id") + return cmd +} diff --git a/cli/schema_migration_get.go b/cli/schema_migration_get.go index 333c2d9cf4..43b66599b7 100644 --- a/cli/schema_migration_get.go +++ b/cli/schema_migration_get.go @@ -11,21 +11,10 @@ package cli import ( - "encoding/json" - "io" - "net/http" - "os" - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" ) -func MakeSchemaMigrationGetCommand(cfg *config.Config) *cobra.Command { +func MakeSchemaMigrationGetCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "get", Short: "Gets the schema migrations within DefraDB", @@ -35,63 +24,14 @@ Example: defradb client schema migration get' Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) (err error) { - if err := cobra.NoArgs(cmd, args); err != nil { - return NewErrTooManyArgs(0, len(args)) - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaMigrationPath) - if err != nil { - return errors.Wrap("join paths failed", err) - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - res, err := http.Get(endpoint.String()) + cfgs, err := store.LensRegistry().Config(cmd.Context()) if err != nil { - return errors.Wrap("failed to get schema migrations", err) + return err } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - type migrationGetResponse struct { - Data struct { - Configuration []client.LensConfig 
`json:"configuration"` - } `json:"data"` - Errors []struct { - Message string `json:"message"` - } `json:"errors"` - } - r := migrationGetResponse{} - err = json.Unmarshal(response, &r) - log.FeedbackInfo(cmd.Context(), string(response)) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to get schema migrations", - logging.NewKV("Errors", r.Errors)) - } else { - log.FeedbackInfo(cmd.Context(), "Successfully got schema migrations", - logging.NewKV("Configuration", r.Data.Configuration)) - } - } - - return nil + return writeJSON(cmd, cfgs) }, } return cmd diff --git a/cli/schema_migration_reload.go b/cli/schema_migration_reload.go new file mode 100644 index 0000000000..d04aebed65 --- /dev/null +++ b/cli/schema_migration_reload.go @@ -0,0 +1,35 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/datastore" +) + +func MakeSchemaMigrationReloadCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "reload", + Short: "Reload the schema migrations within DefraDB", + Long: `Reload the schema migrations within DefraDB`, + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + + lens := store.LensRegistry() + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + lens = lens.WithTxn(tx) + } + return lens.ReloadLenses(cmd.Context()) + }, + } + return cmd +} diff --git a/cli/schema_migration_set.go b/cli/schema_migration_set.go index 633cbf0115..280130b8db 100644 --- a/cli/schema_migration_set.go +++ b/cli/schema_migration_set.go @@ -13,21 +13,16 @@ package cli import ( "encoding/json" "io" - "net/http" "os" "strings" "github.com/lens-vm/lens/host-go/config/model" "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" ) -func MakeSchemaMigrationSetCommand(cfg *config.Config) *cobra.Command { +func MakeSchemaMigrationSetCommand() *cobra.Command { var lensFile string var cmd = &cobra.Command{ Use: "set [src] [dst] [cfg]", @@ -44,73 +39,39 @@ Example: add from stdin: cat schema_migration.lens | defradb client schema migration set bae123 bae456 - Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) (err error) { - if err := cobra.MinimumNArgs(2)(cmd, args); err != nil { - return NewErrMissingArgs([]string{"src", "dst", "cfg"}) - } - if err := cobra.MaximumNArgs(3)(cmd, args); err != nil { - return NewErrTooManyArgs(3, len(args)) - } + Args: cobra.RangeArgs(2, 3), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) var lensCfgJson string - var srcSchemaVersionID string - var dstSchemaVersionID string - fi, err := os.Stdin.Stat() - if err != nil { - return err - } - - if lensFile != "" { - buf, err := os.ReadFile(lensFile) + switch { + case lensFile != "": + data, err := os.ReadFile(lensFile) if err != nil { - return 
errors.Wrap("failed to read schema file", err) + return err } - lensCfgJson = string(buf) - } else if len(args) == 2 { - // If the lensFile flag has not been provided then it must be provided as an arg - // and thus len(args) cannot be 2 - return NewErrMissingArg("cfg") - } else if isFileInfoPipe(fi) && args[2] != "-" { - log.FeedbackInfo( - cmd.Context(), - "Run 'defradb client schema migration set -' to read from stdin."+ - " Example: 'cat schema_migration.lens | defradb client schema migration set -').", - ) - return nil - } else if args[2] == "-" { - stdin, err := readStdin() + lensCfgJson = string(data) + case len(args) == 3 && args[2] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) if err != nil { - return errors.Wrap("failed to read stdin", err) + return err } - if len(stdin) == 0 { - return errors.New("no lens cfg in stdin provided") - } else { - lensCfgJson = stdin - } - } else { + lensCfgJson = string(data) + case len(args) == 3: lensCfgJson = args[2] + default: + return ErrNoLensConfig } - srcSchemaVersionID = args[0] - dstSchemaVersionID = args[1] - - if lensCfgJson == "" { - return NewErrMissingArg("cfg") - } - if srcSchemaVersionID == "" { - return NewErrMissingArg("src") - } - if dstSchemaVersionID == "" { - return NewErrMissingArg("dst") - } + srcSchemaVersionID := args[0] + dstSchemaVersionID := args[1] decoder := json.NewDecoder(strings.NewReader(lensCfgJson)) decoder.DisallowUnknownFields() var lensCfg model.Lens - err = decoder.Decode(&lensCfg) - if err != nil { - return errors.Wrap("invalid lens configuration", err) + if err := decoder.Decode(&lensCfg); err != nil { + return NewErrInvalidLensConfig(err) } migrationCfg := client.LensConfig{ @@ -119,58 +80,7 @@ Learn more about the DefraDB GraphQL Schema Language on https://docs.source.netw Lens: lensCfg, } - migrationCfgJson, err := json.Marshal(migrationCfg) - if err != nil { - return errors.Wrap("failed to marshal cfg", err) - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaMigrationPath) - if err != nil { - return errors.Wrap("join paths failed", err) - } - - res, err := http.Post(endpoint.String(), "application/json", strings.NewReader(string(migrationCfgJson))) - if err != nil { - return errors.Wrap("failed to post schema migration", err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - type migrationSetResponse struct { - Errors []struct { - Message string `json:"message"` - } `json:"errors"` - } - r := migrationSetResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to set schema migration", - logging.NewKV("Errors", r.Errors)) - } else { - log.FeedbackInfo(cmd.Context(), "Successfully set schema migration") - } - } - - return nil + return store.LensRegistry().SetMigration(cmd.Context(), migrationCfg) }, } cmd.Flags().StringVarP(&lensFile, "file", "f", "", "Lens configuration file") diff --git a/cli/schema_migration_up.go b/cli/schema_migration_up.go new file mode 100644 index 0000000000..3b0b522349 --- /dev/null +++ b/cli/schema_migration_up.go @@ -0,0 +1,91 @@ +// Copyright 2023 
Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + "io" + "os" + + "github.com/sourcenetwork/immutable/enumerable" + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/datastore" +) + +func MakeSchemaMigrationUpCommand() *cobra.Command { + var file string + var schemaVersionID string + var cmd = &cobra.Command{ + Use: "up --version ", + Short: "Applies the migration to the specified schema version.", + Long: `Applies the migration to the specified schema version. +Documents is a list of documents to apply the migration to. + +Example: migrate from string + defradb client schema migration up --version bae123 '[{"name": "Bob"}]' + +Example: migrate from file + defradb client schema migration up --version bae123 -f documents.json + +Example: migrate from stdin + cat documents.json | defradb client schema migration up --version bae123 - + `, + Args: cobra.RangeArgs(0, 1), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + + var srcData []byte + switch { + case file != "": + data, err := os.ReadFile(file) + if err != nil { + return err + } + srcData = data + case len(args) == 1 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) + if err != nil { + return err + } + srcData = data + case len(args) == 1: + srcData = []byte(args[0]) + default: + return ErrNoDocOrFile + } + + var src []map[string]any + if err := json.Unmarshal(srcData, &src); err != nil { + return err + } + lens := store.LensRegistry() + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + lens = lens.WithTxn(tx) + } + out, err := lens.MigrateUp(cmd.Context(), enumerable.New(src), schemaVersionID) + if err != nil { + return err + } + var value []map[string]any + err = enumerable.ForEach(out, func(item map[string]any) { + value = append(value, item) + }) + if err != nil { + return err + } + return writeJSON(cmd, value) + }, + } + cmd.Flags().StringVarP(&file, "file", "f", "", "File containing document(s)") + cmd.Flags().StringVar(&schemaVersionID, "version", "", "Schema version id") + return cmd +} diff --git a/cli/schema_patch.go b/cli/schema_patch.go index b1e962c51a..70f4283c85 100644 --- a/cli/schema_patch.go +++ b/cli/schema_patch.go @@ -11,21 +11,16 @@ package cli import ( - "encoding/json" + "fmt" "io" - "net/http" "os" - "strings" "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" ) -func MakeSchemaPatchCommand(cfg *config.Config) *cobra.Command { +func MakeSchemaPatchCommand() *cobra.Command { var patchFile string - + var setDefault bool var cmd = &cobra.Command{ Use: "patch [schema]", Short: "Patch an existing schema type", @@ -43,113 +38,33 @@ Example: patch from stdin: cat patch.json | defradb client schema patch - To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) (err error) { - var patch string - fi, err := os.Stdin.Stat() - if err != nil { - return err - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - if len(args) > 1 { - if err = cmd.Usage(); err != nil { - 
return err - } - return NewErrTooManyArgs(1, len(args)) - } - - if patchFile != "" { - buf, err := os.ReadFile(patchFile) + var patch string + switch { + case patchFile != "": + data, err := os.ReadFile(patchFile) if err != nil { - return NewFailedToReadFile(err) + return err } - patch = string(buf) - } else if isFileInfoPipe(fi) && (len(args) == 0 || args[0] != "-") { - log.FeedbackInfo( - cmd.Context(), - "Run 'defradb client schema patch -' to read from stdin."+ - " Example: 'cat patch.json | defradb client schema patch -').", - ) - return nil - } else if len(args) == 0 { - // ignore error, nothing we can do about it - // as printing an error about failing to print help - // is useless - //nolint:errcheck - cmd.Help() - return nil - } else if args[0] == "-" { - stdin, err := readStdin() + patch = string(data) + case len(args) > 0 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) if err != nil { - return NewFailedToReadStdin(err) - } - if len(stdin) == 0 { - return ErrEmptyStdin - } else { - patch = stdin + return err } - } else { + patch = string(data) + case len(args) > 0: patch = args[0] + default: + return fmt.Errorf("patch cannot be empty") } - if patch == "" { - return ErrEmptyFile - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) - if err != nil { - return err - } - - req, err := http.NewRequest(http.MethodPatch, endpoint.String(), strings.NewReader(patch)) - if err != nil { - return NewErrFailedToSendRequest(err) - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - //nolint:errcheck - defer res.Body.Close() - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return NewErrFailedToStatStdOut(err) - } - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - graphlErr, err := hasGraphQLErrors(response) - if err != nil { - return NewErrFailedToHandleGQLErrors(err) - } - if graphlErr { - indentedResult, err := indentJSON(response) - if err != nil { - return NewErrFailedToPrettyPrintResponse(err) - } - log.FeedbackError(cmd.Context(), indentedResult) - } else { - type schemaResponse struct { - Data struct { - Result string `json:"result"` - } `json:"data"` - } - r := schemaResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - log.FeedbackInfo(cmd.Context(), r.Data.Result) - } - } - return nil + return store.PatchSchema(cmd.Context(), patch, setDefault) }, } + cmd.Flags().BoolVar(&setDefault, "set-default", false, "Set default schema version") cmd.Flags().StringVarP(&patchFile, "file", "f", "", "File to load a patch from") return cmd } diff --git a/cli/schema_set_default.go b/cli/schema_set_default.go new file mode 100644 index 0000000000..cdb6bd8bd8 --- /dev/null +++ b/cli/schema_set_default.go @@ -0,0 +1,29 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
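+//
+// schema_set_default.go wires `defradb client schema set-default`, which marks
+// an existing schema version as the collection's active version. A minimal
+// usage sketch (illustrative only; the patch file name and version id are
+// stand-ins, not values taken from this commit):
+//
+//	defradb client schema patch -f patch.json
+//	defradb client schema set-default bae123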
+ +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeSchemaSetDefaultCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "set-default [versionID]", + Short: "Set the default schema version", + Long: `Set the default schema version`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + return store.SetDefaultSchemaVersion(cmd.Context(), args[0]) + }, + } + return cmd +} diff --git a/cli/serverdump.go b/cli/server_dump.go similarity index 100% rename from cli/serverdump.go rename to cli/server_dump.go diff --git a/cli/start.go b/cli/start.go index 9185af8c92..c3b869fbf8 100644 --- a/cli/start.go +++ b/cli/start.go @@ -28,19 +28,21 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/keepalive" - httpapi "github.com/sourcenetwork/defradb/api/http" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" ds "github.com/sourcenetwork/defradb/datastore" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" + httpapi "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" netpb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" ) +const badgerDatastoreName = "badger" + func MakeStartCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "start", @@ -48,27 +50,11 @@ func MakeStartCommand(cfg *config.Config) *cobra.Command { Long: "Start a DefraDB node.", // Load the root config if it exists, otherwise create it. PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { - if err := cfg.LoadRootDirFromFlagOrDefault(); err != nil { + if err := loadConfig(cfg); err != nil { return err } - if cfg.ConfigFileExists() { - if err := cfg.LoadWithRootdir(true); err != nil { - return config.NewErrLoadingConfig(err) - } - log.FeedbackInfo(cmd.Context(), fmt.Sprintf("Configuration loaded from DefraDB directory %v", cfg.Rootdir)) - } else { - if err := cfg.LoadWithRootdir(false); err != nil { - return config.NewErrLoadingConfig(err) - } - if config.FolderExists(cfg.Rootdir) { - if err := cfg.WriteConfigFile(); err != nil { - return err - } - } else { - if err := cfg.CreateRootDirAndConfigFile(); err != nil { - return err - } - } + if !cfg.ConfigFileExists() { + return createConfig(cfg) } return nil }, @@ -351,16 +337,7 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { // run the server in a separate goroutine go func() { - log.FeedbackInfo( - ctx, - fmt.Sprintf( - "Providing HTTP API at %s%s. Use the GraphQL request endpoint at %s%s/graphql ", - cfg.API.AddressToURL(), - httpapi.RootPath, - cfg.API.AddressToURL(), - httpapi.RootPath, - ), - ) + log.FeedbackInfo(ctx, fmt.Sprintf("Providing HTTP API at %s.", cfg.API.AddressToURL())) if err := s.Run(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) { log.FeedbackErrorE(ctx, "Failed to run the HTTP server", err) if n != nil { diff --git a/api/http/http.go b/cli/tx.go similarity index 51% rename from api/http/http.go rename to cli/tx.go index 3ac3d62bdd..b4d278df6d 100644 --- a/api/http/http.go +++ b/cli/tx.go @@ -1,4 +1,4 @@ -// Copyright 2022 Democratized Data Foundation +// Copyright 2023 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. 
@@ -8,11 +8,18 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -/* -Package http provides DefraDB's HTTP API, offering various capabilities. -*/ -package http +package cli -import "github.com/sourcenetwork/defradb/logging" +import ( + "github.com/spf13/cobra" +) -var log = logging.MustNewLogger("http") +func MakeTxCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "tx", + Short: "Create, commit, and discard DefraDB transactions", + Long: `Create, commit, and discard DefraDB transactions`, + } + + return cmd +} diff --git a/cli/tx_commit.go b/cli/tx_commit.go new file mode 100644 index 0000000000..260a274a08 --- /dev/null +++ b/cli/tx_commit.go @@ -0,0 +1,41 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "strconv" + + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/http" +) + +func MakeTxCommitCommand(cfg *config.Config) *cobra.Command { + var cmd = &cobra.Command{ + Use: "commit [id]", + Short: "Commit a DefraDB transaction.", + Long: `Commit a DefraDB transaction.`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + id, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return err + } + tx, err := http.NewTransaction(cfg.API.Address, id) + if err != nil { + return err + } + return tx.Commit(cmd.Context()) + }, + } + return cmd +} diff --git a/cli/tx_create.go b/cli/tx_create.go new file mode 100644 index 0000000000..987a784077 --- /dev/null +++ b/cli/tx_create.go @@ -0,0 +1,46 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
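+//
+// tx_create.go wires `defradb client tx create`, which opens a transaction on
+// the node and prints its id as JSON. A sketch of the full lifecycle
+// (illustrative only; the id is assigned by the node at runtime):
+//
+//	defradb client tx create                # -> {"id": 1}
+//	defradb client schema patch --tx 1 ...  # do work inside the transaction
+//	defradb client tx commit 1              # or: defradb client tx discard 1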
+ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/datastore" +) + +func MakeTxCreateCommand(cfg *config.Config) *cobra.Command { + var concurrent bool + var readOnly bool + var cmd = &cobra.Command{ + Use: "create", + Short: "Create a new DefraDB transaction.", + Long: `Create a new DefraDB transaction.`, + RunE: func(cmd *cobra.Command, args []string) (err error) { + db := cmd.Context().Value(dbContextKey).(client.DB) + + var tx datastore.Txn + if concurrent { + tx, err = db.NewConcurrentTxn(cmd.Context(), readOnly) + } else { + tx, err = db.NewTxn(cmd.Context(), readOnly) + } + if err != nil { + return err + } + return writeJSON(cmd, map[string]any{"id": tx.ID()}) + }, + } + cmd.Flags().BoolVar(&concurrent, "concurrent", false, "Transaction is concurrent") + cmd.Flags().BoolVar(&readOnly, "read-only", false, "Transaction is read only") + return cmd +} diff --git a/cli/tx_discard.go b/cli/tx_discard.go new file mode 100644 index 0000000000..351f919f53 --- /dev/null +++ b/cli/tx_discard.go @@ -0,0 +1,42 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "strconv" + + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/http" +) + +func MakeTxDiscardCommand(cfg *config.Config) *cobra.Command { + var cmd = &cobra.Command{ + Use: "discard [id]", + Short: "Discard a DefraDB transaction.", + Long: `Discard a DefraDB transaction.`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + id, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return err + } + tx, err := http.NewTransaction(cfg.API.Address, id) + if err != nil { + return err + } + tx.Discard(cmd.Context()) + return nil + }, + } + return cmd +} diff --git a/cli/utils.go b/cli/utils.go new file mode 100644 index 0000000000..b9e4d1a710 --- /dev/null +++ b/cli/utils.go @@ -0,0 +1,112 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "context" + "encoding/json" + + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/http" +) + +type contextKey string + +var ( + // txContextKey is the context key for the datastore.Txn + // + // This will only be set if a transaction id is specified. + txContextKey = contextKey("tx") + // dbContextKey is the context key for the client.DB + dbContextKey = contextKey("db") + // storeContextKey is the context key for the client.Store + // + // If a transaction exists, all operations will be executed + // in the current transaction context. 
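+ //
+ // Commands read it back via mustGetStoreContext below, which panics
+ // if no store has been set on the command context.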
+ storeContextKey = contextKey("store") + // colContextKey is the context key for the client.Collection + // + // If a transaction exists, all operations will be executed + // in the current transaction context. + colContextKey = contextKey("col") +) + +// mustGetStoreContext returns the store for the current command context. +// +// If a store is not set in the current context this function panics. +func mustGetStoreContext(cmd *cobra.Command) client.Store { + return cmd.Context().Value(storeContextKey).(client.Store) +} + +// tryGetCollectionContext returns the collection for the current command context +// and a boolean indicating if the collection was set. +func tryGetCollectionContext(cmd *cobra.Command) (client.Collection, bool) { + col, ok := cmd.Context().Value(colContextKey).(client.Collection) + return col, ok +} + +// setTransactionContext sets the transaction for the current command context. +func setTransactionContext(cmd *cobra.Command, cfg *config.Config, txId uint64) error { + if txId == 0 { + return nil + } + tx, err := http.NewTransaction(cfg.API.Address, txId) + if err != nil { + return err + } + ctx := context.WithValue(cmd.Context(), txContextKey, tx) + cmd.SetContext(ctx) + return nil +} + +// setStoreContext sets the store for the current command context. +func setStoreContext(cmd *cobra.Command, cfg *config.Config) error { + db, err := http.NewClient(cfg.API.Address) + if err != nil { + return err + } + ctx := context.WithValue(cmd.Context(), dbContextKey, db) + if tx, ok := ctx.Value(txContextKey).(datastore.Txn); ok { + ctx = context.WithValue(ctx, storeContextKey, db.WithTxn(tx)) + } else { + ctx = context.WithValue(ctx, storeContextKey, db) + } + cmd.SetContext(ctx) + return nil +} + +// loadConfig loads the rootDir containing the configuration file, +// otherwise warn about it and load a default configuration. +func loadConfig(cfg *config.Config) error { + if err := cfg.LoadRootDirFromFlagOrDefault(); err != nil { + return err + } + return cfg.LoadWithRootdir(cfg.ConfigFileExists()) +} + +// createConfig creates the config directories and writes +// the current config to a file. 
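+//
+// If the root directory already exists only the config file is written;
+// otherwise both the directory and the config file are created.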
+func createConfig(cfg *config.Config) error {
+ if config.FolderExists(cfg.Rootdir) {
+ return cfg.WriteConfigFile()
+ }
+ return cfg.CreateRootDirAndConfigFile()
+}
+
+func writeJSON(cmd *cobra.Command, out any) error {
+ enc := json.NewEncoder(cmd.OutOrStdout())
+ enc.SetIndent("", " ")
+ return enc.Encode(out)
+}
diff --git a/cli/version.go b/cli/version.go
index 8842697699..f61ecbc9d5 100644
--- a/cli/version.go
+++ b/cli/version.go
@@ -11,9 +11,7 @@
 package cli
 
 import (
- "bytes"
-
- "encoding/json"
+ "strings"
 
 "github.com/spf13/cobra"
 
@@ -31,25 +29,17 @@ func MakeVersionCommand() *cobra.Command {
 if err != nil {
 return err
 }
- switch format {
- case "json":
- var buf bytes.Buffer
- dvj, err := json.Marshal(dv)
- if err != nil {
- return err
- }
- err = json.Indent(&buf, dvj, "", " ")
- if err != nil {
- return err
- }
- cmd.Println(buf.String())
- default:
- if full {
- cmd.Println(dv.StringFull())
- } else {
- cmd.Println(dv.String())
- }
+
+ if strings.ToLower(format) == "json" {
+ return writeJSON(cmd, dv)
 }
+
+ if full {
+ cmd.Println(dv.StringFull())
+ } else {
+ cmd.Println(dv.String())
+ }
+ return nil
 },
 }
diff --git a/client/document.go b/client/document.go
index c48ccfce88..bcb8ae6070 100644
--- a/client/document.go
+++ b/client/document.go
@@ -398,6 +398,26 @@ func (doc *Document) ToMap() (map[string]any, error) {
 return doc.toMapWithKey()
 }
 
+// ToJSONPatch returns a json patch that can be used to update
+// a document by calling SetWithJSON.
+func (doc *Document) ToJSONPatch() ([]byte, error) {
+ docMap, err := doc.toMap()
+ if err != nil {
+ return nil, err
+ }
+
+ for field, value := range doc.Values() {
+ if !value.IsDirty() {
+ delete(docMap, field.Name())
+ }
+ if value.IsDelete() {
+ docMap[field.Name()] = nil
+ }
+ }
+
+ return json.Marshal(docMap)
+}
+
 // Clean cleans the document by removing all dirty fields.
 func (doc *Document) Clean() {
 for _, v := range doc.Fields() {
diff --git a/cmd/defradb/main.go b/cmd/defradb/main.go
index 761666bea7..2406885a76 100644
--- a/cmd/defradb/main.go
+++ b/cmd/defradb/main.go
@@ -12,7 +12,6 @@
 package main
 
 import (
- "context"
 "os"
 
 "github.com/sourcenetwork/defradb/cli"
@@ -21,10 +20,13 @@ import (
 // Execute adds all child commands to the root command and sets flags appropriately.
 func main() {
- cfg := config.DefaultConfig()
- ctx := context.Background()
- defraCmd := cli.NewDefraCommand(cfg)
- if err := defraCmd.Execute(ctx); err != nil {
+ defraCmd := cli.NewDefraCommand(config.DefaultConfig())
+ if err := defraCmd.Execute(); err != nil {
+ // this error is okay to discard because cobra
+ // logs any errors encountered during execution
+ //
+ // exiting with a non-zero status code signals
+ // that an error has occurred during execution
 os.Exit(1)
 }
 }
diff --git a/cmd/genclidocs/genclidocs.go b/cmd/genclidocs/main.go
similarity index 59%
rename from cmd/genclidocs/genclidocs.go
rename to cmd/genclidocs/main.go
index bccc96b38c..f556c26d20 100644
--- a/cmd/genclidocs/genclidocs.go
+++ b/cmd/genclidocs/main.go
@@ -14,30 +14,33 @@ genclidocs is a tool to generate the command line interface documentation.
package main import ( - "context" "flag" + "log" "os" "github.com/spf13/cobra/doc" "github.com/sourcenetwork/defradb/cli" "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" ) -var log = logging.MustNewLogger("genclidocs") +var path string + +func init() { + flag.StringVar(&path, "o", "docs/cmd", "path to write the cmd docs to") +} func main() { - path := flag.String("o", "docs/cmd", "path to write the cmd docs to") flag.Parse() - err := os.MkdirAll(*path, os.ModePerm) - if err != nil { - log.FatalE(context.Background(), "Creating the filesystem path failed", err) - } + defraCmd := cli.NewDefraCommand(config.DefaultConfig()) - defraCmd.RootCmd.DisableAutoGenTag = true - err = doc.GenMarkdownTree(defraCmd.RootCmd, *path) - if err != nil { - log.FatalE(context.Background(), "Generating cmd docs failed", err) + defraCmd.DisableAutoGenTag = true + + if err := os.MkdirAll(path, os.ModePerm); err != nil { + log.Fatal("Creating the filesystem path failed", err) + } + + if err := doc.GenMarkdownTree(defraCmd, path); err != nil { + log.Fatal("Generating cmd docs failed", err) } } diff --git a/cmd/genmanpages/main.go b/cmd/genmanpages/main.go index 7ec7a3ce59..1a9b43df7c 100644 --- a/cmd/genmanpages/main.go +++ b/cmd/genmanpages/main.go @@ -15,40 +15,39 @@ installation is packaging and system dependent. package main import ( - "context" "flag" + "log" "os" "github.com/spf13/cobra/doc" "github.com/sourcenetwork/defradb/cli" "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" ) const defaultPerm os.FileMode = 0o777 -var log = logging.MustNewLogger("genmanpages") +var dir string + +var header = &doc.GenManHeader{ + Title: "defradb - Peer-to-Peer Edge Database", + Section: "1", +} + +func init() { + flag.StringVar(&dir, "o", "build/man", "Directory in which to generate DefraDB man pages") +} func main() { - dirFlag := flag.String("o", "build/man", "Directory in which to generate DefraDB man pages") flag.Parse() - genRootManPages(*dirFlag) -} -func genRootManPages(dir string) { - ctx := context.Background() - header := &doc.GenManHeader{ - Title: "defradb - Peer-to-Peer Edge Database", - Section: "1", - } - err := os.MkdirAll(dir, defaultPerm) - if err != nil { - log.FatalE(ctx, "Failed to create directory", err, logging.NewKV("dir", dir)) - } defraCmd := cli.NewDefraCommand(config.DefaultConfig()) - err = doc.GenManTree(defraCmd.RootCmd, header, dir) - if err != nil { - log.FatalE(ctx, "Failed generation of man pages", err) + + if err := os.MkdirAll(dir, defaultPerm); err != nil { + log.Fatal("Failed to create directory", err) + } + + if err := doc.GenManTree(defraCmd, header, dir); err != nil { + log.Fatal("Failed generation of man pages", err) } } diff --git a/docs/cli/defradb_client.md b/docs/cli/defradb_client.md index 7173befb6b..b538592ccc 100644 --- a/docs/cli/defradb_client.md +++ b/docs/cli/defradb_client.md @@ -10,7 +10,8 @@ Execute queries, add schema types, obtain node info, etc. ### Options ``` - -h, --help help for client + -h, --help help for client + --tx uint Transaction ID ``` ### Options inherited from parent commands @@ -30,12 +31,12 @@ Execute queries, add schema types, obtain node info, etc. * [defradb](defradb.md) - DefraDB Edge Database * [defradb client backup](defradb_client_backup.md) - Interact with the backup utility -* [defradb client blocks](defradb_client_blocks.md) - Interact with the database's blockstore +* [defradb client collection](defradb_client_collection.md) - View detailed collection info. 
+* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. * [defradb client dump](defradb_client_dump.md) - Dump the contents of DefraDB node-side * [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance -* [defradb client peerid](defradb_client_peerid.md) - Get the PeerID of the node -* [defradb client ping](defradb_client_ping.md) - Ping to test connection with a node +* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system * [defradb client query](defradb_client_query.md) - Send a DefraDB GraphQL query request -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC * [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node +* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions diff --git a/docs/cli/defradb_client_backup.md b/docs/cli/defradb_client_backup.md index baa08725e1..77e111795d 100644 --- a/docs/cli/defradb_client_backup.md +++ b/docs/cli/defradb_client_backup.md @@ -23,6 +23,7 @@ Currently only supports JSON format. --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_backup_export.md b/docs/cli/defradb_client_backup_export.md index ea8a22d634..b7547ea641 100644 --- a/docs/cli/defradb_client_backup_export.md +++ b/docs/cli/defradb_client_backup_export.md @@ -37,6 +37,7 @@ defradb client backup export [-c --collections | -p --pretty | -f --format] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_collection.md b/docs/cli/defradb_client_collection.md new file mode 100644 index 0000000000..2a1e9058be --- /dev/null +++ b/docs/cli/defradb_client_collection.md @@ -0,0 +1,52 @@ +## defradb client collection + +View detailed collection info. + +### Synopsis + +View detailed collection info. + +Example: view all collections + defradb client collection + +Example: view collection by name + defradb client collection --name User + +Example: view collection by schema id + defradb client collection --schema bae123 + +Example: view collection by version id + defradb client collection --version bae123 + + +``` +defradb client collection [--name --schema --version ] [flags] +``` + +### Options + +``` + -h, --help help for collection + --name string Get collection by name + --schema string Get collection by schema ID + --version string Get collection by version ID +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a DefraDB node + diff --git a/docs/cli/defradb_client_document.md b/docs/cli/defradb_client_document.md new file mode 100644 index 0000000000..bc527357e7 --- /dev/null +++ b/docs/cli/defradb_client_document.md @@ -0,0 +1,38 @@ +## defradb client document + +Create, read, update, and delete documents. + +### Synopsis + +Create, read, update, and delete documents. + +### Options + +``` + -h, --help help for document +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client document create](defradb_client_document_create.md) - Create a new document. +* [defradb client document delete](defradb_client_document_delete.md) - Delete documents by key or filter. +* [defradb client document get](defradb_client_document_get.md) - View detailed document info. +* [defradb client document keys](defradb_client_document_keys.md) - List all collection document keys. +* [defradb client document save](defradb_client_document_save.md) - Create or update a document. +* [defradb client document update](defradb_client_document_update.md) - Update documents by key or filter. + diff --git a/docs/cli/defradb_client_document_create.md b/docs/cli/defradb_client_document_create.md new file mode 100644 index 0000000000..99dbd0d7f5 --- /dev/null +++ b/docs/cli/defradb_client_document_create.md @@ -0,0 +1,44 @@ +## defradb client document create + +Create a new document. + +### Synopsis + +Create a new document. + +Example: create document + defradb client document create --collection User '{ "name": "Bob" }' + +Example: create documents + defradb client document create --collection User '[{ "name": "Alice" }, { "name": "Bob" }]' + + +``` +defradb client document create --collection [flags] +``` + +### Options + +``` + -c, --collection string Collection name + -h, --help help for create +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_document_delete.md b/docs/cli/defradb_client_document_delete.md new file mode 100644 index 0000000000..96a0b1e973 --- /dev/null +++ b/docs/cli/defradb_client_document_delete.md @@ -0,0 +1,46 @@ +## defradb client document delete + +Delete documents by key or filter. + +### Synopsis + +Delete documents by key or filter and lists the number of documents deleted. + +Example: delete by key(s) + defradb client document delete --collection User --key bae-123,bae-456 + +Example: delete by filter + defradb client document delete --collection User --filter '{ "_gte": { "points": 100 } }' + + +``` +defradb client document delete --collection [--filter --key ] [flags] +``` + +### Options + +``` + -c, --collection string Collection name + --filter string Document filter + -h, --help help for delete + --key strings Document key +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_document_get.md b/docs/cli/defradb_client_document_get.md new file mode 100644 index 0000000000..600712ec0b --- /dev/null +++ b/docs/cli/defradb_client_document_get.md @@ -0,0 +1,42 @@ +## defradb client document get + +View detailed document info. + +### Synopsis + +View detailed document info. + +Example: + defradb client document get --collection User bae-123 + + +``` +defradb client document get --collection [--show-deleted] [flags] +``` + +### Options + +``` + -c, --collection string Collection name + -h, --help help for get + --show-deleted Show deleted documents +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_document_keys.md b/docs/cli/defradb_client_document_keys.md new file mode 100644 index 0000000000..e436f4df6b --- /dev/null +++ b/docs/cli/defradb_client_document_keys.md @@ -0,0 +1,41 @@ +## defradb client document keys + +List all collection document keys. + +### Synopsis + +List all collection document keys. + +Example: + defradb client document keys --collection User keys + + +``` +defradb client document keys --collection [flags] +``` + +### Options + +``` + -c, --collection string Collection name + -h, --help help for keys +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_document_save.md b/docs/cli/defradb_client_document_save.md new file mode 100644 index 0000000000..41f59a860c --- /dev/null +++ b/docs/cli/defradb_client_document_save.md @@ -0,0 +1,42 @@ +## defradb client document save + +Create or update a document. + +### Synopsis + +Create or update a document. + +Example: + defradb client document save --collection User --key bae-123 '{ "name": "Bob" }' + + +``` +defradb client document save --collection --key [flags] +``` + +### Options + +``` + -c, --collection string Collection name + -h, --help help for save + --key string Document key +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. 
+ diff --git a/docs/cli/defradb_client_document_update.md b/docs/cli/defradb_client_document_update.md new file mode 100644 index 0000000000..3efc67ebf0 --- /dev/null +++ b/docs/cli/defradb_client_document_update.md @@ -0,0 +1,52 @@ +## defradb client document update + +Update documents by key or filter. + +### Synopsis + +Update documents by key or filter. + +Example: + defradb client document update --collection User --key bae-123 '{ "name": "Bob" }' + +Example: update by filter + defradb client document update --collection User \ + --filter '{ "_gte": { "points": 100 } }' --updater '{ "verified": true }' + +Example: update by keys + defradb client document update --collection User \ + --key bae-123,bae-456 --updater '{ "verified": true }' + + +``` +defradb client document update --collection [--filter --key --updater ] [flags] +``` + +### Options + +``` + -c, --collection string Collection name + --filter string Document filter + -h, --help help for update + --key strings Document key + --updater string Document updater +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_dump.md b/docs/cli/defradb_client_dump.md index 862154bc17..3ebd35343c 100644 --- a/docs/cli/defradb_client_dump.md +++ b/docs/cli/defradb_client_dump.md @@ -22,6 +22,7 @@ defradb client dump [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_index.md b/docs/cli/defradb_client_index.md index 4babb57d46..a876bbcc4f 100644 --- a/docs/cli/defradb_client_index.md +++ b/docs/cli/defradb_client_index.md @@ -22,6 +22,7 @@ Manage (create, drop, or list) collection indexes on a DefraDB node. 
--logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_index_create.md b/docs/cli/defradb_client_index_create.md index 7f67e58075..96b6418440 100644 --- a/docs/cli/defradb_client_index_create.md +++ b/docs/cli/defradb_client_index_create.md @@ -22,7 +22,7 @@ defradb client index create -c --collection --fields [-n - ``` -c, --collection string Collection name - --fields string Fields to index + --fields strings Fields to index -h, --help help for create -n, --name string Index name ``` @@ -37,6 +37,7 @@ defradb client index create -c --collection --fields [-n - --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_index_drop.md b/docs/cli/defradb_client_index_drop.md index f551fe4658..c5171b756e 100644 --- a/docs/cli/defradb_client_index_drop.md +++ b/docs/cli/defradb_client_index_drop.md @@ -31,6 +31,7 @@ defradb client index drop -c --collection -n --name [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_index_list.md b/docs/cli/defradb_client_index_list.md index bf434d30f2..c7e96d4e4f 100644 --- a/docs/cli/defradb_client_index_list.md +++ b/docs/cli/defradb_client_index_list.md @@ -33,6 +33,7 @@ defradb client index list [-c --collection ] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_rpc.md b/docs/cli/defradb_client_p2p.md similarity index 70% rename from docs/cli/defradb_client_rpc.md rename to docs/cli/defradb_client_p2p.md index d7046433c5..1132ee22ad 100644 --- a/docs/cli/defradb_client_rpc.md +++ b/docs/cli/defradb_client_p2p.md @@ -1,16 +1,15 @@ -## defradb client rpc +## defradb client p2p -Interact with a DefraDB node via RPC +Interact with the DefraDB P2P system ### Synopsis -Interact with a DefraDB node via RPC. +Interact with the DefraDB P2P system ### Options ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") - -h, --help help for rpc + -h, --help help for p2p ``` ### Options inherited from parent commands @@ -23,12 +22,13 @@ Interact with a DefraDB node via RPC. 
--logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO * [defradb client](defradb_client.md) - Interact with a DefraDB node -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system +* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system +* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_p2pcollection.md b/docs/cli/defradb_client_p2p_collection.md similarity index 62% rename from docs/cli/defradb_client_rpc_p2pcollection.md rename to docs/cli/defradb_client_p2p_collection.md index ede32521d4..6fec3171da 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection.md +++ b/docs/cli/defradb_client_p2p_collection.md @@ -1,4 +1,4 @@ -## defradb client rpc p2pcollection +## defradb client p2p collection Configure the P2P collection system @@ -10,13 +10,12 @@ The selected collections synchronize their events on the pubsub network. ### Options ``` - -h, --help help for p2pcollection + -h, --help help for collection ``` ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -24,13 +23,14 @@ The selected collections synchronize their events on the pubsub network. 
--logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC -* [defradb client rpc p2pcollection add](defradb_client_rpc_p2pcollection_add.md) - Add P2P collections -* [defradb client rpc p2pcollection getall](defradb_client_rpc_p2pcollection_getall.md) - Get all P2P collections -* [defradb client rpc p2pcollection remove](defradb_client_rpc_p2pcollection_remove.md) - Remove P2P collections +* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system +* [defradb client p2p collection add](defradb_client_p2p_collection_add.md) - Add P2P collections +* [defradb client p2p collection getall](defradb_client_p2p_collection_getall.md) - Get all P2P collections +* [defradb client p2p collection remove](defradb_client_p2p_collection_remove.md) - Remove P2P collections diff --git a/docs/cli/defradb_client_rpc_p2pcollection_add.md b/docs/cli/defradb_client_p2p_collection_add.md similarity index 77% rename from docs/cli/defradb_client_rpc_p2pcollection_add.md rename to docs/cli/defradb_client_p2p_collection_add.md index 92ac0d82e6..b5f3586144 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_add.md +++ b/docs/cli/defradb_client_p2p_collection_add.md @@ -1,4 +1,4 @@ -## defradb client rpc p2pcollection add +## defradb client p2p collection add Add P2P collections @@ -8,7 +8,7 @@ Add P2P collections to the synchronized pubsub topics. The collections are synchronized between nodes of a pubsub network. ``` -defradb client rpc p2pcollection add [collectionID] [flags] +defradb client p2p collection add [collectionID] [flags] ``` ### Options @@ -20,7 +20,6 @@ defradb client rpc p2pcollection add [collectionID] [flags] ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") @@ -28,10 +27,11 @@ defradb client rpc p2pcollection add [collectionID] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system +* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_p2pcollection_getall.md b/docs/cli/defradb_client_p2p_collection_getall.md similarity index 78% rename from docs/cli/defradb_client_rpc_p2pcollection_getall.md rename to docs/cli/defradb_client_p2p_collection_getall.md index 946a2e0156..46fcefc407 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_getall.md +++ b/docs/cli/defradb_client_p2p_collection_getall.md @@ -1,4 +1,4 @@ -## defradb client rpc p2pcollection getall +## defradb client p2p collection getall Get all P2P collections @@ -8,7 +8,7 @@ Get all P2P collections in the pubsub topics. This is the list of collections of the node that are synchronized on the pubsub network. ``` -defradb client rpc p2pcollection getall [flags] +defradb client p2p collection getall [flags] ``` ### Options @@ -20,7 +20,6 @@ defradb client rpc p2pcollection getall [flags] ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -28,10 +27,11 @@ defradb client rpc p2pcollection getall [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system +* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_p2pcollection_remove.md b/docs/cli/defradb_client_p2p_collection_remove.md similarity index 77% rename from docs/cli/defradb_client_rpc_p2pcollection_remove.md rename to docs/cli/defradb_client_p2p_collection_remove.md index 77658b4d50..04492d2871 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_remove.md +++ b/docs/cli/defradb_client_p2p_collection_remove.md @@ -1,4 +1,4 @@ -## defradb client rpc p2pcollection remove +## defradb client p2p collection remove Remove P2P collections @@ -8,7 +8,7 @@ Remove P2P collections from the followed pubsub topics. The removed collections will no longer be synchronized between nodes. 
``` -defradb client rpc p2pcollection remove [collectionID] [flags] +defradb client p2p collection remove [collectionID] [flags] ``` ### Options @@ -20,7 +20,6 @@ defradb client rpc p2pcollection remove [collectionID] [flags] ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -28,10 +27,11 @@ defradb client rpc p2pcollection remove [collectionID] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system +* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_replicator.md b/docs/cli/defradb_client_p2p_replicator.md similarity index 75% rename from docs/cli/defradb_client_rpc_replicator.md rename to docs/cli/defradb_client_p2p_replicator.md index e88933791c..26f4041802 100644 --- a/docs/cli/defradb_client_rpc_replicator.md +++ b/docs/cli/defradb_client_p2p_replicator.md @@ -1,4 +1,4 @@ -## defradb client rpc replicator +## defradb client p2p replicator Configure the replicator system @@ -16,7 +16,6 @@ A replicator replicates one or all collection(s) from one node to another. ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -24,13 +23,14 @@ A replicator replicates one or all collection(s) from one node to another. --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC -* [defradb client rpc replicator delete](defradb_client_rpc_replicator_delete.md) - Delete a replicator. It will stop synchronizing -* [defradb client rpc replicator getall](defradb_client_rpc_replicator_getall.md) - Get all replicators -* [defradb client rpc replicator set](defradb_client_rpc_replicator_set.md) - Set a P2P replicator +* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system +* [defradb client p2p replicator delete](defradb_client_p2p_replicator_delete.md) - Delete a replicator. 
It will stop synchronizing +* [defradb client p2p replicator getall](defradb_client_p2p_replicator_getall.md) - Get all replicators +* [defradb client p2p replicator set](defradb_client_p2p_replicator_set.md) - Set a P2P replicator diff --git a/docs/cli/defradb_client_ping.md b/docs/cli/defradb_client_p2p_replicator_delete.md similarity index 67% rename from docs/cli/defradb_client_ping.md rename to docs/cli/defradb_client_p2p_replicator_delete.md index 8edd7aff94..9ffbc115d3 100644 --- a/docs/cli/defradb_client_ping.md +++ b/docs/cli/defradb_client_p2p_replicator_delete.md @@ -1,15 +1,19 @@ -## defradb client ping +## defradb client p2p replicator delete -Ping to test connection with a node +Delete a replicator. It will stop synchronizing + +### Synopsis + +Delete a replicator. It will stop synchronizing. ``` -defradb client ping [flags] +defradb client p2p replicator delete [flags] ``` ### Options ``` - -h, --help help for ping + -h, --help help for delete ``` ### Options inherited from parent commands @@ -22,10 +26,11 @@ defradb client ping [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_replicator_getall.md b/docs/cli/defradb_client_p2p_replicator_getall.md similarity index 82% rename from docs/cli/defradb_client_rpc_replicator_getall.md rename to docs/cli/defradb_client_p2p_replicator_getall.md index 2449dba1fd..080011ae65 100644 --- a/docs/cli/defradb_client_rpc_replicator_getall.md +++ b/docs/cli/defradb_client_p2p_replicator_getall.md @@ -1,4 +1,4 @@ -## defradb client rpc replicator getall +## defradb client p2p replicator getall Get all replicators @@ -8,7 +8,7 @@ Get all the replicators active in the P2P data sync system. These are the replicators that are currently replicating data from one node to another. ``` -defradb client rpc replicator getall [flags] +defradb client p2p replicator getall [flags] ``` ### Options @@ -20,7 +20,6 @@ defradb client rpc replicator getall [flags] ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") @@ -28,10 +27,11 @@ defradb client rpc replicator getall [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system +* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_replicator_set.md b/docs/cli/defradb_client_p2p_replicator_set.md similarity index 68% rename from docs/cli/defradb_client_rpc_replicator_set.md rename to docs/cli/defradb_client_p2p_replicator_set.md index 24b7add648..23d7b81404 100644 --- a/docs/cli/defradb_client_rpc_replicator_set.md +++ b/docs/cli/defradb_client_p2p_replicator_set.md @@ -1,4 +1,4 @@ -## defradb client rpc replicator set +## defradb client p2p replicator set Set a P2P replicator @@ -9,21 +9,19 @@ A replicator replicates one or all collection(s) from this node to another. ``` -defradb client rpc replicator set [-f, --full | -c, --collection] [flags] +defradb client p2p replicator set [-c, --collection] [flags] ``` ### Options ``` - -c, --collection stringArray Define the collection for the replicator - -f, --full Set the replicator to act on all collections - -h, --help help for set + -c, --collection strings Define the collection for the replicator + -h, --help help for set ``` ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") @@ -31,10 +29,11 @@ defradb client rpc replicator set [-f, --full | -c, --collection] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system +* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_query.md b/docs/cli/defradb_client_query.md index 8f5c3477c3..5e748229e2 100644 --- a/docs/cli/defradb_client_query.md +++ b/docs/cli/defradb_client_query.md @@ -41,6 +41,7 @@ defradb client query [query request] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_rpc_addreplicator.md b/docs/cli/defradb_client_rpc_addreplicator.md deleted file mode 100644 index e80b667f18..0000000000 --- a/docs/cli/defradb_client_rpc_addreplicator.md +++ /dev/null @@ -1,37 +0,0 @@ -## defradb client rpc addreplicator - -Add a new replicator - -### Synopsis - -Use this command if you wish to add a new target replicator -for the P2P data sync system. - -``` -defradb client rpc addreplicator [flags] -``` - -### Options - -``` - -h, --help help for addreplicator -``` - -### Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -### SEE ALSO - -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server - diff --git a/docs/cli/defradb_client_rpc_replicator_delete.md b/docs/cli/defradb_client_rpc_replicator_delete.md deleted file mode 100644 index c851d2f508..0000000000 --- a/docs/cli/defradb_client_rpc_replicator_delete.md +++ /dev/null @@ -1,38 +0,0 @@ -## defradb client rpc replicator delete - -Delete a replicator. It will stop synchronizing - -### Synopsis - -Delete a replicator. It will stop synchronizing. - -``` -defradb client rpc replicator delete [-f, --full | -c, --collection] [flags] -``` - -### Options - -``` - -c, --collection stringArray Define the collection for the replicator - -f, --full Set the replicator to act on all collections - -h, --help help for delete -``` - -### Options inherited from parent commands - -``` - --addr string RPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. 
Usage: --logger <name>,level=<level>,output=<output>,... - --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -### SEE ALSO - -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system - diff --git a/docs/cli/defradb_client_schema.md b/docs/cli/defradb_client_schema.md index c36c8d4bce..6b04bb2a5d 100644 --- a/docs/cli/defradb_client_schema.md +++ b/docs/cli/defradb_client_schema.md @@ -22,6 +22,7 @@ Make changes, updates, or look for existing schema types. --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` @@ -29,7 +30,6 @@ Make changes, updates, or look for existing schema types. * [defradb client](defradb_client.md) - Interact with a DefraDB node * [defradb client schema add](defradb_client_schema_add.md) - Add new schema -* [defradb client schema list](defradb_client_schema_list.md) - List schema types with their respective fields * [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance * [defradb client schema patch](defradb_client_schema_patch.md) - Patch an existing schema type diff --git a/docs/cli/defradb_client_schema_add.md b/docs/cli/defradb_client_schema_add.md index b278431034..aa73039d0c 100644 --- a/docs/cli/defradb_client_schema_add.md +++ b/docs/cli/defradb_client_schema_add.md @@ -38,6 +38,7 @@ defradb client schema add [schema] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_schema_migration.md b/docs/cli/defradb_client_schema_migration.md index 0a20968378..91f2f324e3 100644 --- a/docs/cli/defradb_client_schema_migration.md +++ b/docs/cli/defradb_client_schema_migration.md @@ -22,12 +22,16 @@ Make set or look for existing schema migrations on a DefraDB node. --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO * [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node +* [defradb client schema migration down](defradb_client_schema_migration_down.md) - Reverse a migration on the specified schema version.
* [defradb client schema migration get](defradb_client_schema_migration_get.md) - Gets the schema migrations within DefraDB +* [defradb client schema migration reload](defradb_client_schema_migration_reload.md) - Reload the schema migrations within DefraDB * [defradb client schema migration set](defradb_client_schema_migration_set.md) - Set a schema migration within DefraDB +* [defradb client schema migration up](defradb_client_schema_migration_up.md) - Runs a migration on the specified schema version. diff --git a/docs/cli/defradb_client_schema_migration_down.md b/docs/cli/defradb_client_schema_migration_down.md new file mode 100644 index 0000000000..3d8a2eb6a5 --- /dev/null +++ b/docs/cli/defradb_client_schema_migration_down.md @@ -0,0 +1,37 @@ +## defradb client schema migration down + +Reverse a migration on the specified schema version. + +### Synopsis + +Reverse a migration on the specified schema version. + +``` +defradb client schema migration down --version [flags] +``` + +### Options + +``` + -h, --help help for down + --version string Schema version id +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance + diff --git a/docs/cli/defradb_client_schema_migration_get.md b/docs/cli/defradb_client_schema_migration_get.md index d2164ed6bd..20ed8edb91 100644 --- a/docs/cli/defradb_client_schema_migration_get.md +++ b/docs/cli/defradb_client_schema_migration_get.md @@ -31,6 +31,7 @@ defradb client schema migration get [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_schema_list.md b/docs/cli/defradb_client_schema_migration_reload.md similarity index 65% rename from docs/cli/defradb_client_schema_list.md rename to docs/cli/defradb_client_schema_migration_reload.md index ffbe253e31..f9acfd2d19 100644 --- a/docs/cli/defradb_client_schema_list.md +++ b/docs/cli/defradb_client_schema_migration_reload.md @@ -1,15 +1,19 @@ -## defradb client schema list +## defradb client schema migration reload -List schema types with their respective fields +Reload the schema migrations within DefraDB + +### Synopsis + +Reload the schema migrations within DefraDB ``` -defradb client schema list [flags] +defradb client schema migration reload [flags] ``` ### Options ``` - -h, --help help for list + -h, --help help for reload ``` ### Options inherited from parent commands @@ -22,10 +26,11 @@ defradb client schema list [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal
logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance diff --git a/docs/cli/defradb_client_schema_migration_set.md b/docs/cli/defradb_client_schema_migration_set.md index 8013fd2a29..b9626bfeed 100644 --- a/docs/cli/defradb_client_schema_migration_set.md +++ b/docs/cli/defradb_client_schema_migration_set.md @@ -38,6 +38,7 @@ defradb client schema migration set [src] [dst] [cfg] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_schema_migration_up.md b/docs/cli/defradb_client_schema_migration_up.md new file mode 100644 index 0000000000..a637f2f28d --- /dev/null +++ b/docs/cli/defradb_client_schema_migration_up.md @@ -0,0 +1,37 @@ +## defradb client schema migration up + +Runs a migration on the specified schema version. + +### Synopsis + +Runs a migration on the specified schema version. + +``` +defradb client schema migration up --version [flags] +``` + +### Options + +``` + -h, --help help for up + --version string Schema version id +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,... + --loglevel string Log level to use.
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance + diff --git a/docs/cli/defradb_client_schema_patch.md b/docs/cli/defradb_client_schema_patch.md index ec64d293e0..ba04faddf2 100644 --- a/docs/cli/defradb_client_schema_patch.md +++ b/docs/cli/defradb_client_schema_patch.md @@ -40,6 +40,7 @@ defradb client schema patch [schema] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_blocks.md b/docs/cli/defradb_client_tx.md similarity index 63% rename from docs/cli/defradb_client_blocks.md rename to docs/cli/defradb_client_tx.md index e05a853440..4feab4af7b 100644 --- a/docs/cli/defradb_client_blocks.md +++ b/docs/cli/defradb_client_tx.md @@ -1,11 +1,15 @@ -## defradb client blocks +## defradb client tx -Interact with the database's blockstore +Create, commit, and discard DefraDB transactions + +### Synopsis + +Create, commit, and discard DefraDB transactions ### Options ``` - -h, --help help for blocks + -h, --help help for tx ``` ### Options inherited from parent commands @@ -18,11 +22,14 @@ Interact with the database's blockstore --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO * [defradb client](defradb_client.md) - Interact with a DefraDB node -* [defradb client blocks get](defradb_client_blocks_get.md) - Get a block by its CID from the blockstore +* [defradb client tx commit](defradb_client_tx_commit.md) - Commit a DefraDB transaction. +* [defradb client tx create](defradb_client_tx_create.md) - Create a new DefraDB transaction. +* [defradb client tx discard](defradb_client_tx_discard.md) - Discard a DefraDB transaction. diff --git a/docs/cli/defradb_client_peerid.md b/docs/cli/defradb_client_tx_commit.md similarity index 73% rename from docs/cli/defradb_client_peerid.md rename to docs/cli/defradb_client_tx_commit.md index f4596111c8..21f0b50325 100644 --- a/docs/cli/defradb_client_peerid.md +++ b/docs/cli/defradb_client_tx_commit.md @@ -1,19 +1,19 @@ -## defradb client peerid +## defradb client tx commit -Get the PeerID of the node +Commit a DefraDB transaction. ### Synopsis -Get the PeerID of the node. +Commit a DefraDB transaction. 
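For orientation (an editor's illustration, not part of the generated doc page): the handler added later in this patch series registers `tx.Post("/{id}", tx_handler.Commit)`, so this CLI command maps onto a plain HTTP call. A minimal Go sketch of that call, assuming a node on the default `localhost:9181`, a transaction id previously returned by `tx create`, and a 200 OK on success; the helper name and error handling are assumptions:

```go
package main

import (
	"fmt"
	"net/http"
)

// commitTx commits a transaction by POSTing to /api/v0/tx/{id},
// the route registered in http/handler.go for tx_handler.Commit.
func commitTx(baseURL string, id uint64) error {
	res, err := http.Post(fmt.Sprintf("%s/api/v0/tx/%d", baseURL, id), "application/json", nil)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("commit failed with status %d", res.StatusCode)
	}
	return nil
}

func main() {
	// Assumes a transaction with id 1 was created via `defradb client tx create`.
	if err := commitTx("http://localhost:9181", 1); err != nil {
		fmt.Println(err)
	}
}
```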
``` -defradb client peerid [flags] +defradb client tx commit [id] [flags] ``` ### Options ``` - -h, --help help for peerid + -h, --help help for commit ``` ### Options inherited from parent commands @@ -26,10 +26,11 @@ defradb client peerid [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions diff --git a/docs/cli/defradb_client_tx_create.md b/docs/cli/defradb_client_tx_create.md new file mode 100644 index 0000000000..8ba600b611 --- /dev/null +++ b/docs/cli/defradb_client_tx_create.md @@ -0,0 +1,38 @@ +## defradb client tx create + +Create a new DefraDB transaction. + +### Synopsis + +Create a new DefraDB transaction. + +``` +defradb client tx create [flags] +``` + +### Options + +``` + --concurrent Transaction is concurrent + -h, --help help for create + --read-only Transaction is read only +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger <name>,level=<level>,output=<output>,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions + diff --git a/docs/cli/defradb_client_blocks_get.md b/docs/cli/defradb_client_tx_discard.md similarity index 71% rename from docs/cli/defradb_client_blocks_get.md rename to docs/cli/defradb_client_tx_discard.md index 38ff02b63c..d1f0bb6025 100644 --- a/docs/cli/defradb_client_blocks_get.md +++ b/docs/cli/defradb_client_tx_discard.md @@ -1,15 +1,19 @@ -## defradb client blocks get +## defradb client tx discard -Get a block by its CID from the blockstore +Discard a DefraDB transaction. + +### Synopsis + +Discard a DefraDB transaction.
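The discard path mirrors commit: the router added later in this patch series maps `tx.Delete("/{id}", tx_handler.Discard)`, so abandoning a transaction is a DELETE against the same endpoint. A sketch under the same assumptions (helper name and success status are not confirmed by the patch):

```go
package main

import (
	"fmt"
	"net/http"
)

// discardTx abandons a transaction by sending DELETE to /api/v0/tx/{id},
// the route registered in http/handler.go for tx_handler.Discard.
func discardTx(baseURL string, id uint64) error {
	req, err := http.NewRequest(http.MethodDelete, fmt.Sprintf("%s/api/v0/tx/%d", baseURL, id), nil)
	if err != nil {
		return err
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return fmt.Errorf("discard failed with status %d", res.StatusCode)
	}
	return nil
}
```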
``` -defradb client blocks get [CID] [flags] +defradb client tx discard [id] [flags] ``` ### Options ``` - -h, --help help for get + -h, --help help for discard ``` ### Options inherited from parent commands @@ -22,10 +26,11 @@ defradb client blocks get [CID] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client blocks](defradb_client_blocks.md) - Interact with the database's blockstore +* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions diff --git a/go.mod b/go.mod index 5c99acb439..83ee818703 100644 --- a/go.mod +++ b/go.mod @@ -33,8 +33,6 @@ require ( github.com/multiformats/go-multiaddr v0.10.1 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 - github.com/pkg/errors v0.9.1 - github.com/planetscale/vtprotobuf v0.5.0 github.com/sourcenetwork/immutable v0.3.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 @@ -49,6 +47,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.40.0 go.uber.org/zap v1.25.0 golang.org/x/crypto v0.13.0 + golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 golang.org/x/net v0.14.0 google.golang.org/grpc v1.58.1 google.golang.org/protobuf v1.31.0 @@ -156,6 +155,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect @@ -187,7 +187,6 @@ require ( go.uber.org/dig v1.17.0 // indirect go.uber.org/fx v1.20.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.12.0 // indirect diff --git a/go.sum b/go.sum index 3cb6fe1269..e198ec35a4 100644 --- a/go.sum +++ b/go.sum @@ -1086,8 +1086,6 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/planetscale/vtprotobuf v0.5.0 h1:l8PXm6Colok5z6qQLNhAj2Jq5BfoMTIHxLER5a6nDqM= -github.com/planetscale/vtprotobuf v0.5.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= diff --git a/http/client.go b/http/client.go index 867cdc3bb1..9dd7b7b065 100644 --- a/http/client.go +++ b/http/client.go @@ -35,11 +35,10 @@ type Client struct { } func NewClient(rawURL string) (*Client, error) { - baseURL, err := url.Parse(rawURL) + httpClient, err := newHttpClient(rawURL) if err != nil { return nil, err } - httpClient := newHttpClient(baseURL.JoinPath("/api/v0")) return &Client{httpClient}, nil } @@ 
-418,6 +417,20 @@ func (c *Client) PrintDump(ctx context.Context) error { return err } +func (c *Client) PeerInfo(ctx context.Context) (*PeerInfoResponse, error) { + methodURL := c.http.baseURL.JoinPath("p2p", "info") + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) + if err != nil { + return nil, err + } + var res PeerInfoResponse + if err := c.http.requestJson(req, &res); err != nil { + return nil, err + } + return &res, nil +} + func (c *Client) Close(ctx context.Context) { // do nothing } diff --git a/http/client_collection.go b/http/client_collection.go index 16157a9f96..9641157d1b 100644 --- a/http/client_collection.go +++ b/http/client_collection.go @@ -93,7 +93,7 @@ func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) er return err } - docMap, err := documentJSON(doc) + docMap, err := doc.ToJSONPatch() if err != nil { return err } @@ -120,7 +120,7 @@ func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) er func (c *Collection) Update(ctx context.Context, doc *client.Document) error { methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, doc.Key().String()) - body, err := documentJSON(doc) + body, err := doc.ToJSONPatch() if err != nil { return err } @@ -313,7 +313,12 @@ func (c *Collection) Get(ctx context.Context, key client.DocKey, showDeleted boo if err := c.http.requestJson(req, &docMap); err != nil { return nil, err } - return client.NewDocFromMap(docMap) + doc, err := client.NewDocFromMap(docMap) + if err != nil { + return nil, err + } + doc.Clean() + return doc, nil } func (c *Collection) WithTxn(tx datastore.Txn) client.Collection { diff --git a/http/client_tx.go b/http/client_tx.go index 8df82007a6..ac16c29288 100644 --- a/http/client_tx.go +++ b/http/client_tx.go @@ -26,6 +26,14 @@ type Transaction struct { http *httpClient } +func NewTransaction(rawURL string, id uint64) (*Transaction, error) { + httpClient, err := newHttpClient(rawURL) + if err != nil { + return nil, err + } + return &Transaction{id, httpClient}, nil +} + func (c *Transaction) ID() uint64 { return c.id } diff --git a/http/errors.go b/http/errors.go index c2808603cf..848d293a91 100644 --- a/http/errors.go +++ b/http/errors.go @@ -15,22 +15,28 @@ import ( "errors" ) -const ( - errInvalidRequestBody = "invalid request body" - errDocKeyDoesNotMatch = "document key does not match" - errStreamingNotSupported = "streaming not supported" - errMigrationNotFound = "migration not found" - errMissingRequest = "missing request" - errInvalidTransactionId = "invalid transaction id" -) - +// Errors returnable from this package. +// +// This list is incomplete. Undefined errors may also be returned. +// Errors returned from this package may be tested against these errors with errors.Is. 
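As a small illustration of the errors.Is usage this comment describes (not part of the patch; the `defrahttp` import alias and the surrounding function are the editor's, and the import path follows the module path in go.mod), a caller could branch on the sentinels declared below:

```go
package main

import (
	"errors"
	"fmt"

	defrahttp "github.com/sourcenetwork/defradb/http"
)

// classify demonstrates testing a returned error against the package sentinels.
func classify(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, defrahttp.ErrInvalidTransactionId):
		return "stale or unknown transaction id"
	case errors.Is(err, defrahttp.ErrMigrationNotFound):
		return "no migration registered for this schema version"
	default:
		return fmt.Sprintf("unexpected error: %v", err)
	}
}
```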
var ( - ErrInvalidRequestBody = errors.New(errInvalidRequestBody) - ErrDocKeyDoesNotMatch = errors.New(errDocKeyDoesNotMatch) - ErrStreamingNotSupported = errors.New(errStreamingNotSupported) - ErrMigrationNotFound = errors.New(errMigrationNotFound) - ErrMissingRequest = errors.New(errMissingRequest) - ErrInvalidTransactionId = errors.New(errInvalidTransactionId) + ErrNoListener = errors.New("cannot serve with no listener") + ErrSchema = errors.New("base must start with the http or https scheme") + ErrDatabaseNotAvailable = errors.New("no database available") + ErrFormNotSupported = errors.New("content type application/x-www-form-urlencoded not yet supported") + ErrBodyEmpty = errors.New("body cannot be empty") + ErrMissingGQLRequest = errors.New("missing GraphQL request") + ErrPeerIdUnavailable = errors.New("no PeerID available. P2P might be disabled") + ErrStreamingUnsupported = errors.New("streaming unsupported") + ErrNoEmail = errors.New("email address must be specified for tls with autocert") + ErrPayloadFormat = errors.New("invalid payload format") + ErrMissingNewKey = errors.New("missing _newKey for imported doc") + ErrInvalidRequestBody = errors.New("invalid request body") + ErrDocKeyDoesNotMatch = errors.New("document key does not match") + ErrStreamingNotSupported = errors.New("streaming not supported") + ErrMigrationNotFound = errors.New("migration not found") + ErrMissingRequest = errors.New("missing request") + ErrInvalidTransactionId = errors.New("invalid transaction id") ) type errorResponse struct { diff --git a/http/handler.go b/http/handler.go new file mode 100644 index 0000000000..242dc5938c --- /dev/null +++ b/http/handler.go @@ -0,0 +1,138 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "fmt" + "net/http" + "sync" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore" + + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" +) + +// Version is the identifier for the current API version. 
+var Version string = "v0" + +// playgroundHandler is set when building with the playground build tag +var playgroundHandler http.Handler = http.HandlerFunc(http.NotFound) + +type Handler struct { + db client.DB + router *chi.Mux + txs *sync.Map +} + +func NewHandler(db client.DB, opts ServerOptions) *Handler { + txs := &sync.Map{} + + tx_handler := &txHandler{} + store_handler := &storeHandler{} + collection_handler := &collectionHandler{} + lens_handler := &lensHandler{} + ccip_handler := &ccipHandler{} + + router := chi.NewRouter() + router.Use(middleware.RequestLogger(&logFormatter{})) + router.Use(middleware.Recoverer) + router.Use(CorsMiddleware(opts)) + router.Use(ApiMiddleware(db, txs, opts)) + + router.Route("/api/"+Version, func(api chi.Router) { + api.Use(TransactionMiddleware, StoreMiddleware) + api.Route("/tx", func(tx chi.Router) { + tx.Post("/", tx_handler.NewTxn) + tx.Post("/concurrent", tx_handler.NewConcurrentTxn) + tx.Post("/{id}", tx_handler.Commit) + tx.Delete("/{id}", tx_handler.Discard) + }) + api.Route("/backup", func(backup chi.Router) { + backup.Post("/export", store_handler.BasicExport) + backup.Post("/import", store_handler.BasicImport) + }) + api.Route("/schema", func(schema chi.Router) { + schema.Post("/", store_handler.AddSchema) + schema.Patch("/", store_handler.PatchSchema) + schema.Post("/default", store_handler.SetDefaultSchemaVersion) + }) + api.Route("/collections", func(collections chi.Router) { + collections.Get("/", store_handler.GetCollection) + // with collection middleware + collections_tx := collections.With(CollectionMiddleware) + collections_tx.Get("/{name}", collection_handler.GetAllDocKeys) + collections_tx.Post("/{name}", collection_handler.Create) + collections_tx.Patch("/{name}", collection_handler.UpdateWith) + collections_tx.Delete("/{name}", collection_handler.DeleteWith) + collections_tx.Post("/{name}/indexes", collection_handler.CreateIndex) + collections_tx.Get("/{name}/indexes", collection_handler.GetIndexes) + collections_tx.Delete("/{name}/indexes/{index}", collection_handler.DropIndex) + collections_tx.Get("/{name}/{key}", collection_handler.Get) + collections_tx.Patch("/{name}/{key}", collection_handler.Update) + collections_tx.Delete("/{name}/{key}", collection_handler.Delete) + }) + api.Route("/lens", func(lens chi.Router) { + lens.Use(LensMiddleware) + lens.Get("/", lens_handler.Config) + lens.Post("/", lens_handler.SetMigration) + lens.Post("/reload", lens_handler.ReloadLenses) + lens.Get("/{version}", lens_handler.HasMigration) + lens.Post("/{version}/up", lens_handler.MigrateUp) + lens.Post("/{version}/down", lens_handler.MigrateDown) + }) + api.Route("/graphql", func(graphQL chi.Router) { + graphQL.Get("/", store_handler.ExecRequest) + graphQL.Post("/", store_handler.ExecRequest) + }) + api.Route("/ccip", func(ccip chi.Router) { + ccip.Get("/{sender}/{data}", ccip_handler.ExecCCIP) + ccip.Post("/", ccip_handler.ExecCCIP) + }) + api.Route("/p2p", func(p2p chi.Router) { + p2p.Get("/info", store_handler.PeerInfo) + p2p.Route("/replicators", func(p2p_replicators chi.Router) { + p2p_replicators.Get("/", store_handler.GetAllReplicators) + p2p_replicators.Post("/", store_handler.SetReplicator) + p2p_replicators.Delete("/", store_handler.DeleteReplicator) + }) + p2p.Route("/collections", func(p2p_collections chi.Router) { + p2p_collections.Get("/", store_handler.GetAllP2PCollections) + p2p_collections.Post("/{id}", store_handler.AddP2PCollection) + p2p_collections.Delete("/{id}", store_handler.RemoveP2PCollection) + }) + }) + 
api.Route("/debug", func(debug chi.Router) { + debug.Get("/dump", store_handler.PrintDump) + }) + }) + + router.Handle("/*", playgroundHandler) + + return &Handler{ + db: db, + router: router, + txs: txs, + } +} + +func (h *Handler) Transaction(id uint64) (datastore.Txn, error) { + tx, ok := h.txs.Load(id) + if !ok { + return nil, fmt.Errorf("invalid transaction id") + } + return tx.(datastore.Txn), nil +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + h.router.ServeHTTP(w, req) +} diff --git a/http/handler_ccip_test.go b/http/handler_ccip_test.go index 7884e16df7..4fb9e5259c 100644 --- a/http/handler_ccip_test.go +++ b/http/handler_ccip_test.go @@ -49,7 +49,7 @@ func TestCCIPGet_WithValidData(t *testing.T) { req := httptest.NewRequest(http.MethodGet, url, nil) rec := httptest.NewRecorder() - handler := NewServer(cdb) + handler := NewHandler(cdb, ServerOptions{}) handler.ServeHTTP(rec, req) res := rec.Result() @@ -87,7 +87,7 @@ func TestCCIPGet_WithSubscription(t *testing.T) { req := httptest.NewRequest(http.MethodGet, url, nil) rec := httptest.NewRecorder() - handler := NewServer(cdb) + handler := NewHandler(cdb, ServerOptions{}) handler.ServeHTTP(rec, req) res := rec.Result() @@ -104,7 +104,7 @@ func TestCCIPGet_WithInvalidData(t *testing.T) { req := httptest.NewRequest(http.MethodGet, url, nil) rec := httptest.NewRecorder() - handler := NewServer(cdb) + handler := NewHandler(cdb, ServerOptions{}) handler.ServeHTTP(rec, req) res := rec.Result() @@ -132,7 +132,7 @@ func TestCCIPPost_WithValidData(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", bytes.NewBuffer(body)) rec := httptest.NewRecorder() - handler := NewServer(cdb) + handler := NewHandler(cdb, ServerOptions{}) handler.ServeHTTP(rec, req) res := rec.Result() @@ -163,7 +163,7 @@ func TestCCIPPost_WithInvalidGraphQLRequest(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", bytes.NewBuffer(body)) rec := httptest.NewRecorder() - handler := NewServer(cdb) + handler := NewHandler(cdb, ServerOptions{}) handler.ServeHTTP(rec, req) res := rec.Result() @@ -176,7 +176,7 @@ func TestCCIPPost_WithInvalidBody(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", nil) rec := httptest.NewRecorder() - handler := NewServer(cdb) + handler := NewHandler(cdb, ServerOptions{}) handler.ServeHTTP(rec, req) res := rec.Result() diff --git a/http/handler_collection.go b/http/handler_collection.go index 8f8ff8423b..607c1f1b21 100644 --- a/http/handler_collection.go +++ b/http/handler_collection.go @@ -47,9 +47,14 @@ func (s *collectionHandler) Create(rw http.ResponseWriter, req *http.Request) { } switch t := body.(type) { - case []map[string]any: + case []any: var docList []*client.Document - for _, docMap := range t { + for _, v := range t { + docMap, ok := v.(map[string]any) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrInvalidRequestBody}) + return + } doc, err := client.NewDocFromMap(docMap) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) diff --git a/http/handler_lens.go b/http/handler_lens.go index ccf8dd01a8..d5ddb704c8 100644 --- a/http/handler_lens.go +++ b/http/handler_lens.go @@ -61,7 +61,15 @@ func (s *lensHandler) MigrateUp(rw http.ResponseWriter, req *http.Request) { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, result) + var value []map[string]any + err = 
enumerable.ForEach(result, func(item map[string]any) { + value = append(value, item) + }) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, value) } func (s *lensHandler) MigrateDown(rw http.ResponseWriter, req *http.Request) { @@ -77,7 +85,15 @@ func (s *lensHandler) MigrateDown(rw http.ResponseWriter, req *http.Request) { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, result) + var value []map[string]any + err = enumerable.ForEach(result, func(item map[string]any) { + value = append(value, item) + }) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, value) } func (s *lensHandler) Config(rw http.ResponseWriter, req *http.Request) { diff --git a/api/http/playground.go b/http/handler_playground.go similarity index 100% rename from api/http/playground.go rename to http/handler_playground.go diff --git a/http/handler_store.go b/http/handler_store.go index 945f6115f8..120b9f9018 100644 --- a/http/handler_store.go +++ b/http/handler_store.go @@ -242,6 +242,18 @@ func (s *storeHandler) PrintDump(rw http.ResponseWriter, req *http.Request) { rw.WriteHeader(http.StatusOK) } +type PeerInfoResponse struct { + PeerID string `json:"peerID"` +} + +func (s *storeHandler) PeerInfo(rw http.ResponseWriter, req *http.Request) { + var res PeerInfoResponse + if value, ok := req.Context().Value(peerIdContextKey).(string); ok { + res.PeerID = value + } + responseJSON(rw, http.StatusOK, &res) +} + type GraphQLRequest struct { Query string `json:"query"` } diff --git a/http/http_client.go b/http/http_client.go index 48323607ab..13abb3c6d0 100644 --- a/http/http_client.go +++ b/http/http_client.go @@ -16,6 +16,7 @@ import ( "io" "net/http" "net/url" + "strings" ) type httpClient struct { @@ -24,12 +25,19 @@ type httpClient struct { txValue string } -func newHttpClient(baseURL *url.URL) *httpClient { +func newHttpClient(rawURL string) (*httpClient, error) { + if !strings.HasPrefix(rawURL, "http") { + rawURL = "http://" + rawURL + } + baseURL, err := url.Parse(rawURL) + if err != nil { + return nil, err + } client := httpClient{ client: http.DefaultClient, - baseURL: baseURL, + baseURL: baseURL.JoinPath("/api/v0"), } - return &client + return &client, nil } func (c *httpClient) withTxn(value uint64) *httpClient { diff --git a/http/middleware.go b/http/middleware.go index 28f1e0ff1e..932797ff2c 100644 --- a/http/middleware.go +++ b/http/middleware.go @@ -14,9 +14,12 @@ import ( "context" "net/http" "strconv" + "strings" "sync" "github.com/go-chi/chi/v5" + "github.com/go-chi/cors" + "golang.org/x/exp/slices" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" @@ -50,15 +53,37 @@ var ( // If a transaction exists, all operations will be executed // in the current transaction context. colContextKey = contextKey("col") + // peerIdContextKey contains the peerId of the DefraDB node. 
+ peerIdContextKey = contextKey("peerId") ) +// CorsMiddleware handles cross-origin requests. +func CorsMiddleware(opts ServerOptions) func(http.Handler) http.Handler { + return cors.Handler(cors.Options{ + AllowOriginFunc: func(r *http.Request, origin string) bool { + if slices.Contains(opts.AllowedOrigins, "*") { + return true + } + return slices.Contains(opts.AllowedOrigins, strings.ToLower(origin)) + }, + AllowedMethods: []string{"GET", "HEAD", "POST", "PATCH", "DELETE"}, + AllowedHeaders: []string{"Content-Type"}, + MaxAge: 300, + }) +} + // ApiMiddleware sets the required context values for all API requests. -func ApiMiddleware(db client.DB, txs *sync.Map) func(http.Handler) http.Handler { +func ApiMiddleware(db client.DB, txs *sync.Map, opts ServerOptions) func(http.Handler) http.Handler { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + if opts.TLS.HasValue() { + rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains") + } + ctx := req.Context() ctx = context.WithValue(ctx, dbContextKey, db) ctx = context.WithValue(ctx, txsContextKey, txs) + ctx = context.WithValue(ctx, peerIdContextKey, opts.PeerID) next.ServeHTTP(rw, req.WithContext(ctx)) }) } diff --git a/http/server.go b/http/server.go index 7ad21e0632..ccfefb08b1 100644 --- a/http/server.go +++ b/http/server.go @@ -11,107 +11,312 @@ package http import ( + "context" + "crypto/tls" + "fmt" + "net" "net/http" - "sync" + "path" + "strings" - "github.com/go-chi/chi/v5" - "github.com/go-chi/chi/v5/middleware" + "github.com/sourcenetwork/immutable" + "golang.org/x/crypto/acme/autocert" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/logging" ) +const ( + // These constants are best-effort durations that fit our current API + // and possibly prevent running out of file descriptors. + // readTimeout = 5 * time.Second + // writeTimeout = 10 * time.Second + // idleTimeout = 120 * time.Second + + // Temporarily disabling timeouts until [this proposal](https://github.com/golang/go/issues/54136) is merged. + // https://github.com/sourcenetwork/defradb/issues/927 + readTimeout = 0 + writeTimeout = 0 + idleTimeout = 0 +) + +const ( + httpPort = ":80" + httpsPort = ":443" +) + +// Server struct holds the Handler for the HTTP API.
type Server struct { - db client.DB - router *chi.Mux - txs *sync.Map -} - -func NewServer(db client.DB) *Server { - txs := &sync.Map{} - - tx_handler := &txHandler{} - store_handler := &storeHandler{} - collection_handler := &collectionHandler{} - lens_handler := &lensHandler{} - ccip_handler := &ccipHandler{} - - router := chi.NewRouter() - router.Use(middleware.RequestLogger(&logFormatter{})) - router.Use(middleware.Recoverer) - - router.Route("/api/v0", func(api chi.Router) { - api.Use(ApiMiddleware(db, txs), TransactionMiddleware, StoreMiddleware) - api.Route("/tx", func(tx chi.Router) { - tx.Post("/", tx_handler.NewTxn) - tx.Post("/concurrent", tx_handler.NewConcurrentTxn) - tx.Post("/{id}", tx_handler.Commit) - tx.Delete("/{id}", tx_handler.Discard) - }) - api.Route("/backup", func(backup chi.Router) { - backup.Post("/export", store_handler.BasicExport) - backup.Post("/import", store_handler.BasicImport) - }) - api.Route("/schema", func(schema chi.Router) { - schema.Post("/", store_handler.AddSchema) - schema.Patch("/", store_handler.PatchSchema) - schema.Post("/default", store_handler.SetDefaultSchemaVersion) - }) - api.Route("/collections", func(collections chi.Router) { - collections.Get("/", store_handler.GetCollection) - // with collection middleware - collections_tx := collections.With(CollectionMiddleware) - collections_tx.Get("/{name}", collection_handler.GetAllDocKeys) - collections_tx.Post("/{name}", collection_handler.Create) - collections_tx.Patch("/{name}", collection_handler.UpdateWith) - collections_tx.Delete("/{name}", collection_handler.DeleteWith) - collections_tx.Post("/{name}/indexes", collection_handler.CreateIndex) - collections_tx.Get("/{name}/indexes", collection_handler.GetIndexes) - collections_tx.Delete("/{name}/indexes/{index}", collection_handler.DropIndex) - collections_tx.Get("/{name}/{key}", collection_handler.Get) - collections_tx.Patch("/{name}/{key}", collection_handler.Update) - collections_tx.Delete("/{name}/{key}", collection_handler.Delete) - }) - api.Route("/lens", func(lens chi.Router) { - lens.Use(LensMiddleware) - lens.Get("/", lens_handler.Config) - lens.Post("/", lens_handler.SetMigration) - lens.Post("/reload", lens_handler.ReloadLenses) - lens.Get("/{version}", lens_handler.HasMigration) - lens.Post("/{version}/up", lens_handler.MigrateUp) - lens.Post("/{version}/down", lens_handler.MigrateDown) - }) - api.Route("/graphql", func(graphQL chi.Router) { - graphQL.Get("/", store_handler.ExecRequest) - graphQL.Post("/", store_handler.ExecRequest) - }) - api.Route("/ccip", func(ccip chi.Router) { - ccip.Get("/{sender}/{data}", ccip_handler.ExecCCIP) - ccip.Post("/", ccip_handler.ExecCCIP) - }) - api.Route("/p2p", func(p2p chi.Router) { - p2p.Route("/replicators", func(p2p_replicators chi.Router) { - p2p_replicators.Get("/", store_handler.GetAllReplicators) - p2p_replicators.Post("/", store_handler.SetReplicator) - p2p_replicators.Delete("/", store_handler.DeleteReplicator) - }) - p2p.Route("/collections", func(p2p_collections chi.Router) { - p2p_collections.Get("/", store_handler.GetAllP2PCollections) - p2p_collections.Post("/{id}", store_handler.AddP2PCollection) - p2p_collections.Delete("/{id}", store_handler.RemoveP2PCollection) - }) - }) - api.Route("/debug", func(debug chi.Router) { - debug.Get("/dump", store_handler.PrintDump) - }) - }) - - return &Server{ - db: db, - router: router, - txs: txs, - } -} - -func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { - s.router.ServeHTTP(w, req) + options ServerOptions + 
listener net.Listener + certManager *autocert.Manager + // address that is assigned to the server on listen + address string + + http.Server +} + +type ServerOptions struct { + // AllowedOrigins is the list of allowed origins for CORS. + AllowedOrigins []string + // PeerID is the p2p id of the server node. + PeerID string + // TLS enables https when the value is present. + TLS immutable.Option[TLSOptions] + // RootDirectory is the directory for the node config. + RootDir string + // Domain is the domain for the API (optional). + Domain immutable.Option[string] +} + +type TLSOptions struct { + // PublicKey is the public key for TLS. Ignored if domain is set. + PublicKey string + // PrivateKey is the private key for TLS. Ignored if domain is set. + PrivateKey string + // Email is the address for the CA to send problem notifications (optional) + Email string + // Port is the tls port + Port string +} + +// NewServer instantiates a new server with the given http.Handler. +func NewServer(db client.DB, options ...func(*Server)) *Server { + srv := &Server{ + Server: http.Server{ + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + IdleTimeout: idleTimeout, + }, + } + + for _, opt := range append(options, DefaultOpts()) { + opt(srv) + } + + srv.Handler = NewHandler(db, srv.options) + + return srv +} + +func newHTTPRedirServer(m *autocert.Manager) *Server { + srv := &Server{ + Server: http.Server{ + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + IdleTimeout: idleTimeout, + }, + } + + srv.Addr = httpPort + srv.Handler = m.HTTPHandler(nil) + + return srv +} + +// DefaultOpts returns the default options for the server. +func DefaultOpts() func(*Server) { + return func(s *Server) { + if s.Addr == "" { + s.Addr = "localhost:9181" + } + } +} + +// WithAllowedOrigins returns an option to set the allowed origins for CORS. +func WithAllowedOrigins(origins ...string) func(*Server) { + return func(s *Server) { + s.options.AllowedOrigins = append(s.options.AllowedOrigins, origins...) + } +} + +// WithAddress returns an option to set the address for the server. +func WithAddress(addr string) func(*Server) { + return func(s *Server) { + s.Addr = addr + + // If the address is not localhost, we check to see if it's a valid IP address. + // If it's not a valid IP, we assume that it's a domain name to be used with TLS. + if !strings.HasPrefix(addr, "localhost:") && !strings.HasPrefix(addr, ":") { + host, _, err := net.SplitHostPort(addr) + if err != nil { + host = addr + } + ip := net.ParseIP(host) + if ip == nil { + s.Addr = httpPort + s.options.Domain = immutable.Some(host) + } + } + } +} + +// WithCAEmail returns an option to set the email address for the CA to send problem notifications. +func WithCAEmail(email string) func(*Server) { + return func(s *Server) { + tlsOpt := s.options.TLS.Value() + tlsOpt.Email = email + s.options.TLS = immutable.Some(tlsOpt) + } +} + +// WithPeerID returns an option to set the identifier of the server node. +func WithPeerID(id string) func(*Server) { + return func(s *Server) { + s.options.PeerID = id + } +} + +// WithRootDir returns an option to set the root directory for the node config. +func WithRootDir(rootDir string) func(*Server) { + return func(s *Server) { + s.options.RootDir = rootDir + } +} + +// WithSelfSignedCert returns an option to set the public and private keys for TLS. 
+func WithSelfSignedCert(pubKey, privKey string) func(*Server) { + return func(s *Server) { + tlsOpt := s.options.TLS.Value() + tlsOpt.PublicKey = pubKey + tlsOpt.PrivateKey = privKey + s.options.TLS = immutable.Some(tlsOpt) + } +} + +// WithTLS returns an option to enable TLS. +func WithTLS() func(*Server) { + return func(s *Server) { + tlsOpt := s.options.TLS.Value() + tlsOpt.Port = httpsPort + s.options.TLS = immutable.Some(tlsOpt) + } +} + +// WithTLSPort returns an option to set the port for TLS. +func WithTLSPort(port int) func(*Server) { + return func(s *Server) { + tlsOpt := s.options.TLS.Value() + tlsOpt.Port = fmt.Sprintf(":%d", port) + s.options.TLS = immutable.Some(tlsOpt) + } +} + +// Listen creates a new net.Listener and saves it on the receiver. +func (s *Server) Listen(ctx context.Context) error { + var err error + if s.options.TLS.HasValue() { + return s.listenWithTLS(ctx) + } + + lc := net.ListenConfig{} + s.listener, err = lc.Listen(ctx, "tcp", s.Addr) + if err != nil { + return errors.WithStack(err) + } + + // Save the address on the server in case the port was set to random, + // so that we can see what was assigned. + s.address = s.listener.Addr().String() + + return nil +} + +func (s *Server) listenWithTLS(ctx context.Context) error { + cfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + // We only allow cipher suites that are marked secure + // by ssllabs + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + }, + ServerName: "DefraDB", + } + + if s.options.Domain.HasValue() && s.options.Domain.Value() != "" { + s.Addr = s.options.TLS.Value().Port + + if s.options.TLS.Value().Email == "" || s.options.TLS.Value().Email == config.DefaultAPIEmail { + return ErrNoEmail + } + + certCache := path.Join(s.options.RootDir, "autocerts") + + log.FeedbackInfo( + ctx, + "Generating auto certificate", + logging.NewKV("Domain", s.options.Domain.Value()), + logging.NewKV("Certificate cache", certCache), + ) + + m := &autocert.Manager{ + Cache: autocert.DirCache(certCache), + Prompt: autocert.AcceptTOS, + Email: s.options.TLS.Value().Email, + HostPolicy: autocert.HostWhitelist(s.options.Domain.Value()), + } + + cfg.GetCertificate = m.GetCertificate + + // We set the manager on the server instance to later start + // a redirection server. + s.certManager = m + } else { + // When not using auto cert, we create a self-signed certificate + // with the provided public and private keys. + log.FeedbackInfo(ctx, "Generating self signed certificate") + + cert, err := tls.LoadX509KeyPair( + s.options.TLS.Value().PrivateKey, + s.options.TLS.Value().PublicKey, + ) + if err != nil { + return errors.WithStack(err) + } + + cfg.Certificates = []tls.Certificate{cert} + } + + var err error + s.listener, err = tls.Listen("tcp", s.Addr, cfg) + if err != nil { + return errors.WithStack(err) + } + + // Save the address on the server in case the port was set to random, + // so that we can see what was assigned. + s.address = s.listener.Addr().String() + + return nil +} + +// Run calls Serve with the receiver's listener.
+func (s *Server) Run(ctx context.Context) error { + if s.listener == nil { + return ErrNoListener + } + + if s.certManager != nil { + // When using TLS it's important to redirect http requests to https + go func() { + srv := newHTTPRedirServer(s.certManager) + err := srv.ListenAndServe() + if err != nil && !errors.Is(err, http.ErrServerClosed) { + log.Info(ctx, "Something went wrong with the redirection server", logging.NewKV("Error", err)) + } + }() + } + return s.Serve(s.listener) +} + +// AssignedAddr returns the address that was assigned to the server on calls to listen. +func (s *Server) AssignedAddr() string { + return s.address } diff --git a/api/http/server_test.go b/http/server_test.go similarity index 92% rename from api/http/server_test.go rename to http/server_test.go index c19e60a2ac..790f710249 100644 --- a/api/http/server_test.go +++ b/http/server_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 Democratized Data Foundation +// Copyright 2023 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -197,7 +197,7 @@ func TestNewServerAndRunWithSelfSignedCert(t *testing.T) { func TestNewServerWithoutOptions(t *testing.T) { s := NewServer(nil) assert.Equal(t, "localhost:9181", s.Addr) - assert.Equal(t, []string(nil), s.options.allowedOrigins) + assert.Equal(t, []string(nil), s.options.AllowedOrigins) } func TestNewServerWithAddress(t *testing.T) { @@ -207,41 +207,41 @@ func TestNewServerWithAddress(t *testing.T) { func TestNewServerWithDomainAddress(t *testing.T) { s := NewServer(nil, WithAddress("example.com")) - assert.Equal(t, "example.com", s.options.domain.Value()) - assert.NotNil(t, s.options.tls) + assert.Equal(t, "example.com", s.options.Domain.Value()) + assert.NotNil(t, s.options.TLS) } func TestNewServerWithAllowedOrigins(t *testing.T) { s := NewServer(nil, WithAllowedOrigins("https://source.network", "https://app.source.network")) - assert.Equal(t, []string{"https://source.network", "https://app.source.network"}, s.options.allowedOrigins) + assert.Equal(t, []string{"https://source.network", "https://app.source.network"}, s.options.AllowedOrigins) } func TestNewServerWithCAEmail(t *testing.T) { s := NewServer(nil, WithCAEmail("me@example.com")) - assert.Equal(t, "me@example.com", s.options.tls.Value().email) + assert.Equal(t, "me@example.com", s.options.TLS.Value().Email) } func TestNewServerWithPeerID(t *testing.T) { s := NewServer(nil, WithPeerID("12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR")) - assert.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", s.options.peerID) + assert.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", s.options.PeerID) } func TestNewServerWithRootDir(t *testing.T) { dir := t.TempDir() s := NewServer(nil, WithRootDir(dir)) - assert.Equal(t, dir, s.options.rootDir) + assert.Equal(t, dir, s.options.RootDir) } func TestNewServerWithTLSPort(t *testing.T) { s := NewServer(nil, WithTLSPort(44343)) - assert.Equal(t, ":44343", s.options.tls.Value().port) + assert.Equal(t, ":44343", s.options.TLS.Value().Port) } func TestNewServerWithSelfSignedCert(t *testing.T) { s := NewServer(nil, WithSelfSignedCert("pub.key", "priv.key")) - assert.Equal(t, "pub.key", s.options.tls.Value().pubKey) - assert.Equal(t, "priv.key", s.options.tls.Value().privKey) - assert.NotNil(t, s.options.tls) + assert.Equal(t, "pub.key", s.options.TLS.Value().PublicKey) + assert.Equal(t, "priv.key", s.options.TLS.Value().PrivateKey) + assert.NotNil(t, 
s.options.TLS) } func TestNewHTTPRedirServer(t *testing.T) { diff --git a/http/utils.go b/http/utils.go index a171e0ed38..c7b1507c4e 100644 --- a/http/utils.go +++ b/http/utils.go @@ -34,25 +34,6 @@ func responseJSON(rw http.ResponseWriter, status int, out any) { json.NewEncoder(rw).Encode(out) //nolint:errcheck } -func documentJSON(doc *client.Document) ([]byte, error) { - docMap, err := doc.ToMap() - if err != nil { - return nil, err - } - delete(docMap, "_key") - - for field, value := range doc.Values() { - if !value.IsDirty() { - delete(docMap, field.Name()) - } - if value.IsDelete() { - docMap[field.Name()] = nil - } - } - - return json.Marshal(docMap) -} - func parseError(msg any) error { switch msg { case client.ErrDocumentNotFound.Error(): diff --git a/logging/registry.go b/logging/registry.go index 7cd7b808a2..9410498a72 100644 --- a/logging/registry.go +++ b/logging/registry.go @@ -44,6 +44,9 @@ func setConfig(newConfig Config) Config { } func updateLoggers(config Config) { + registryMutex.Lock() + defer registryMutex.Unlock() + for loggerName, loggers := range registry { newLoggerConfig := config.forLogger(loggerName) diff --git a/net/api/client/client.go b/net/api/client/client.go deleted file mode 100644 index 2ea92bd14c..0000000000 --- a/net/api/client/client.go +++ /dev/null @@ -1,169 +0,0 @@ -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "context" - "fmt" - - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - codec "github.com/planetscale/vtprotobuf/codec/grpc" - "google.golang.org/grpc" - "google.golang.org/grpc/encoding" - _ "google.golang.org/grpc/encoding/proto" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/errors" - pb "github.com/sourcenetwork/defradb/net/pb" -) - -func init() { - encoding.RegisterCodec(codec.Codec{}) -} - -type Client struct { - c pb.CollectionClient - conn *grpc.ClientConn -} - -// NewClient returns a new defra gRPC client connected to the target address. -func NewClient(target string, opts ...grpc.DialOption) (*Client, error) { - conn, err := grpc.Dial(target, opts...) - if err != nil { - return nil, err - } - - return &Client{ - c: pb.NewCollectionClient(conn), - conn: conn, - }, nil -} - -func (c *Client) Close() error { - return c.conn.Close() -} - -// SetReplicator sends a request to add a target replicator to the DB peer. -func (c *Client) SetReplicator( - ctx context.Context, - paddr ma.Multiaddr, - collections ...string, -) (peer.ID, error) { - if paddr == nil { - return "", errors.New("target address can't be empty") - } - resp, err := c.c.SetReplicator(ctx, &pb.SetReplicatorRequest{ - Collections: collections, - Addr: paddr.Bytes(), - }) - if err != nil { - return "", errors.Wrap("could not add replicator", err) - } - return peer.IDFromBytes(resp.PeerID) -} - -// DeleteReplicator sends a request to add a target replicator to the DB peer. 
-func (c *Client) DeleteReplicator( - ctx context.Context, - pid peer.ID, - collections ...string, -) error { - _, err := c.c.DeleteReplicator(ctx, &pb.DeleteReplicatorRequest{ - PeerID: []byte(pid), - }) - return err -} - -// GetAllReplicators sends a request to add a target replicator to the DB peer. -func (c *Client) GetAllReplicators( - ctx context.Context, -) ([]client.Replicator, error) { - resp, err := c.c.GetAllReplicators(ctx, &pb.GetAllReplicatorRequest{}) - if err != nil { - return nil, errors.Wrap("could not get replicators", err) - } - reps := []client.Replicator{} - for _, rep := range resp.Replicators { - addr, err := ma.NewMultiaddrBytes(rep.Info.Addrs) - if err != nil { - return nil, errors.WithStack(err) - } - - pid, err := peer.IDFromBytes(rep.Info.Id) - if err != nil { - return nil, errors.WithStack(err) - } - - reps = append(reps, client.Replicator{ - Info: peer.AddrInfo{ - ID: pid, - Addrs: []ma.Multiaddr{addr}, - }, - Schemas: rep.Schemas, - }) - } - return reps, nil -} - -// AddP2PCollections sends a request to add P2P collecctions to the stored list. -func (c *Client) AddP2PCollections( - ctx context.Context, - collections ...string, -) error { - resp, err := c.c.AddP2PCollections(ctx, &pb.AddP2PCollectionsRequest{ - Collections: collections, - }) - if err != nil { - return errors.Wrap("could not add P2P collection topics", err) - } - if resp.Err != "" { - return errors.New(fmt.Sprintf("could not add P2P collection topics: %s", resp)) - } - return nil -} - -// RemoveP2PCollections sends a request to remove P2P collecctions from the stored list. -func (c *Client) RemoveP2PCollections( - ctx context.Context, - collections ...string, -) error { - resp, err := c.c.RemoveP2PCollections(ctx, &pb.RemoveP2PCollectionsRequest{ - Collections: collections, - }) - if err != nil { - return errors.Wrap("could not remove P2P collection topics", err) - } - if resp.Err != "" { - return errors.New(fmt.Sprintf("could not remove P2P collection topics: %s", resp)) - } - return nil -} - -// RemoveP2PCollections sends a request to get all P2P collecctions from the stored list. -func (c *Client) GetAllP2PCollections( - ctx context.Context, -) ([]client.P2PCollection, error) { - resp, err := c.c.GetAllP2PCollections(ctx, &pb.GetAllP2PCollectionsRequest{}) - if err != nil { - return nil, errors.Wrap("could not get all P2P collection topics", err) - } - var collections []client.P2PCollection - for _, col := range resp.Collections { - collections = append(collections, client.P2PCollection{ - ID: col.Id, - Name: col.Name, - }) - } - return collections, nil -} diff --git a/net/api/pb/Makefile b/net/api/pb/Makefile deleted file mode 100644 index 62eef77354..0000000000 --- a/net/api/pb/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc \ - --go_out=. --plugin protoc-gen-go="${GOBIN}/protoc-gen-go" \ - --go-grpc_out=. --plugin protoc-gen-go-grpc="${GOBIN}/protoc-gen-go-grpc" \ - --go-vtproto_out=. --plugin protoc-gen-go-vtproto="${GOBIN}/protoc-gen-go-vtproto" \ - --go-vtproto_opt=features=marshal+unmarshal+size \ - $< - -clean: - rm -f *.pb.go - rm -f *pb_test.go - -.PHONY: clean \ No newline at end of file diff --git a/net/api/pb/api.pb.go b/net/api/pb/api.pb.go deleted file mode 100644 index ad48069b8f..0000000000 --- a/net/api/pb/api.pb.go +++ /dev/null @@ -1,1100 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.9 -// source: api.proto - -package api_pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SetReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` - Addr []byte `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` -} - -func (x *SetReplicatorRequest) Reset() { - *x = SetReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetReplicatorRequest) ProtoMessage() {} - -func (x *SetReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetReplicatorRequest.ProtoReflect.Descriptor instead. -func (*SetReplicatorRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{0} -} - -func (x *SetReplicatorRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -func (x *SetReplicatorRequest) GetAddr() []byte { - if x != nil { - return x.Addr - } - return nil -} - -type SetReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` -} - -func (x *SetReplicatorReply) Reset() { - *x = SetReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetReplicatorReply) ProtoMessage() {} - -func (x *SetReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetReplicatorReply.ProtoReflect.Descriptor instead. 
-func (*SetReplicatorReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{1} -} - -func (x *SetReplicatorReply) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -type DeleteReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` -} - -func (x *DeleteReplicatorRequest) Reset() { - *x = DeleteReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteReplicatorRequest) ProtoMessage() {} - -func (x *DeleteReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteReplicatorRequest.ProtoReflect.Descriptor instead. -func (*DeleteReplicatorRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{2} -} - -func (x *DeleteReplicatorRequest) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -type DeleteReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` -} - -func (x *DeleteReplicatorReply) Reset() { - *x = DeleteReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteReplicatorReply) ProtoMessage() {} - -func (x *DeleteReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteReplicatorReply.ProtoReflect.Descriptor instead. 
-func (*DeleteReplicatorReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{3} -} - -func (x *DeleteReplicatorReply) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -type GetAllReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetAllReplicatorRequest) Reset() { - *x = GetAllReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorRequest) ProtoMessage() {} - -func (x *GetAllReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorRequest.ProtoReflect.Descriptor instead. -func (*GetAllReplicatorRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{4} -} - -type GetAllReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Replicators []*GetAllReplicatorReply_Replicators `protobuf:"bytes,1,rep,name=replicators,proto3" json:"replicators,omitempty"` -} - -func (x *GetAllReplicatorReply) Reset() { - *x = GetAllReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorReply) ProtoMessage() {} - -func (x *GetAllReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorReply.ProtoReflect.Descriptor instead. 
-func (*GetAllReplicatorReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{5} -} - -func (x *GetAllReplicatorReply) GetReplicators() []*GetAllReplicatorReply_Replicators { - if x != nil { - return x.Replicators - } - return nil -} - -type AddP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *AddP2PCollectionsRequest) Reset() { - *x = AddP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddP2PCollectionsRequest) ProtoMessage() {} - -func (x *AddP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddP2PCollectionsRequest.ProtoReflect.Descriptor instead. -func (*AddP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{6} -} - -func (x *AddP2PCollectionsRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -type AddP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` -} - -func (x *AddP2PCollectionsReply) Reset() { - *x = AddP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddP2PCollectionsReply) ProtoMessage() {} - -func (x *AddP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddP2PCollectionsReply.ProtoReflect.Descriptor instead. 
-func (*AddP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{7} -} - -func (x *AddP2PCollectionsReply) GetErr() string { - if x != nil { - return x.Err - } - return "" -} - -type RemoveP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *RemoveP2PCollectionsRequest) Reset() { - *x = RemoveP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveP2PCollectionsRequest) ProtoMessage() {} - -func (x *RemoveP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveP2PCollectionsRequest.ProtoReflect.Descriptor instead. -func (*RemoveP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{8} -} - -func (x *RemoveP2PCollectionsRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -type RemoveP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` -} - -func (x *RemoveP2PCollectionsReply) Reset() { - *x = RemoveP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveP2PCollectionsReply) ProtoMessage() {} - -func (x *RemoveP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveP2PCollectionsReply.ProtoReflect.Descriptor instead. 
-func (*RemoveP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{9} -} - -func (x *RemoveP2PCollectionsReply) GetErr() string { - if x != nil { - return x.Err - } - return "" -} - -type GetAllP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetAllP2PCollectionsRequest) Reset() { - *x = GetAllP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsRequest) ProtoMessage() {} - -func (x *GetAllP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsRequest.ProtoReflect.Descriptor instead. -func (*GetAllP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{10} -} - -type GetAllP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []*GetAllP2PCollectionsReply_Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *GetAllP2PCollectionsReply) Reset() { - *x = GetAllP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsReply) ProtoMessage() {} - -func (x *GetAllP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsReply.ProtoReflect.Descriptor instead. 
-func (*GetAllP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{11} -} - -func (x *GetAllP2PCollectionsReply) GetCollections() []*GetAllP2PCollectionsReply_Collection { - if x != nil { - return x.Collections - } - return nil -} - -type GetAllReplicatorReply_Replicators struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Info *GetAllReplicatorReply_Replicators_Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` - Schemas []string `protobuf:"bytes,2,rep,name=schemas,proto3" json:"schemas,omitempty"` -} - -func (x *GetAllReplicatorReply_Replicators) Reset() { - *x = GetAllReplicatorReply_Replicators{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorReply_Replicators) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorReply_Replicators) ProtoMessage() {} - -func (x *GetAllReplicatorReply_Replicators) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorReply_Replicators.ProtoReflect.Descriptor instead. -func (*GetAllReplicatorReply_Replicators) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{5, 0} -} - -func (x *GetAllReplicatorReply_Replicators) GetInfo() *GetAllReplicatorReply_Replicators_Info { - if x != nil { - return x.Info - } - return nil -} - -func (x *GetAllReplicatorReply_Replicators) GetSchemas() []string { - if x != nil { - return x.Schemas - } - return nil -} - -type GetAllReplicatorReply_Replicators_Info struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Addrs []byte `protobuf:"bytes,2,opt,name=addrs,proto3" json:"addrs,omitempty"` -} - -func (x *GetAllReplicatorReply_Replicators_Info) Reset() { - *x = GetAllReplicatorReply_Replicators_Info{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorReply_Replicators_Info) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorReply_Replicators_Info) ProtoMessage() {} - -func (x *GetAllReplicatorReply_Replicators_Info) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorReply_Replicators_Info.ProtoReflect.Descriptor instead. 
-func (*GetAllReplicatorReply_Replicators_Info) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{5, 0, 0} -} - -func (x *GetAllReplicatorReply_Replicators_Info) GetId() []byte { - if x != nil { - return x.Id - } - return nil -} - -func (x *GetAllReplicatorReply_Replicators_Info) GetAddrs() []byte { - if x != nil { - return x.Addrs - } - return nil -} - -type GetAllP2PCollectionsReply_Collection struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetAllP2PCollectionsReply_Collection) Reset() { - *x = GetAllP2PCollectionsReply_Collection{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsReply_Collection) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsReply_Collection) ProtoMessage() {} - -func (x *GetAllP2PCollectionsReply_Collection) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsReply_Collection.ProtoReflect.Descriptor instead. -func (*GetAllP2PCollectionsReply_Collection) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{11, 0} -} - -func (x *GetAllP2PCollectionsReply_Collection) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *GetAllP2PCollectionsReply_Collection) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -var File_api_proto protoreflect.FileDescriptor - -var file_api_proto_rawDesc = []byte{ - 0x0a, 0x09, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x62, 0x22, 0x4c, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, - 0x72, 0x22, 0x2c, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, - 0x31, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, - 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, - 0x49, 0x44, 0x22, 0x2f, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, - 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, - 0x72, 0x49, 0x44, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 
0x6c, 0x6c, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x80, - 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x99, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x42, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, - 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x1a, 0x2c, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, - 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, - 0x73, 0x22, 0x3c, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, - 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0x2a, 0x0a, 0x16, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x3f, 0x0a, 0x1b, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, - 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2d, 0x0a, 0x19, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1d, 0x0a, 0x1b, 0x47, - 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x19, 0x47, - 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 
0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, - 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x30, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xa0, 0x04, 0x0a, 0x07, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, - 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x53, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, - 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x11, 0x47, 0x65, 0x74, - 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x1f, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, - 0x12, 0x57, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x41, - 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, - 0x2e, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 
0x12, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, - 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, 0x5a, - 0x08, 0x2f, 0x3b, 0x61, 0x70, 0x69, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_api_proto_rawDescOnce sync.Once - file_api_proto_rawDescData = file_api_proto_rawDesc -) - -func file_api_proto_rawDescGZIP() []byte { - file_api_proto_rawDescOnce.Do(func() { - file_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_proto_rawDescData) - }) - return file_api_proto_rawDescData -} - -var file_api_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_api_proto_goTypes = []interface{}{ - (*SetReplicatorRequest)(nil), // 0: api.pb.SetReplicatorRequest - (*SetReplicatorReply)(nil), // 1: api.pb.SetReplicatorReply - (*DeleteReplicatorRequest)(nil), // 2: api.pb.DeleteReplicatorRequest - (*DeleteReplicatorReply)(nil), // 3: api.pb.DeleteReplicatorReply - (*GetAllReplicatorRequest)(nil), // 4: api.pb.GetAllReplicatorRequest - (*GetAllReplicatorReply)(nil), // 5: api.pb.GetAllReplicatorReply - (*AddP2PCollectionsRequest)(nil), // 6: api.pb.AddP2PCollectionsRequest - (*AddP2PCollectionsReply)(nil), // 7: api.pb.AddP2PCollectionsReply - (*RemoveP2PCollectionsRequest)(nil), // 8: api.pb.RemoveP2PCollectionsRequest - (*RemoveP2PCollectionsReply)(nil), // 9: api.pb.RemoveP2PCollectionsReply - (*GetAllP2PCollectionsRequest)(nil), // 10: api.pb.GetAllP2PCollectionsRequest - (*GetAllP2PCollectionsReply)(nil), // 11: api.pb.GetAllP2PCollectionsReply - (*GetAllReplicatorReply_Replicators)(nil), // 12: api.pb.GetAllReplicatorReply.Replicators - (*GetAllReplicatorReply_Replicators_Info)(nil), // 13: api.pb.GetAllReplicatorReply.Replicators.Info - (*GetAllP2PCollectionsReply_Collection)(nil), // 14: api.pb.GetAllP2PCollectionsReply.Collection -} -var file_api_proto_depIdxs = []int32{ - 12, // 0: api.pb.GetAllReplicatorReply.replicators:type_name -> api.pb.GetAllReplicatorReply.Replicators - 14, // 1: api.pb.GetAllP2PCollectionsReply.collections:type_name -> api.pb.GetAllP2PCollectionsReply.Collection - 13, // 2: api.pb.GetAllReplicatorReply.Replicators.info:type_name -> api.pb.GetAllReplicatorReply.Replicators.Info - 0, // 3: api.pb.Service.SetReplicator:input_type -> api.pb.SetReplicatorRequest - 2, // 4: api.pb.Service.DeleteReplicator:input_type -> api.pb.DeleteReplicatorRequest - 4, // 5: api.pb.Service.GetAllReplicators:input_type -> api.pb.GetAllReplicatorRequest - 6, // 6: api.pb.Service.AddP2PCollections:input_type -> api.pb.AddP2PCollectionsRequest - 8, // 7: api.pb.Service.RemoveP2PCollections:input_type -> api.pb.RemoveP2PCollectionsRequest - 10, // 8: api.pb.Service.GetAllP2PCollections:input_type -> api.pb.GetAllP2PCollectionsRequest - 1, // 9: api.pb.Service.SetReplicator:output_type -> api.pb.SetReplicatorReply - 3, // 10: api.pb.Service.DeleteReplicator:output_type -> api.pb.DeleteReplicatorReply - 5, // 11: api.pb.Service.GetAllReplicators:output_type -> api.pb.GetAllReplicatorReply - 7, // 12: api.pb.Service.AddP2PCollections:output_type -> api.pb.AddP2PCollectionsReply - 9, // 13: api.pb.Service.RemoveP2PCollections:output_type -> api.pb.RemoveP2PCollectionsReply - 11, // 14: 
api.pb.Service.GetAllP2PCollections:output_type -> api.pb.GetAllP2PCollectionsReply - 9, // [9:15] is the sub-list for method output_type - 3, // [3:9] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_api_proto_init() } -func file_api_proto_init() { - if File_api_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - 
default: - return nil - } - } - file_api_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply_Replicators); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply_Replicators_Info); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsReply_Collection); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_api_proto_rawDesc, - NumEnums: 0, - NumMessages: 15, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_api_proto_goTypes, - DependencyIndexes: file_api_proto_depIdxs, - MessageInfos: file_api_proto_msgTypes, - }.Build() - File_api_proto = out.File - file_api_proto_rawDesc = nil - file_api_proto_goTypes = nil - file_api_proto_depIdxs = nil -} diff --git a/net/api/pb/api.proto b/net/api/pb/api.proto deleted file mode 100644 index 367997c7af..0000000000 --- a/net/api/pb/api.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; -package api.pb; - -option go_package = "/;api_pb"; - -message SetReplicatorRequest { - repeated string collections = 1; - bytes addr = 2; -} - -message SetReplicatorReply { - bytes peerID = 1; -} - -message DeleteReplicatorRequest { - bytes peerID = 1; -} - -message DeleteReplicatorReply { - bytes peerID = 1; -} - -message GetAllReplicatorRequest {} - -message GetAllReplicatorReply { - message Replicators { - message Info { - bytes id = 1; - bytes addrs = 2; - } - Info info = 1; - repeated string schemas = 2; - } - - repeated Replicators replicators = 1; - -} - -message AddP2PCollectionsRequest { - repeated string collections = 1; -} - -message AddP2PCollectionsReply { - string err = 1; -} - -message RemoveP2PCollectionsRequest { - repeated string collections = 1; -} - -message RemoveP2PCollectionsReply { - string err = 1; -} - -message GetAllP2PCollectionsRequest {} - -message GetAllP2PCollectionsReply { - message Collection { - string id = 1; - string name = 2; - } - repeated Collection collections = 1; -} - - -// Service is the peer-to-peer network API for document sync -service Service { - // SetReplicator for this peer - rpc SetReplicator(SetReplicatorRequest) returns (SetReplicatorReply) {} - - // DeleteReplicator for this peer - rpc DeleteReplicator(DeleteReplicatorRequest) returns (DeleteReplicatorReply) {} - - // DeleteReplicator for this peer - rpc GetAllReplicators(GetAllReplicatorRequest) returns (GetAllReplicatorReply) {} - - rpc AddP2PCollections(AddP2PCollectionsRequest) returns (AddP2PCollectionsReply) {} - - rpc RemoveP2PCollections(RemoveP2PCollectionsRequest) returns (RemoveP2PCollectionsReply) {} - - rpc GetAllP2PCollections(GetAllP2PCollectionsRequest) returns (GetAllP2PCollectionsReply) {} -} \ No newline at end of file diff --git a/net/api/pb/api_grpc.pb.go b/net/api/pb/api_grpc.pb.go deleted file mode 100644 index 5d1bc204d3..0000000000 --- a/net/api/pb/api_grpc.pb.go +++ /dev/null @@ -1,300 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
-// versions: -// - protoc-gen-go-grpc v1.3.0 -// - protoc v3.21.9 -// source: api.proto - -package api_pb - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -const ( - Service_SetReplicator_FullMethodName = "/api.pb.Service/SetReplicator" - Service_DeleteReplicator_FullMethodName = "/api.pb.Service/DeleteReplicator" - Service_GetAllReplicators_FullMethodName = "/api.pb.Service/GetAllReplicators" - Service_AddP2PCollections_FullMethodName = "/api.pb.Service/AddP2PCollections" - Service_RemoveP2PCollections_FullMethodName = "/api.pb.Service/RemoveP2PCollections" - Service_GetAllP2PCollections_FullMethodName = "/api.pb.Service/GetAllP2PCollections" -) - -// ServiceClient is the client API for Service service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ServiceClient interface { - // SetReplicator for this peer - SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) - // DeleteReplicator for this peer - DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) - // DeleteReplicator for this peer - GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) - AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) - RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) - GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) -} - -type serviceClient struct { - cc grpc.ClientConnInterface -} - -func NewServiceClient(cc grpc.ClientConnInterface) ServiceClient { - return &serviceClient{cc} -} - -func (c *serviceClient) SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) { - out := new(SetReplicatorReply) - err := c.cc.Invoke(ctx, Service_SetReplicator_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceClient) DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) { - out := new(DeleteReplicatorReply) - err := c.cc.Invoke(ctx, Service_DeleteReplicator_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceClient) GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) { - out := new(GetAllReplicatorReply) - err := c.cc.Invoke(ctx, Service_GetAllReplicators_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceClient) AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) { - out := new(AddP2PCollectionsReply) - err := c.cc.Invoke(ctx, Service_AddP2PCollections_FullMethodName, in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceClient) RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) { - out := new(RemoveP2PCollectionsReply) - err := c.cc.Invoke(ctx, Service_RemoveP2PCollections_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *serviceClient) GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) { - out := new(GetAllP2PCollectionsReply) - err := c.cc.Invoke(ctx, Service_GetAllP2PCollections_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ServiceServer is the server API for Service service. -// All implementations must embed UnimplementedServiceServer -// for forward compatibility -type ServiceServer interface { - // SetReplicator for this peer - SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) - // DeleteReplicator for this peer - DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) - // DeleteReplicator for this peer - GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) - AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) - RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) - GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) - mustEmbedUnimplementedServiceServer() -} - -// UnimplementedServiceServer must be embedded to have forward compatible implementations. -type UnimplementedServiceServer struct { -} - -func (UnimplementedServiceServer) SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetReplicator not implemented") -} -func (UnimplementedServiceServer) DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteReplicator not implemented") -} -func (UnimplementedServiceServer) GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAllReplicators not implemented") -} -func (UnimplementedServiceServer) AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddP2PCollections not implemented") -} -func (UnimplementedServiceServer) RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveP2PCollections not implemented") -} -func (UnimplementedServiceServer) GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAllP2PCollections not implemented") -} -func (UnimplementedServiceServer) mustEmbedUnimplementedServiceServer() {} - -// UnsafeServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ServiceServer will -// result in compilation errors. 
-type UnsafeServiceServer interface { - mustEmbedUnimplementedServiceServer() -} - -func RegisterServiceServer(s grpc.ServiceRegistrar, srv ServiceServer) { - s.RegisterService(&Service_ServiceDesc, srv) -} - -func _Service_SetReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).SetReplicator(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_SetReplicator_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).SetReplicator(ctx, req.(*SetReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Service_DeleteReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).DeleteReplicator(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_DeleteReplicator_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).DeleteReplicator(ctx, req.(*DeleteReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Service_GetAllReplicators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAllReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).GetAllReplicators(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_GetAllReplicators_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).GetAllReplicators(ctx, req.(*GetAllReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Service_AddP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).AddP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_AddP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).AddP2PCollections(ctx, req.(*AddP2PCollectionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Service_RemoveP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).RemoveP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_RemoveP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).RemoveP2PCollections(ctx, req.(*RemoveP2PCollectionsRequest)) - } - return 
interceptor(ctx, in, info, handler) -} - -func _Service_GetAllP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAllP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).GetAllP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_GetAllP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).GetAllP2PCollections(ctx, req.(*GetAllP2PCollectionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// Service_ServiceDesc is the grpc.ServiceDesc for Service service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Service_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "api.pb.Service", - HandlerType: (*ServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SetReplicator", - Handler: _Service_SetReplicator_Handler, - }, - { - MethodName: "DeleteReplicator", - Handler: _Service_DeleteReplicator_Handler, - }, - { - MethodName: "GetAllReplicators", - Handler: _Service_GetAllReplicators_Handler, - }, - { - MethodName: "AddP2PCollections", - Handler: _Service_AddP2PCollections_Handler, - }, - { - MethodName: "RemoveP2PCollections", - Handler: _Service_RemoveP2PCollections_Handler, - }, - { - MethodName: "GetAllP2PCollections", - Handler: _Service_GetAllP2PCollections_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "api.proto", -} diff --git a/net/api/pb/api_vtproto.pb.go b/net/api/pb/api_vtproto.pb.go deleted file mode 100644 index e4ddfb9bcb..0000000000 --- a/net/api/pb/api_vtproto.pb.go +++ /dev/null @@ -1,2316 +0,0 @@ -// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 -// source: api.proto - -package api_pb - -import ( - fmt "fmt" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - io "io" - bits "math/bits" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -func (m *SetReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SetReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *SetReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Addr) > 0 { - i -= len(m.Addr) - copy(dAtA[i:], m.Addr) - i = encodeVarint(dAtA, i, uint64(len(m.Addr))) - i-- - dAtA[i] = 0x12 - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SetReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SetReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *SetReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *DeleteReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *DeleteReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - 
copy(dAtA[i:], m.unknownFields) - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorReply_Replicators_Info) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply_Replicators_Info) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllReplicatorReply_Replicators_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Addrs) > 0 { - i -= len(m.Addrs) - copy(dAtA[i:], m.Addrs) - i = encodeVarint(dAtA, i, uint64(len(m.Addrs))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarint(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorReply_Replicators) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply_Replicators) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllReplicatorReply_Replicators) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Schemas) > 0 { - for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Schemas[iNdEx]) - copy(dAtA[i:], m.Schemas[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Schemas[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Info != nil { - size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return 
m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Replicators) > 0 { - for iNdEx := len(m.Replicators) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Replicators[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AddP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *AddP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AddP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *AddP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Err) > 0 { - i -= len(m.Err) - copy(dAtA[i:], m.Err) - i = encodeVarint(dAtA, i, uint64(len(m.Err))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RemoveP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *RemoveP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RemoveP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return 
nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *RemoveP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Err) > 0 { - i -= len(m.Err) - copy(dAtA[i:], m.Err) - i = encodeVarint(dAtA, i, uint64(len(m.Err))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarint(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Collections[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - 
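The generated vtprotobuf code removed above follows a back-to-front layout: `SizeVT` computes the exact encoded size (`sov` is the varint byte count), `MarshalToSizedBufferVT` then fills the buffer from the end so each length prefix can be written after its payload, and the single-byte keys `0xa` and `0x12` are `(field_number << 3) | wire_type` with wire type 2 (length-delimited) for fields 1 and 2. A minimal, self-contained sketch of the same wire format, writing forward for readability (the names and payload here are illustrative, not from the patch):

```go
package main

import "fmt"

// putUvarint appends v in protobuf base-128 varint form: seven bits per
// byte, least-significant group first, high bit set on every byte but
// the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	payload := []byte("example-peer-id") // stand-in for a PeerID field
	var buf []byte
	buf = append(buf, 0x0a) // key: (field 1 << 3) | wire type 2
	buf = putUvarint(buf, uint64(len(payload)))
	buf = append(buf, payload...)

	// Decode it back: skip the key byte, read the length varint, then
	// slice out the payload - the same loop shape the UnmarshalVT
	// functions below use.
	var n uint64
	idx := 1
	for shift := uint(0); ; shift += 7 {
		b := buf[idx]
		idx++
		n |= uint64(b&0x7f) << shift
		if b < 0x80 {
			break
		}
	}
	fmt.Printf("field 1, %d bytes: %s\n", n, buf[idx:idx+int(n)])
}
```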
-func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *SetReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - l = len(m.Addr) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *SetReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *DeleteReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *DeleteReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply_Replicators_Info) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Addrs) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply_Replicators) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Info != nil { - l = m.Info.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if len(m.Schemas) > 0 { - for _, s := range m.Schemas { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Replicators) > 0 { - for _, e := range m.Replicators { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *AddP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *AddP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Err) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *RemoveP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *RemoveP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Err) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsReply_Collection) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = 
len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, e := range m.Collections { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...) - if m.Addr == nil { - m.Addr = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReplicatorReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) - if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) - if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteReplicatorReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) - if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) - if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs[:0], dAtA[iNdEx:postIndex]...) - if m.Addrs == nil { - m.Addrs = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Info == nil { - m.Info = &GetAllReplicatorReply_Replicators_Info{} - } - if err := m.Info.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schemas = append(m.Schemas, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicators", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Replicators = append(m.Replicators, &GetAllReplicatorReply_Replicators{}) - if err := m.Replicators[len(m.Replicators)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Err = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Err = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, &GetAllP2PCollectionsReply_Collection{}) - if err := m.Collections[len(m.Collections)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} - -func skip(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLength - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroup - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLength - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflow = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") -) diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go new file mode 100644 index 0000000000..f167b882d8 --- /dev/null +++ b/tests/clients/cli/wrapper.go @@ -0,0 +1,419 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
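The deleted gRPC marshalling above is superseded by the new CLI test client that begins here: `wrapper.go` implements `client.DB` by shelling every call through the DefraDB CLI against an in-process HTTP server. The enabling pattern is stdlib `httptest`: wrap an existing `http.Handler`, get an ephemeral loopback URL, and feed that URL to code expecting a `--url` flag. A stripped-down sketch of just that pattern, with a toy handler standing in for DefraDB's (nothing below is from the patch):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Stand-in for the real handler that fronts the database.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, `{"status":"ok"}`)
	})

	// httptest.NewServer listens on an ephemeral loopback port; its URL
	// is what the wrapper hands to the CLI as --url.
	srv := httptest.NewServer(handler)
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Print(string(body))
}
```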
+ +package cli + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "net/http/httptest" + "strings" + + blockstore "github.com/ipfs/boxo/blockstore" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/sourcenetwork/defradb/cli" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/http" +) + +var _ client.DB = (*Wrapper)(nil) + +type Wrapper struct { + db client.DB + store client.Store + cmd *cliWrapper + handler *http.Handler + httpServer *httptest.Server +} + +func NewWrapper(db client.DB) *Wrapper { + handler := http.NewHandler(db, http.ServerOptions{}) + httpServer := httptest.NewServer(handler) + cmd := newCliWrapper(httpServer.URL) + + return &Wrapper{ + db: db, + store: db, + cmd: cmd, + httpServer: httpServer, + handler: handler, + } +} + +func (w *Wrapper) SetReplicator(ctx context.Context, rep client.Replicator) error { + args := []string{"client", "p2p", "replicator", "set"} + args = append(args, "--collection", strings.Join(rep.Schemas, ",")) + + addrs, err := peer.AddrInfoToP2pAddrs(&rep.Info) + if err != nil { + return err + } + args = append(args, addrs[0].String()) + + _, err = w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) DeleteReplicator(ctx context.Context, rep client.Replicator) error { + args := []string{"client", "p2p", "replicator", "delete"} + + addrs, err := peer.AddrInfoToP2pAddrs(&rep.Info) + if err != nil { + return err + } + args = append(args, addrs[0].String()) + + _, err = w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { + args := []string{"client", "p2p", "replicator", "getall"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var reps []client.Replicator + if err := json.Unmarshal(data, &reps); err != nil { + return nil, err + } + return reps, nil +} + +func (w *Wrapper) AddP2PCollection(ctx context.Context, collectionID string) error { + args := []string{"client", "p2p", "collection", "add"} + args = append(args, collectionID) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) RemoveP2PCollection(ctx context.Context, collectionID string) error { + args := []string{"client", "p2p", "collection", "remove"} + args = append(args, collectionID) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) GetAllP2PCollections(ctx context.Context) ([]string, error) { + args := []string{"client", "p2p", "collection", "getall"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var cols []string + if err := json.Unmarshal(data, &cols); err != nil { + return nil, err + } + return cols, nil +} + +func (w *Wrapper) BasicImport(ctx context.Context, filepath string) error { + args := []string{"client", "backup", "import"} + args = append(args, filepath) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) BasicExport(ctx context.Context, config *client.BackupConfig) error { + args := []string{"client", "backup", "export"} + + if len(config.Collections) > 0 { + args = append(args, "--collections", strings.Join(config.Collections, ",")) + } + if config.Format != "" { + args = append(args, "--format", config.Format) + } + if config.Pretty { + args = append(args, "--pretty") + } + args = append(args, config.Filepath) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) AddSchema(ctx 
context.Context, schema string) ([]client.CollectionDescription, error) { + args := []string{"client", "schema", "add"} + args = append(args, schema) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var cols []client.CollectionDescription + if err := json.Unmarshal(data, &cols); err != nil { + return nil, err + } + return cols, nil +} + +func (w *Wrapper) PatchSchema(ctx context.Context, patch string, setDefault bool) error { + args := []string{"client", "schema", "patch"} + if setDefault { + args = append(args, "--set-default") + } + args = append(args, patch) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { + args := []string{"client", "schema", "set-default"} + args = append(args, schemaVersionID) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) SetMigration(ctx context.Context, config client.LensConfig) error { + return w.LensRegistry().SetMigration(ctx, config) +} + +func (w *Wrapper) LensRegistry() client.LensRegistry { + return &LensRegistry{w.cmd} +} + +func (w *Wrapper) GetCollectionByName(ctx context.Context, name client.CollectionName) (client.Collection, error) { + args := []string{"client", "collection", "describe"} + args = append(args, "--name", name) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var colDesc client.CollectionDescription + if err := json.Unmarshal(data, &colDesc); err != nil { + return nil, err + } + return &Collection{w.cmd, colDesc}, nil +} + +func (w *Wrapper) GetCollectionBySchemaID(ctx context.Context, schemaId string) (client.Collection, error) { + args := []string{"client", "collection", "describe"} + args = append(args, "--schema", schemaId) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var colDesc client.CollectionDescription + if err := json.Unmarshal(data, &colDesc); err != nil { + return nil, err + } + return &Collection{w.cmd, colDesc}, nil +} + +func (w *Wrapper) GetCollectionByVersionID(ctx context.Context, versionId string) (client.Collection, error) { + args := []string{"client", "collection", "describe"} + args = append(args, "--version", versionId) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var colDesc client.CollectionDescription + if err := json.Unmarshal(data, &colDesc); err != nil { + return nil, err + } + return &Collection{w.cmd, colDesc}, nil +} + +func (w *Wrapper) GetAllCollections(ctx context.Context) ([]client.Collection, error) { + args := []string{"client", "collection", "describe"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var colDesc []client.CollectionDescription + if err := json.Unmarshal(data, &colDesc); err != nil { + return nil, err + } + cols := make([]client.Collection, len(colDesc)) + for i, v := range colDesc { + cols[i] = &Collection{w.cmd, v} + } + return cols, err +} + +func (w *Wrapper) GetAllIndexes(ctx context.Context) (map[client.CollectionName][]client.IndexDescription, error) { + args := []string{"client", "index", "list"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var indexes map[client.CollectionName][]client.IndexDescription + if err := json.Unmarshal(data, &indexes); err != nil { + return nil, err + } + return indexes, nil +} + +func (w *Wrapper) ExecRequest(ctx context.Context, query string) *client.RequestResult { + args := []string{"client", 
"query"} + args = append(args, query) + + result := &client.RequestResult{} + + stdOut, stdErr, err := w.cmd.executeStream(ctx, args) + if err != nil { + result.GQL.Errors = []error{err} + return result + } + buffer := bufio.NewReader(stdOut) + header, err := buffer.ReadString('\n') + if err != nil { + result.GQL.Errors = []error{err} + return result + } + if header == cli.SUB_RESULTS_HEADER { + result.Pub = w.execRequestSubscription(ctx, buffer) + return result + } + data, err := io.ReadAll(buffer) + if err != nil { + result.GQL.Errors = []error{err} + return result + } + errData, err := io.ReadAll(stdErr) + if err != nil { + result.GQL.Errors = []error{err} + return result + } + if len(errData) > 0 { + result.GQL.Errors = []error{fmt.Errorf("%s", errData)} + return result + } + + var response http.GraphQLResponse + if err = json.Unmarshal(data, &response); err != nil { + result.GQL.Errors = []error{err} + return result + } + result.GQL.Data = response.Data + result.GQL.Errors = response.Errors + return result +} + +func (w *Wrapper) execRequestSubscription(ctx context.Context, r io.Reader) *events.Publisher[events.Update] { + pubCh := events.New[events.Update](0, 0) + pub, err := events.NewPublisher[events.Update](pubCh, 0) + if err != nil { + return nil + } + + go func() { + dec := json.NewDecoder(r) + + for { + var response http.GraphQLResponse + if err := dec.Decode(&response); err != nil { + return + } + pub.Publish(client.GQLResult{ + Errors: response.Errors, + Data: response.Data, + }) + } + }() + + return pub +} + +func (w *Wrapper) NewTxn(ctx context.Context, readOnly bool) (datastore.Txn, error) { + args := []string{"client", "tx", "create"} + if readOnly { + args = append(args, "--read-only") + } + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var res http.CreateTxResponse + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + tx, err := w.handler.Transaction(res.ID) + if err != nil { + return nil, err + } + return &Transaction{tx, w.cmd}, nil +} + +func (w *Wrapper) NewConcurrentTxn(ctx context.Context, readOnly bool) (datastore.Txn, error) { + args := []string{"client", "tx", "create"} + args = append(args, "--concurrent") + + if readOnly { + args = append(args, "--read-only") + } + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var res http.CreateTxResponse + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + tx, err := w.handler.Transaction(res.ID) + if err != nil { + return nil, err + } + return &Transaction{tx, w.cmd}, nil +} + +func (w *Wrapper) WithTxn(tx datastore.Txn) client.Store { + return &Wrapper{ + db: w.db, + store: w.db.WithTxn(tx), + cmd: w.cmd.withTxn(tx), + } +} + +func (w *Wrapper) Root() datastore.RootStore { + return w.db.Root() +} + +func (w *Wrapper) Blockstore() blockstore.Blockstore { + return w.db.Blockstore() +} + +func (w *Wrapper) Close(ctx context.Context) { + w.httpServer.CloseClientConnections() + w.httpServer.Close() + w.db.Close(ctx) +} + +func (w *Wrapper) Events() events.Events { + return w.db.Events() +} + +func (w *Wrapper) MaxTxnRetries() int { + return w.db.MaxTxnRetries() +} + +func (w *Wrapper) PrintDump(ctx context.Context) error { + return w.db.PrintDump(ctx) +} diff --git a/tests/clients/cli/wrapper_cli.go b/tests/clients/cli/wrapper_cli.go new file mode 100644 index 0000000000..1f73b20e25 --- /dev/null +++ b/tests/clients/cli/wrapper_cli.go @@ -0,0 +1,85 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use 
of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/sourcenetwork/defradb/cli" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/datastore" +) + +type cliWrapper struct { + address string + txValue string +} + +func newCliWrapper(address string) *cliWrapper { + return &cliWrapper{ + address: strings.TrimPrefix(address, "http://"), + } +} + +func (w *cliWrapper) withTxn(tx datastore.Txn) *cliWrapper { + return &cliWrapper{ + address: w.address, + txValue: fmt.Sprintf("%d", tx.ID()), + } +} + +func (w *cliWrapper) execute(ctx context.Context, args []string) ([]byte, error) { + stdOut, stdErr, err := w.executeStream(ctx, args) + if err != nil { + return nil, err + } + stdOutData, err := io.ReadAll(stdOut) + if err != nil { + return nil, err + } + stdErrData, err := io.ReadAll(stdErr) + if err != nil { + return nil, err + } + if len(stdErrData) != 0 { + return nil, fmt.Errorf("%s", stdErrData) + } + return stdOutData, nil +} + +func (w *cliWrapper) executeStream(ctx context.Context, args []string) (io.ReadCloser, io.ReadCloser, error) { + stdOutRead, stdOutWrite := io.Pipe() + stdErrRead, stdErrWrite := io.Pipe() + + if w.txValue != "" { + args = append(args, "--tx", w.txValue) + } + args = append(args, "--url", w.address) + + cmd := cli.NewDefraCommand(config.DefaultConfig()) + cmd.SetOut(stdOutWrite) + cmd.SetErr(stdErrWrite) + cmd.SetArgs(args) + + cmd.SilenceErrors = true + cmd.SilenceUsage = true + + go func() { + err := cmd.Execute() + stdOutWrite.CloseWithError(err) + stdErrWrite.CloseWithError(err) + }() + + return stdOutRead, stdErrRead, nil +} diff --git a/tests/clients/cli/wrapper_collection.go b/tests/clients/cli/wrapper_collection.go new file mode 100644 index 0000000000..3500bdce7c --- /dev/null +++ b/tests/clients/cli/wrapper_collection.go @@ -0,0 +1,405 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
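`executeStream` above runs the Cobra command on a goroutine and returns the read ends of two `io.Pipe`s, using `CloseWithError` so the command's eventual error reaches whoever is still draining stdout or stderr. A minimal stdlib sketch of that propagation, with `run` standing in for `cmd.Execute` (illustrative only):

```go
package main

import (
	"fmt"
	"io"
)

// run is a stand-in for the CLI command: it streams output to the pipe
// and returns a final error (nil here).
func run(out io.Writer) error {
	fmt.Fprintln(out, "line 1")
	fmt.Fprintln(out, "line 2")
	return nil
}

func main() {
	r, w := io.Pipe()

	go func() {
		// CloseWithError(nil) behaves like Close, so readers see io.EOF;
		// a non-nil error instead surfaces from the reader's next Read
		// once the streamed output is drained.
		w.CloseWithError(run(w))
	}()

	data, err := io.ReadAll(r)
	fmt.Printf("output: %q, err: %v\n", data, err)
}
```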
+ +package cli + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/http" +) + +var _ client.Collection = (*Collection)(nil) + +type Collection struct { + cmd *cliWrapper + desc client.CollectionDescription +} + +func (c *Collection) Description() client.CollectionDescription { + return c.desc +} + +func (c *Collection) Name() string { + return c.desc.Name +} + +func (c *Collection) Schema() client.SchemaDescription { + return c.desc.Schema +} + +func (c *Collection) ID() uint32 { + return c.desc.ID +} + +func (c *Collection) SchemaID() string { + return c.desc.Schema.SchemaID +} + +func (c *Collection) Create(ctx context.Context, doc *client.Document) error { + args := []string{"client", "collection", "create"} + args = append(args, "--name", c.desc.Name) + + // We must call this here, else the doc key on the given object will not match + // that of the document saved in the database + err := doc.RemapAliasFieldsAndDockey(c.Description().Schema.Fields) + if err != nil { + return err + } + document, err := doc.String() + if err != nil { + return err + } + args = append(args, string(document)) + + _, err = c.cmd.execute(ctx, args) + if err != nil { + return err + } + doc.Clean() + return nil +} + +func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) error { + args := []string{"client", "collection", "create"} + args = append(args, "--name", c.desc.Name) + + docMapList := make([]map[string]any, len(docs)) + for i, doc := range docs { + // We must call this here, else the doc key on the given object will not match + // that of the document saved in the database + err := doc.RemapAliasFieldsAndDockey(c.Description().Schema.Fields) + if err != nil { + return err + } + docMap, err := doc.ToMap() + if err != nil { + return err + } + docMapList[i] = docMap + } + documents, err := json.Marshal(docMapList) + if err != nil { + return err + } + args = append(args, string(documents)) + + _, err = c.cmd.execute(ctx, args) + if err != nil { + return err + } + for _, doc := range docs { + doc.Clean() + } + return nil +} + +func (c *Collection) Update(ctx context.Context, doc *client.Document) error { + args := []string{"client", "collection", "update"} + args = append(args, "--name", c.desc.Name) + args = append(args, "--key", doc.Key().String()) + + document, err := doc.ToJSONPatch() + if err != nil { + return err + } + args = append(args, string(document)) + + _, err = c.cmd.execute(ctx, args) + if err != nil { + return err + } + doc.Clean() + return nil +} + +func (c *Collection) Save(ctx context.Context, doc *client.Document) error { + _, err := c.Get(ctx, doc.Key(), true) + if err == nil { + return c.Update(ctx, doc) + } + if errors.Is(err, client.ErrDocumentNotFound) { + return c.Create(ctx, doc) + } + return err +} + +func (c *Collection) Delete(ctx context.Context, docKey client.DocKey) (bool, error) { + res, err := c.DeleteWithKey(ctx, docKey) + if err != nil { + return false, err + } + return res.Count == 1, nil +} + +func (c *Collection) Exists(ctx context.Context, docKey client.DocKey) (bool, error) { + _, err := c.Get(ctx, docKey, false) + if err != nil { + return false, err + } + return true, nil +} + +func (c *Collection) UpdateWith(ctx context.Context, target any, updater string) (*client.UpdateResult, error) { + switch t := 
target.(type) { + case string, map[string]any, *request.Filter: + return c.UpdateWithFilter(ctx, t, updater) + case client.DocKey: + return c.UpdateWithKey(ctx, t, updater) + case []client.DocKey: + return c.UpdateWithKeys(ctx, t, updater) + default: + return nil, client.ErrInvalidUpdateTarget + } +} + +func (c *Collection) updateWith( + ctx context.Context, + args []string, +) (*client.UpdateResult, error) { + data, err := c.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var res client.UpdateResult + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return &res, nil +} + +func (c *Collection) UpdateWithFilter( + ctx context.Context, + filter any, + updater string, +) (*client.UpdateResult, error) { + args := []string{"client", "collection", "update"} + args = append(args, "--name", c.desc.Name) + args = append(args, "--updater", updater) + + filterJSON, err := json.Marshal(filter) + if err != nil { + return nil, err + } + args = append(args, "--filter", string(filterJSON)) + + return c.updateWith(ctx, args) +} + +func (c *Collection) UpdateWithKey( + ctx context.Context, + key client.DocKey, + updater string, +) (*client.UpdateResult, error) { + args := []string{"client", "collection", "update"} + args = append(args, "--name", c.desc.Name) + args = append(args, "--key", key.String()) + args = append(args, "--updater", updater) + + return c.updateWith(ctx, args) +} + +func (c *Collection) UpdateWithKeys( + ctx context.Context, + docKeys []client.DocKey, + updater string, +) (*client.UpdateResult, error) { + args := []string{"client", "collection", "update"} + args = append(args, "--name", c.desc.Name) + args = append(args, "--updater", updater) + + keys := make([]string, len(docKeys)) + for i, v := range docKeys { + keys[i] = v.String() + } + args = append(args, "--key", strings.Join(keys, ",")) + + return c.updateWith(ctx, args) +} + +func (c *Collection) DeleteWith(ctx context.Context, target any) (*client.DeleteResult, error) { + switch t := target.(type) { + case string, map[string]any, *request.Filter: + return c.DeleteWithFilter(ctx, t) + case client.DocKey: + return c.DeleteWithKey(ctx, t) + case []client.DocKey: + return c.DeleteWithKeys(ctx, t) + default: + return nil, client.ErrInvalidDeleteTarget + } +} + +func (c *Collection) deleteWith( + ctx context.Context, + args []string, +) (*client.DeleteResult, error) { + data, err := c.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var res client.DeleteResult + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return &res, nil +} + +func (c *Collection) DeleteWithFilter(ctx context.Context, filter any) (*client.DeleteResult, error) { + args := []string{"client", "collection", "delete"} + args = append(args, "--name", c.desc.Name) + + filterJSON, err := json.Marshal(filter) + if err != nil { + return nil, err + } + args = append(args, "--filter", string(filterJSON)) + + return c.deleteWith(ctx, args) +} + +func (c *Collection) DeleteWithKey(ctx context.Context, docKey client.DocKey) (*client.DeleteResult, error) { + args := []string{"client", "collection", "delete"} + args = append(args, "--name", c.desc.Name) + args = append(args, "--key", docKey.String()) + + return c.deleteWith(ctx, args) +} + +func (c *Collection) DeleteWithKeys(ctx context.Context, docKeys []client.DocKey) (*client.DeleteResult, error) { + args := []string{"client", "collection", "delete"} + args = append(args, "--name", c.desc.Name) + + keys := make([]string, len(docKeys)) + for 
i, v := range docKeys { + keys[i] = v.String() + } + args = append(args, "--key", strings.Join(keys, ",")) + + return c.deleteWith(ctx, args) +} + +func (c *Collection) Get(ctx context.Context, key client.DocKey, showDeleted bool) (*client.Document, error) { + args := []string{"client", "collection", "get"} + args = append(args, "--name", c.desc.Name) + args = append(args, key.String()) + + if showDeleted { + args = append(args, "--show-deleted") + } + + data, err := c.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var docMap map[string]any + if err := json.Unmarshal(data, &docMap); err != nil { + return nil, err + } + return client.NewDocFromMap(docMap) +} + +func (c *Collection) WithTxn(tx datastore.Txn) client.Collection { + return &Collection{ + cmd: c.cmd.withTxn(tx), + desc: c.desc, + } +} + +func (c *Collection) GetAllDocKeys(ctx context.Context) (<-chan client.DocKeysResult, error) { + args := []string{"client", "collection", "keys"} + args = append(args, "--name", c.desc.Name) + + stdOut, _, err := c.cmd.executeStream(ctx, args) + if err != nil { + return nil, err + } + docKeyCh := make(chan client.DocKeysResult) + + go func() { + dec := json.NewDecoder(stdOut) + defer close(docKeyCh) + + for { + var res http.DocKeyResult + if err := dec.Decode(&res); err != nil { + return + } + key, err := client.NewDocKeyFromString(res.Key) + if err != nil { + return + } + docKey := client.DocKeysResult{ + Key: key, + } + if res.Error != "" { + docKey.Err = fmt.Errorf(res.Error) + } + docKeyCh <- docKey + } + }() + + return docKeyCh, nil +} + +func (c *Collection) CreateIndex( + ctx context.Context, + indexDesc client.IndexDescription, +) (index client.IndexDescription, err error) { + args := []string{"client", "index", "create"} + args = append(args, "--collection", c.desc.Name) + args = append(args, "--name", indexDesc.Name) + + fields := make([]string, len(indexDesc.Fields)) + for i := range indexDesc.Fields { + fields[i] = indexDesc.Fields[i].Name + } + args = append(args, "--fields", strings.Join(fields, ",")) + + data, err := c.cmd.execute(ctx, args) + if err != nil { + return index, err + } + if err := json.Unmarshal(data, &index); err != nil { + return index, err + } + return index, nil +} + +func (c *Collection) DropIndex(ctx context.Context, indexName string) error { + args := []string{"client", "index", "drop"} + args = append(args, "--collection", c.desc.Name) + args = append(args, "--name", indexName) + + _, err := c.cmd.execute(ctx, args) + return err +} + +func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) { + args := []string{"client", "index", "list"} + args = append(args, "--collection", c.desc.Name) + + data, err := c.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var indexes []client.IndexDescription + if err := json.Unmarshal(data, &indexes); err != nil { + return nil, err + } + return indexes, nil +} diff --git a/tests/clients/cli/wrapper_lens.go b/tests/clients/cli/wrapper_lens.go new file mode 100644 index 0000000000..679a792662 --- /dev/null +++ b/tests/clients/cli/wrapper_lens.go @@ -0,0 +1,145 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
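For reference, a minimal sketch of how a caller might drain the key stream returned by the GetAllDocKeys wrapper above. The helper name, ctx, col, and the fmt logging are illustrative assumptions, not part of this patch:

	func printAllDocKeys(ctx context.Context, col client.Collection) error {
		keyCh, err := col.GetAllDocKeys(ctx)
		if err != nil {
			return err
		}
		for res := range keyCh {
			// The producing goroutine closes the channel once the CLI
			// stream ends, so this range terminates on its own.
			if res.Err != nil {
				return res.Err // per-result errors travel on the channel
			}
			fmt.Println(res.Key.String())
		}
		return nil
	}

Channel close is the only termination signal here; there is no separate done channel.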
+ +package cli + +import ( + "context" + "encoding/json" + + "github.com/sourcenetwork/immutable/enumerable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore" +) + +var _ client.LensRegistry = (*LensRegistry)(nil) + +type LensRegistry struct { + cmd *cliWrapper +} + +func (w *LensRegistry) WithTxn(tx datastore.Txn) client.LensRegistry { + return &LensRegistry{w.cmd.withTxn(tx)} +} + +func (w *LensRegistry) SetMigration(ctx context.Context, config client.LensConfig) error { + args := []string{"client", "schema", "migration", "set"} + args = append(args, config.SourceSchemaVersionID) + args = append(args, config.DestinationSchemaVersionID) + + lensCfg, err := json.Marshal(config.Lens) + if err != nil { + return err + } + args = append(args, string(lensCfg)) + + _, err = w.cmd.execute(ctx, args) + return err +} + +func (w *LensRegistry) ReloadLenses(ctx context.Context) error { + args := []string{"client", "schema", "migration", "reload"} + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *LensRegistry) MigrateUp( + ctx context.Context, + src enumerable.Enumerable[map[string]any], + schemaVersionID string, +) (enumerable.Enumerable[map[string]any], error) { + args := []string{"client", "schema", "migration", "up"} + args = append(args, "--version", schemaVersionID) + + var srcData []map[string]any + err := enumerable.ForEach(src, func(item map[string]any) { + srcData = append(srcData, item) + }) + if err != nil { + return nil, err + } + srcJSON, err := json.Marshal(srcData) + if err != nil { + return nil, err + } + args = append(args, string(srcJSON)) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var out enumerable.Enumerable[map[string]any] + if err := json.Unmarshal(data, &out); err != nil { + return nil, err + } + return out, nil +} + +func (w *LensRegistry) MigrateDown( + ctx context.Context, + src enumerable.Enumerable[map[string]any], + schemaVersionID string, +) (enumerable.Enumerable[map[string]any], error) { + args := []string{"client", "schema", "migration", "down"} + args = append(args, "--version", schemaVersionID) + + var srcData []map[string]any + err := enumerable.ForEach(src, func(item map[string]any) { + srcData = append(srcData, item) + }) + if err != nil { + return nil, err + } + srcJSON, err := json.Marshal(srcData) + if err != nil { + return nil, err + } + args = append(args, string(srcJSON)) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var out enumerable.Enumerable[map[string]any] + if err := json.Unmarshal(data, &out); err != nil { + return nil, err + } + return out, nil +} + +func (w *LensRegistry) Config(ctx context.Context) ([]client.LensConfig, error) { + args := []string{"client", "schema", "migration", "get"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var cfgs []client.LensConfig + if err := json.Unmarshal(data, &cfgs); err != nil { + return nil, err + } + return cfgs, nil +} + +func (w *LensRegistry) HasMigration(ctx context.Context, schemaVersionID string) (bool, error) { + cfgs, err := w.Config(ctx) + if err != nil { + return false, err + } + found := false + for _, cfg := range cfgs { + if cfg.SourceSchemaVersionID == schemaVersionID { + found = true + } + } + return found, nil +} diff --git a/tests/clients/cli/wrapper_tx.go b/tests/clients/cli/wrapper_tx.go new file mode 100644 index 0000000000..6656c7b058 --- /dev/null +++ b/tests/clients/cli/wrapper_tx.go @@ -0,0 +1,76 @@ +// 
Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "context" + "fmt" + + "github.com/sourcenetwork/defradb/datastore" +) + +var _ datastore.Txn = (*Transaction)(nil) + +type Transaction struct { + tx datastore.Txn + cmd *cliWrapper +} + +func (w *Transaction) ID() uint64 { + return w.tx.ID() +} + +func (w *Transaction) Commit(ctx context.Context) error { + args := []string{"client", "tx", "commit"} + args = append(args, fmt.Sprintf("%d", w.tx.ID())) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Transaction) Discard(ctx context.Context) { + args := []string{"client", "tx", "discard"} + args = append(args, fmt.Sprintf("%d", w.tx.ID())) + + w.cmd.execute(ctx, args) //nolint:errcheck +} + +func (w *Transaction) OnSuccess(fn func()) { + w.tx.OnSuccess(fn) +} + +func (w *Transaction) OnError(fn func()) { + w.tx.OnError(fn) +} + +func (w *Transaction) OnDiscard(fn func()) { + w.tx.OnDiscard(fn) +} + +func (w *Transaction) Rootstore() datastore.DSReaderWriter { + return w.tx.Rootstore() +} + +func (w *Transaction) Datastore() datastore.DSReaderWriter { + return w.tx.Datastore() +} + +func (w *Transaction) Headstore() datastore.DSReaderWriter { + return w.tx.Headstore() +} + +func (w *Transaction) DAGstore() datastore.DAGStore { + return w.tx.DAGstore() +} + +func (w *Transaction) Systemstore() datastore.DSReaderWriter { + return w.tx.Systemstore() +} diff --git a/http/wrapper.go b/tests/clients/http/wrapper.go similarity index 90% rename from http/wrapper.go rename to tests/clients/http/wrapper.go index eb91ffdb7a..10b34129d8 100644 --- a/http/wrapper.go +++ b/tests/clients/http/wrapper.go @@ -12,7 +12,6 @@ package http import ( "context" - "fmt" "net/http/httptest" blockstore "github.com/ipfs/boxo/blockstore" @@ -20,6 +19,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/http" ) var _ client.DB = (*Wrapper)(nil) @@ -28,23 +28,23 @@ var _ client.DB = (*Wrapper)(nil) // single struct that implements the client.DB interface. 
type Wrapper struct { db client.DB - server *Server - client *Client + handler *http.Handler + client *http.Client httpServer *httptest.Server } func NewWrapper(db client.DB) (*Wrapper, error) { - server := NewServer(db) - httpServer := httptest.NewServer(server) + handler := http.NewHandler(db, http.ServerOptions{}) + httpServer := httptest.NewServer(handler) - client, err := NewClient(httpServer.URL) + client, err := http.NewClient(httpServer.URL) if err != nil { return nil, err } return &Wrapper{ db, - server, + handler, client, httpServer, }, nil @@ -131,11 +131,11 @@ func (w *Wrapper) NewTxn(ctx context.Context, readOnly bool) (datastore.Txn, err if err != nil { return nil, err } - server, ok := w.server.txs.Load(client.ID()) - if !ok { - return nil, fmt.Errorf("failed to get server transaction") + server, err := w.handler.Transaction(client.ID()) + if err != nil { + return nil, err } - return &TxWrapper{server.(datastore.Txn), client}, nil + return &TxWrapper{server, client}, nil } func (w *Wrapper) NewConcurrentTxn(ctx context.Context, readOnly bool) (datastore.Txn, error) { @@ -143,11 +143,11 @@ func (w *Wrapper) NewConcurrentTxn(ctx context.Context, readOnly bool) (datastor if err != nil { return nil, err } - server, ok := w.server.txs.Load(client.ID()) - if !ok { - return nil, fmt.Errorf("failed to get server transaction") + server, err := w.handler.Transaction(client.ID()) + if err != nil { + return nil, err } - return &TxWrapper{server.(datastore.Txn), client}, nil + return &TxWrapper{server, client}, nil } func (w *Wrapper) WithTxn(tx datastore.Txn) client.Store { diff --git a/http/wrapper_tx.go b/tests/clients/http/wrapper_tx.go similarity index 100% rename from http/wrapper_tx.go rename to tests/clients/http/wrapper_tx.go diff --git a/tests/integration/cli/client_backup_export_test.go b/tests/integration/cli/client_backup_export_test.go deleted file mode 100644 index 62f2677c7b..0000000000 --- a/tests/integration/cli/client_backup_export_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
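Condensing the rewritten NewWrapper above into a hedged, test-only sketch: it uses only calls visible in the hunk (http.NewHandler, httptest.NewServer, http.NewClient, where "http" is the defradb package imported above), while the helper name and *testing.T plumbing are assumptions:

	func newTestClient(t *testing.T, db client.DB) *http.Client {
		handler := http.NewHandler(db, http.ServerOptions{})
		srv := httptest.NewServer(handler)
		t.Cleanup(srv.Close)

		c, err := http.NewClient(srv.URL)
		if err != nil {
			t.Fatal(err)
		}
		return c
	}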
- -package clitest - -import ( - "os" - "testing" - - "github.com/stretchr/testify/require" -) - -func createUser(t *testing.T, conf DefraNodeConfig) { - _, _ = runDefraCommand(t, conf, []string{ - "client", "query", `mutation { create_User(data: "{\"name\": \"John\"}") { _key } }`, - }) -} - -func TestBackup_IfNoArgs_ShowUsage(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{"client", "backup"}) - assertContainsSubstring(t, stdout, "Usage:") -} - -func TestBackupExport_ForAllCollections_ShouldExport(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/test.json" - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "export", filepath, - }) - stopDefra() - - assertContainsSubstring(t, stdout, "success") - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - require.Equal( - t, - `{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`, - string(b), - ) -} - -func TestBackupExport_ForUserCollection_ShouldExport(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/test.json" - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "export", filepath, "--collections", "User", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "success") - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - require.Equal( - t, - `{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`, - string(b), - ) -} - -func TestBackupExport_ForInvalidCollection_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/test.json" - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "export", filepath, "--collections", "Invalid", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "collection does not exist") -} - -func TestBackupExport_InvalidFilePath_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/some/test.json" - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "export", filepath, "--collections", "Invalid", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "invalid file path") -} diff --git a/tests/integration/cli/client_backup_import_test.go b/tests/integration/cli/client_backup_import_test.go deleted file mode 100644 index 8290dbe6de..0000000000 --- a/tests/integration/cli/client_backup_import_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package clitest - -import ( - "os" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestBackupImport_WithValidFile_ShouldImport(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - filepath := t.TempDir() + "/test.json" - - err := os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`), - 0644, - ) - require.NoError(t, err) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "import", filepath, - }) - stopDefra() - - assertContainsSubstring(t, stdout, "success") -} - -func TestBackupImport_WithExistingDoc_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/test.json" - - err := os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`), - 0644, - ) - require.NoError(t, err) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "import", filepath, - }) - stopDefra() - - assertContainsSubstring(t, stdout, "a document with the given dockey already exists") -} - -func TestBackupImport_ForInvalidCollection_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/test.json" - - err := os.WriteFile( - filepath, - []byte(`{"Invalid":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`), - 0644, - ) - require.NoError(t, err) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "import", filepath, - }) - stopDefra() - - assertContainsSubstring(t, stdout, "failed to get collection: datastore: key not found. Name: Invalid") -} - -func TestBackupImport_InvalidFilePath_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/some/test.json" - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "import", filepath, - }) - stopDefra() - - assertContainsSubstring(t, stdout, "invalid file path") -} diff --git a/tests/integration/cli/client_blocks_test.go b/tests/integration/cli/client_blocks_test.go deleted file mode 100644 index 08d1c22684..0000000000 --- a/tests/integration/cli/client_blocks_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package clitest - -import "testing" - -func TestClientBlocksEmpty(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{"client", "blocks"}) - assertContainsSubstring(t, stdout, "Usage:") -} - -func TestClientBlocksGetEmpty(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{"client", "blocks", "get"}) - assertContainsSubstring(t, stdout, "Usage:") -} - -func TestClientBlocksGetInvalidCID(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - stdout, _ := runDefraCommand(t, conf, []string{"client", "blocks", "get", "invalid-cid"}) - _ = stopDefra() - assertContainsSubstring(t, stdout, "\"errors\"") -} - -func TestClientBlocksGetNonExistentCID(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - stdout, _ := runDefraCommand(t, conf, []string{"client", "blocks", "get", "bafybeieelb43ol5e5jiick2p7k4p577ph72ecwcuowlhbops4hpz24zhz4"}) - _ = stopDefra() - assertContainsSubstring(t, stdout, "could not find") -} diff --git a/tests/integration/cli/client_index_create_test.go b/tests/integration/cli/client_index_create_test.go deleted file mode 100644 index 89d6a4a18a..0000000000 --- a/tests/integration/cli/client_index_create_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" -) - -func createUserCollection(t *testing.T, conf DefraNodeConfig) { - createCollection(t, conf, `type User { name: String }`) -} - -func createCollection(t *testing.T, conf DefraNodeConfig, colSchema string) { - fileName := schemaFileFixture(t, "schema.graphql", colSchema) - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fileName}) - assertContainsSubstring(t, stdout, "success") -} - -func TestIndex_IfNoArgs_ShowUsage(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{"client", "index"}) - assertContainsSubstring(t, stdout, "Usage:") -} - -func TestIndexCreate_IfNoArgs_ShowUsage(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"client", "index", "create"}) - assertContainsSubstring(t, stderr, "Usage") -} - -func TestIndexCreate_IfNoFieldsArg_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "index", "create", - "--collection", "User", - }) - stopDefra() - - assertContainsSubstring(t, stderr, "missing argument") -} - -func TestIndexCreate_IfNoCollectionArg_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "index", "create", - "--fields", "Name", - }) - stopDefra() - - assertContainsSubstring(t, stderr, "missing argument") -} - -func TestIndexCreate_IfCollectionExists_ShouldCreateIndex(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - 
createUserCollection(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "create", - "--collection", "User", - "--fields", "name", - "--name", "users_name_index", - }) - nodeLog := stopDefra() - - jsonResponse := `{"data":{"index":{"Name":"users_name_index","ID":1,"Fields":[{"Name":"name","Direction":"ASC"}]}}}` - assertContainsSubstring(t, stdout, jsonResponse) - assertNotContainsSubstring(t, stdout, "errors") - assertNotContainsSubstring(t, nodeLog, "errors") -} - -func TestIndexCreate_IfInternalError_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "create", - "--collection", "User", - "--fields", "Name", - "--name", "users_name_index", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "errors") -} diff --git a/tests/integration/cli/client_index_drop_test.go b/tests/integration/cli/client_index_drop_test.go deleted file mode 100644 index ce03e29524..0000000000 --- a/tests/integration/cli/client_index_drop_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" -) - -func TestIndexDrop_IfNoArgs_ShowUsage(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"client", "index", "drop"}) - assertContainsSubstring(t, stderr, "Usage") -} - -const userColIndexOnNameFieldName = "users_name_index" - -func createIndexOnName(t *testing.T, conf DefraNodeConfig) { - createIndexOnField(t, conf, "User", "name", userColIndexOnNameFieldName) -} - -func createIndexOnField(t *testing.T, conf DefraNodeConfig, colName, fieldName, indexName string) { - runDefraCommand(t, conf, []string{ - "client", "index", "create", - "--collection", colName, - "--fields", fieldName, - "--name", indexName, - }) -} - -func TestIndexDrop_IfNoNameArg_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - createIndexOnName(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "index", "drop", - "--collection", "User", - }) - stopDefra() - - assertContainsSubstring(t, stderr, "missing argument") -} - -func TestIndexDrop_IfNoCollectionArg_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - createIndexOnName(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "index", "drop", - "--name", "users_name_index", - }) - stopDefra() - - assertContainsSubstring(t, stderr, "missing argument") -} - -func TestIndexDrop_IfCollectionWithIndexExists_ShouldDropIndex(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - createIndexOnName(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "drop", - "--collection", "User", - "--name", "users_name_index", - }) - nodeLog := stopDefra() - - jsonResponse := `{"data":{"result":"success"}}` - assertContainsSubstring(t, stdout, jsonResponse) - assertNotContainsSubstring(t, 
stdout, "errors") - assertNotContainsSubstring(t, nodeLog, "errors") -} - -func TestIndexDrop_IfCollectionDoesNotExist_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "drop", - "--collection", "User", - "--name", "users_name_index", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "errors") -} - -func TestIndexDrop_IfInternalError_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "drop", - "--collection", "User", - "--name", "users_name_index", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "errors") -} diff --git a/tests/integration/cli/client_index_list_test.go b/tests/integration/cli/client_index_list_test.go deleted file mode 100644 index cb2f7d5fac..0000000000 --- a/tests/integration/cli/client_index_list_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "encoding/json" - "testing" - - "github.com/sourcenetwork/defradb/client" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestIndexList_IfCollectionIsNotSpecified_ShouldReturnAllIndexes(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createCollection(t, conf, `type User { name: String }`) - createCollection(t, conf, `type Product { name: String price: Int }`) - createIndexOnField(t, conf, "User", "name", "") - createIndexOnField(t, conf, "Product", "name", "") - createIndexOnField(t, conf, "Product", "price", "") - - stdout, _ := runDefraCommand(t, conf, []string{"client", "index", "list"}) - nodeLog := stopDefra() - - var resp struct { - Data struct { - Collections map[string][]client.IndexDescription `json:"collections"` - } `json:"data"` - } - err := json.Unmarshal([]byte(stdout[0]), &resp) - require.NoError(t, err) - - assert.Equal(t, len(resp.Data.Collections), 2) - assert.Equal(t, len(resp.Data.Collections["User"]), 1) - assert.Equal(t, len(resp.Data.Collections["Product"]), 2) - - assertNotContainsSubstring(t, stdout, "errors") - assertNotContainsSubstring(t, nodeLog, "errors") -} - -func TestIndexList_IfCollectionIsSpecified_ShouldReturnCollectionsIndexes(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - createIndexOnName(t, conf) - - createCollection(t, conf, `type Product { name: String price: Int }`) - createIndexOnField(t, conf, "Product", "name", "") - createIndexOnField(t, conf, "Product", "price", "") - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "list", - "--collection", "User", - }) - nodeLog := stopDefra() - - var resp struct { - Data struct { - Indexes []client.IndexDescription `json:"indexes"` - } `json:"data"` - } - err := json.Unmarshal([]byte(stdout[0]), &resp) - require.NoError(t, err) - - expectedDesc := client.IndexDescription{Name: userColIndexOnNameFieldName, ID: 1, Fields: 
[]client.IndexedFieldDescription{{Name: "name", Direction: client.Ascending}}} - assert.Equal(t, 1, len(resp.Data.Indexes)) - assert.Equal(t, expectedDesc, resp.Data.Indexes[0]) - - assertNotContainsSubstring(t, stdout, "errors") - assertNotContainsSubstring(t, nodeLog, "errors") -} - -func TestIndexList_IfInternalError_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "list", - "--collection", "User", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "errors") -} diff --git a/tests/integration/cli/client_peerid_test.go b/tests/integration/cli/client_peerid_test.go deleted file mode 100644 index 0592fd4aa1..0000000000 --- a/tests/integration/cli/client_peerid_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" -) - -func TestPeerID(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "peerid"}) - - defraLogLines := stopDefra() - - assertNotContainsSubstring(t, defraLogLines, "ERROR") - - assertContainsSubstring(t, stdout, "peerID") -} - -func TestPeerIDWithNoHost(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"client", "peerid"}) - assertContainsSubstring(t, stderr, "failed to request PeerID") -} diff --git a/tests/integration/cli/client_ping_test.go b/tests/integration/cli/client_ping_test.go deleted file mode 100644 index a4e1eef96f..0000000000 --- a/tests/integration/cli/client_ping_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
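The index tests deleted here are superseded by the CLI-backed Collection wrapper added earlier in this series; a hypothetical wrapper-based equivalent of the create case, with ctx, col, and t assumed from the surrounding test, might look like:

	desc, err := col.CreateIndex(ctx, client.IndexDescription{
		Name:   "users_name_index",
		Fields: []client.IndexedFieldDescription{{Name: "name"}},
	})
	if err != nil {
		t.Fatal(err)
	}
	// The server assigns desc.ID, as the JSON response in the deleted test shows.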
- -package clitest - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/config" -) - -func TestPingSimple(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "ping"}) - - nodeLog := stopDefra() - - assert.Contains(t, stdout, `{"data":{"response":"pong"}}`) - for _, line := range nodeLog { - assert.NotContains(t, line, "ERROR") - } -} - -func TestPingCommandToInvalidHost(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - _, stderr := runDefraCommand(t, conf, []string{"client", "ping", "--url", "'1!2:3!4'"}) - - nodeLog := stopDefra() - - for _, line := range nodeLog { - assert.NotContains(t, line, "ERROR") - } - // for some line in stderr to contain the error message - for _, line := range stderr { - if strings.Contains(line, config.ErrFailedToValidateConfig.Error()) { - return - } - } - t.Error("expected error message not found in stderr") -} - -func TestPingCommandNoHost(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - p, err := findFreePortInRange(t, 49152, 65535) - assert.NoError(t, err) - addr := fmt.Sprintf("localhost:%d", p) - _, stderr := runDefraCommand(t, conf, []string{"client", "ping", "--url", addr}) - assertContainsSubstring(t, stderr, "failed to send ping") -} diff --git a/tests/integration/cli/client_query_test.go b/tests/integration/cli/client_query_test.go deleted file mode 100644 index 6ca98cbade..0000000000 --- a/tests/integration/cli/client_query_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package clitest - -import ( - "testing" -) - -func TestRequestSimple(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "query", - "query IntrospectionQuery {__schema {queryType { name }}}", - }) - nodeLog := stopDefra() - - assertContainsSubstring(t, stdout, "Query") - assertNotContainsSubstring(t, nodeLog, "ERROR") -} - -func TestRequestInvalidQuery(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "query", "{}}"}) - _ = stopDefra() - - assertContainsSubstring(t, stdout, "Syntax Error") -} - -func TestRequestWithErrorNoType(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - stdout, _ := runDefraCommand(t, conf, []string{"client", "query", "query { User { whatever } }"}) - - assertContainsSubstring(t, stdout, "Cannot query field") -} - -func TestRequestWithErrorNoField(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - fname := schemaFileFixture(t, "schema.graphql", ` - type User { - id: ID - name: String - }`) - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", "query { User { nonexistent } }"}) - - assertContainsSubstring(t, stdout, `Cannot query field \"nonexistent\"`) -} - -func TestRequestQueryFromFile(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - fname := schemaFileFixture(t, "schema.graphql", ` - type User123 { - XYZ: String - }`) - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname}) - assertContainsSubstring(t, stdout, "success") - - fname = schemaFileFixture(t, "query.graphql", ` - query { - __schema { - types { - name - fields { - name - type { - name - kind - } - } - } - } - }`) - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", "-f", fname}) - - assertContainsSubstring(t, stdout, "Query") - - // Check that the User type is correctly returned - assertContainsSubstring(t, stdout, "User123") - assertContainsSubstring(t, stdout, "XYZ") -} diff --git a/tests/integration/cli/client_rpc_p2p_collection_test.go b/tests/integration/cli/client_rpc_p2p_collection_test.go deleted file mode 100644 index b44abcaefb..0000000000 --- a/tests/integration/cli/client_rpc_p2p_collection_test.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -// TBD diff --git a/tests/integration/cli/client_rpc_replicator_test.go b/tests/integration/cli/client_rpc_replicator_test.go deleted file mode 100644 index 1fd0e3c351..0000000000 --- a/tests/integration/cli/client_rpc_replicator_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "fmt" - "testing" -) - -func TestReplicatorGetAllEmpty(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - portTCP, err := findFreePortInRange(t, 49152, 65535) - if err != nil { - t.Fatal(err) - } - conf.GRPCAddr = fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", portTCP) - if err != nil { - t.Fatal(err) - } - - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - tcpAddr := fmt.Sprintf("localhost:%d", portTCP) - _, stderr := runDefraCommand(t, conf, []string{"client", "--addr", tcpAddr, "rpc", "replicator", "getall"}) - assertContainsSubstring(t, stderr, "No replicator found") -} diff --git a/tests/integration/cli/client_schema_add_test.go b/tests/integration/cli/client_schema_add_test.go deleted file mode 100644 index 124fcba82a..0000000000 --- a/tests/integration/cli/client_schema_add_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAddSchemaFromFile(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - fname := schemaFileFixture(t, "schema.graphql", ` - type User { - id: ID - name: String - }`) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname}) - - nodeLog := stopDefra() - - jsonReponse := `{"data":{"collections":[{"name":"User","id":"bafkreifxwnqwcg3uqqr3iydebnmeadmjxg722qauocdtjbusinjtzja7py","version_id":"bafkreifxwnqwcg3uqqr3iydebnmeadmjxg722qauocdtjbusinjtzja7py"}],"result":"success"}}` - assert.Contains(t, stdout, jsonReponse) - assertNotContainsSubstring(t, nodeLog, "ERROR") -} - -func TestAddSchemaWithDuplicateType(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - fname1 := schemaFileFixture(t, "schema1.graphql", `type Post { id: ID title: String }`) - fname2 := schemaFileFixture(t, "schema2.graphql", `type Post { id: ID author: String }`) - - stdout1, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname1}) - stdout2, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname2}) - - _ = stopDefra() - - jsonReponse := `{"data":{"collections":[{"name":"Post","id":"bafkreibamgkyo3juvgx2b3ice4tjldcuxiibwo32kq22vfuyvzzgg7kfga","version_id":"bafkreibamgkyo3juvgx2b3ice4tjldcuxiibwo32kq22vfuyvzzgg7kfga"}],"result":"success"}}` - assertContainsSubstring(t, stdout1, jsonReponse) - assertContainsSubstring(t, stdout2, `schema type already exists. Name: Post`) -} diff --git a/tests/integration/cli/client_schema_migration_get_test.go b/tests/integration/cli/client_schema_migration_get_test.go deleted file mode 100644 index dd70879433..0000000000 --- a/tests/integration/cli/client_schema_migration_get_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "fmt" - "testing" - - "github.com/sourcenetwork/defradb/tests/lenses" -) - -func TestSchemaMigrationGet_GivenOneArg_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "get", - "notAnArg", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "too many arguments. Max: 0, Actual: 1") -} - -func TestSchemaMigrationGet_GivenNoMigrations_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "get", - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, `{"data":{"configuration":[]}}`) -} - -func TestSchemaMigrationGet_GivenEmptyMigrationObj_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", "{}", - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "get", - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, - `{"data":{"configuration":[{"SourceSchemaVersionID":"bae123","DestinationSchemaVersionID":"bae456","Lenses":null}]}}`, - ) -} - -func TestSchemaMigrationGet_GivenEmptyMigration_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", `{"lenses": []}`, - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "get", - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, - `{"data":{"configuration":[{"SourceSchemaVersionID":"bae123","DestinationSchemaVersionID":"bae456","Lenses":[]}]}}`, - ) -} - -func TestSchemaMigrationGet_GivenMigration_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", - fmt.Sprintf(`{"lenses": [{"path":"%s","arguments":{"dst":"verified","value":true}}]}`, lenses.SetDefaultModulePath), - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "get", - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, - `{"data":{"configuration":[{"SourceSchemaVersionID":"bae123","DestinationSchemaVersionID":"bae456","Lenses":[`+ - fmt.Sprintf( - `{"Path":"%s",`, - lenses.SetDefaultModulePath, - )+ - `"Inverse":false,"Arguments":{"dst":"verified","value":true}}`+ - `]}]}}`, - ) -} diff --git a/tests/integration/cli/client_schema_migration_set_test.go b/tests/integration/cli/client_schema_migration_set_test.go deleted file mode 100644 index b9c0c5009f..0000000000 --- a/tests/integration/cli/client_schema_migration_set_test.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License 
-// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "fmt" - "testing" - - "github.com/sourcenetwork/defradb/tests/lenses" -) - -func TestSchemaMigrationSet_GivenEmptyArgs_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{"client", "schema", "migration", "set"}) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing arguments. Required: src, dst, cfg") -} - -func TestSchemaMigrationSet_GivenOneArg_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing arguments. Required: src, dst, cfg") -} - -func TestSchemaMigrationSet_GivenTwoArgs_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing argument. Name: cfg") -} - -func TestSchemaMigrationSet_GivenFourArgs_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", "cfg", "extraArg", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "too many arguments. Max: 3, Actual: 4") -} - -func TestSchemaMigrationSet_GivenEmptySrcArg_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "", "bae", "path", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing argument. Name: src") -} - -func TestSchemaMigrationSet_GivenEmptyDstArg_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae", "", "path", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing argument. Name: dst") -} - -func TestSchemaMigrationSet_GivenEmptyCfgArg_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", "", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing argument. 
Name: cfg") -} - -func TestSchemaMigrationSet_GivenInvalidCfgJsonObject_ShouldError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", "{--notvalidjson", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "invalid lens configuration: invalid character") -} - -func TestSchemaMigrationSet_GivenEmptyCfgObject_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", "{}", - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, "success") -} - -func TestSchemaMigrationSet_GivenCfgWithNoLenses_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", `{"lenses": []}`, - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, "success") -} - -func TestSchemaMigrationSet_GivenCfgWithNoLensesUppercase_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", `{"Lenses": []}`, - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, "success") -} - -func TestSchemaMigrationSet_GivenCfgWithUnknownProp_ShouldError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", `{"NotAProp": []}`, - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "invalid lens configuration: json: unknown field") -} - -func TestSchemaMigrationSet_GivenCfgWithUnknownPath_ShouldError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", `{"Lenses": [{"path":"notAPath"}]}`, - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "no such file or directory") -} - -func TestSchemaMigrationSet_GivenCfgWithLenses_ShouldSucceedAndMigrateDoc(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", `type Users { name: String }`}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", `mutation { create_Users(data:"{\"name\":\"John\"}") { name } }`}) - assertContainsSubstring(t, stdout, `{"data":[{"name":"John"}]}`) - - stdout, _ = runDefraCommand(t, conf, []string{"client", "schema", "patch", - `[{ "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} }]`, - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", - "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", - fmt.Sprintf(`{"lenses": [{"path":"%s","arguments":{"dst":"verified","value":true}}]}`, lenses.SetDefaultModulePath), - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, 
[]string{"client", "query", "query { Users { name verified } }"}) - _ = stopDefra() - - assertContainsSubstring(t, stdout, `{"data":[{"name":"John","verified":true}]}`) -} - -func TestSchemaMigrationSet_GivenCfgWithLenseError_ShouldError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", `type Users { name: String }`}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", `mutation { create_Users(data:"{\"name\":\"John\"}") { name } }`}) - assertContainsSubstring(t, stdout, `{"data":[{"name":"John"}]}`) - - stdout, _ = runDefraCommand(t, conf, []string{"client", "schema", "patch", - `[{ "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} }]`, - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bafkreibqw2l325up2tljc5oyjpjzftg4x7nhluzqoezrmz645jto6tnylu", - "bafkreihcyy243ed46jxlpwyryo3cfcvxcbnilpj63gy7smf4fqzyzxadze", - // Do not set lens parameters in order to generate error - fmt.Sprintf(`{"lenses": [{"path":"%s"}]}`, lenses.SetDefaultModulePath), - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", "query { Users { name verified } }"}) - _ = stopDefra() - - // Error generated from within lens module lazily executing within the query - assertContainsSubstring(t, stdout, "Parameters have not been set.") -} diff --git a/tests/integration/cli/client_schema_patch_test.go b/tests/integration/cli/client_schema_patch_test.go deleted file mode 100644 index 487dc9eda5..0000000000 --- a/tests/integration/cli/client_schema_patch_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package clitest - -import ( - "testing" -) - -func TestClientSchemaPatch(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - fname := schemaFileFixture(t, "schema.graphql", ` - type User { - id: ID - name: String - }`) - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "schema", "patch", `[{ "op": "add", "path": "/User/Schema/Fields/-", "value": {"Name": "address", "Kind": "String"} }]`}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", `query IntrospectionQuery { __type (name: "User") { fields { name } }}`}) - assertContainsSubstring(t, stdout, "address") -} - -func TestClientSchemaPatch_InvalidJSONPatch(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - fname := schemaFileFixture(t, "schema.graphql", ` - type User { - id: ID - name: String - } - `) - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "schema", "patch", `[{ "op": "invalidOp" }]`}) - assertContainsSubstring(t, stdout, "Internal Server Error") -} diff --git a/tests/integration/cli/init_test.go b/tests/integration/cli/init_test.go deleted file mode 100644 index 7292d920c3..0000000000 --- a/tests/integration/cli/init_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/config" -) - -// Executing init command creates valid config file. -func TestCLIInitCommand(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"init", "--rootdir", conf.rootDir}) - cfgfilePath := filepath.Join(conf.rootDir, config.DefaultConfigFileName) - assertContainsSubstring(t, stderr, "Created config file at "+cfgfilePath) - if !assert.FileExists(t, cfgfilePath) { - t.Fatal("Config file not created") - } -} - -func TestCLIInitCommandTwiceErrors(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - cfgfilePath := filepath.Join(conf.rootDir, config.DefaultConfigFileName) - _, stderr := runDefraCommand(t, conf, []string{"init", "--rootdir", conf.rootDir}) - assertContainsSubstring(t, stderr, "Created config file at "+cfgfilePath) - _, stderr = runDefraCommand(t, conf, []string{"init", "--rootdir", conf.rootDir}) - assertContainsSubstring(t, stderr, "Configuration file already exists at "+cfgfilePath) -} - -// Executing init command twice, but second time reinitializing. 
-func TestInitCommandTwiceReinitalize(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - cfgfilePath := filepath.Join(conf.rootDir, config.DefaultConfigFileName) - _, stderr := runDefraCommand(t, conf, []string{"init", "--rootdir", conf.rootDir}) - assertContainsSubstring(t, stderr, "Created config file at "+cfgfilePath) - _, stderr = runDefraCommand(t, conf, []string{"init", "--rootdir", conf.rootDir, "--reinitialize"}) - assertContainsSubstring(t, stderr, "Deleted config file at "+cfgfilePath) - assertContainsSubstring(t, stderr, "Created config file at "+cfgfilePath) -} diff --git a/tests/integration/cli/log_config_test.go b/tests/integration/cli/log_config_test.go deleted file mode 100644 index 55d1b18154..0000000000 --- a/tests/integration/cli/log_config_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "bufio" - "bytes" - "context" - "fmt" - "io" - "os" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/cli" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" -) - -const ( - testLogger1 = "testLogger1" - testLogger2 = "testLogger2" - testLogger3 = "testLogger3" -) - -var ( - log1 = logging.MustNewLogger(testLogger1) - log2 = logging.MustNewLogger(testLogger2) - log3 = logging.MustNewLogger(testLogger3) -) - -func TestCLILogsToStderrGivenNamedLogLevel(t *testing.T) { - ctx := context.Background() - logLines := captureLogLines( - t, - func() { - // set the log levels - // general: error - // testLogger1: debug - // testLogger2: info - os.Args = append(os.Args, "--loglevel") - os.Args = append(os.Args, fmt.Sprintf("%s,%s=debug,%s=info", "error", testLogger1, testLogger2)) - }, - func() { - log1.Error(ctx, "error") - log1.Debug(ctx, "debug") - log2.Info(ctx, "info") - log3.Debug(ctx, "debug") // wont print, as logger3 will use global level defined above as 'error' - log3.Info(ctx, "info") // wont print, as logger3 will use global level defined above as 'error' - }, - ) - - assert.Len(t, logLines, 3) -} - -func captureLogLines(t *testing.T, setup func(), predicate func()) []string { - r, w, err := os.Pipe() - if err != nil { - t.Fatal(err) - } - stderr := os.Stderr - os.Stderr = w - defer func() { - os.Stderr = stderr - }() - - directory := t.TempDir() - - // Set the default logger output path to a file in the temp dir - // so that production logs don't polute and confuse the tests - // os.Args = append(os.Args, "--logoutput", directory+"/log.txt") - os.Args = append(os.Args, "init", "--rootdir", directory) - - setup() - cfg := config.DefaultConfig() - defraCmd := cli.NewDefraCommand(cfg) - if err := defraCmd.Execute(context.Background()); err != nil { - t.Fatal(err) - } - predicate() - log1.Flush() - log2.Flush() - log3.Flush() - - w.Close() - var buf bytes.Buffer - _, _ = io.Copy(&buf, r) - logLines, err := parseLines(&buf) - if err != nil { - t.Fatal(err) - } - - return logLines -} - -func parseLines(r io.Reader) ([]string, error) { - fileScanner := bufio.NewScanner(r) - - fileScanner.Split(bufio.ScanLines) - - logLines := []string{} - for fileScanner.Scan() { - logLines = append(logLines, 
fileScanner.Text()) - } - - return logLines, nil -} diff --git a/tests/integration/cli/root_test.go b/tests/integration/cli/root_test.go deleted file mode 100644 index 33df29fc4d..0000000000 --- a/tests/integration/cli/root_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestRootCommandEmptyRootDir(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{}) - assert.Contains(t, stdout, "Usage:") -} - -func TestRootCommandRootDirWithDefaultConfig(t *testing.T) { - conf := DefraNodeConfig{ - logPath: t.TempDir(), - } - stdout, _ := runDefraCommand(t, conf, []string{}) - assert.Contains(t, stdout, "Usage:") -} - -func TestRootCommandRootDirFromEnv(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{}) - assert.Contains(t, stdout, "Usage:") -} - -func TestRootCommandRootWithNonexistentFlag(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{"--foo"}) - assert.Contains(t, stdout, "Usage:") -} diff --git a/tests/integration/cli/serverdump_test.go b/tests/integration/cli/serverdump_test.go deleted file mode 100644 index ed8fcd4d9f..0000000000 --- a/tests/integration/cli/serverdump_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" -) - -func TestServerDumpMemoryErrs(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"server-dump", "--store", "memory"}) - assertContainsSubstring(t, stderr, "server-side dump is only supported for the Badger datastore") -} - -func TestServerDumpInvalidStoreErrs(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"server-dump", "--store", "invalid"}) - // assertContainsSubstring(t, stderr, "invalid datastore type") - assertContainsSubstring(t, stderr, "server-side dump is only supported for the Badger datastore") -} diff --git a/tests/integration/cli/start_test.go b/tests/integration/cli/start_test.go deleted file mode 100644 index 1a6267f190..0000000000 --- a/tests/integration/cli/start_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package clitest - -import ( - "fmt" - "testing" -) - -func TestStartCommandBasic(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{ - "start", - "--url", conf.APIURL, - "--tcpaddr", conf.GRPCAddr, - }) - assertContainsSubstring(t, stderr, "Starting DefraDB service...") - assertNotContainsSubstring(t, stderr, "Error") -} - -func TestStartCommandWithTLSIncomplete(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{ - "start", - "--tls", - "--url", conf.APIURL, - "--tcpaddr", conf.GRPCAddr, - }) - assertContainsSubstring(t, stderr, "Starting DefraDB service...") - assertContainsSubstring(t, stderr, "Error") -} - -func TestStartCommandWithStoreMemory(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{ - "start", "--store", "memory", - "--url", conf.APIURL, - "--tcpaddr", conf.GRPCAddr, - }) - assertContainsSubstring(t, stderr, "Starting DefraDB service...") - assertContainsSubstring(t, stderr, "Building new memory store") - assertNotContainsSubstring(t, stderr, "Error") -} - -func TestStartCommandWithP2PAddr(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - p2pport, err := findFreePortInRange(t, 49152, 65535) - if err != nil { - t.Fatal(err) - } - addr := fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", p2pport) - _, stderr := runDefraCommand(t, conf, []string{ - "start", - "--p2paddr", addr, - "--url", conf.APIURL, - "--tcpaddr", conf.GRPCAddr, - }) - assertContainsSubstring(t, stderr, "Starting DefraDB service...") - logstring := fmt.Sprintf("Starting P2P node, {\"P2P address\": \"%s\"}", addr) - assertContainsSubstring(t, stderr, logstring) - assertNotContainsSubstring(t, stderr, "Error") -} - -func TestStartCommandWithNoP2P(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{ - "start", - "--no-p2p", - }) - assertContainsSubstring(t, stderr, "Starting DefraDB service...") - assertNotContainsSubstring(t, stderr, "Starting P2P node") - assertNotContainsSubstring(t, stderr, "Error") -} - -func TestStartCommandWithInvalidStoreType(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{ - "start", - "--store", "invalid", - }) - assertContainsSubstring(t, stderr, "failed to load config: failed to validate config: invalid store type") -} diff --git a/tests/integration/cli/utils.go b/tests/integration/cli/utils.go deleted file mode 100644 index c94ce222dc..0000000000 --- a/tests/integration/cli/utils.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -/* -Package clitest provides a testing framework for the Defra CLI, along with CLI integration tests. 
-*/ -package clitest - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "math/rand" - "net" - "os" - "path/filepath" - "strings" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/cli" - "github.com/sourcenetwork/defradb/config" -) - -const COMMAND_TIMEOUT_SECONDS = 2 * time.Second -const SUBCOMMAND_TIME_BUFFER_SECONDS = 200 * time.Millisecond - -type DefraNodeConfig struct { - rootDir string - logPath string - APIURL string - GRPCAddr string -} - -func NewDefraNodeDefaultConfig(t *testing.T) DefraNodeConfig { - t.Helper() - portAPI, err := findFreePortInRange(t, 49152, 65535) - if err != nil { - t.Fatal(err) - } - portGRPC, err := findFreePortInRange(t, 49152, 65535) - if err != nil { - t.Fatal(err) - } - - return DefraNodeConfig{ - rootDir: t.TempDir(), - logPath: "", - APIURL: fmt.Sprintf("localhost:%d", portAPI), - GRPCAddr: fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", portGRPC), - } -} - -// runDefraNode runs a defra node in a separate goroutine and returns a stopping function -// which also returns the node's execution log lines. -func runDefraNode(t *testing.T, conf DefraNodeConfig) func() []string { - t.Helper() - - if conf.logPath == "" { - conf.logPath = filepath.Join(t.TempDir(), "defra.log") - } - - var args []string - if conf.rootDir != "" { - args = append(args, "--rootdir", conf.rootDir) - } - if conf.APIURL != "" { - args = append(args, "--url", conf.APIURL) - } - if conf.GRPCAddr != "" { - args = append(args, "--tcpaddr", conf.GRPCAddr) - } - args = append(args, "--logoutput", conf.logPath) - - cfg := config.DefaultConfig() - ctx, cancel := context.WithCancel(context.Background()) - ready := make(chan struct{}) - go func(ready chan struct{}) { - defraCmd := cli.NewDefraCommand(cfg) - defraCmd.RootCmd.SetArgs( - append([]string{"start"}, args...), - ) - ready <- struct{}{} - err := defraCmd.Execute(ctx) - assert.NoError(t, err) - }(ready) - <-ready - time.Sleep(SUBCOMMAND_TIME_BUFFER_SECONDS) - cancelAndOutput := func() []string { - cancel() - time.Sleep(SUBCOMMAND_TIME_BUFFER_SECONDS) - lines, err := readLoglines(t, conf.logPath) - assert.NoError(t, err) - return lines - } - return cancelAndOutput -} - -// Runs a defra command and returns the stdout and stderr output. -func runDefraCommand(t *testing.T, conf DefraNodeConfig, args []string) (stdout, stderr []string) { - t.Helper() - cfg := config.DefaultConfig() - args = append([]string{ - "--url", conf.APIURL, - }, args...) 
- if !contains(args, "--rootdir") { - args = append(args, "--rootdir", t.TempDir()) - } - - ctx, cancel := context.WithTimeout(context.Background(), COMMAND_TIMEOUT_SECONDS) - defer cancel() - - stdout, stderr = captureOutput(func() { - defraCmd := cli.NewDefraCommand(cfg) - t.Log("executing defra command with args", args) - defraCmd.RootCmd.SetArgs(args) - _ = defraCmd.Execute(ctx) - }) - return stdout, stderr -} - -func contains(args []string, arg string) bool { - for _, a := range args { - if a == arg { - return true - } - } - return false -} - -func readLoglines(t *testing.T, fpath string) ([]string, error) { - f, err := os.Open(fpath) - if err != nil { - return nil, err - } - defer f.Close() //nolint:errcheck - scanner := bufio.NewScanner(f) - lines := make([]string, 0) - for scanner.Scan() { - lines = append(lines, scanner.Text()) - } - err = scanner.Err() - assert.NoError(t, err) - return lines, nil -} - -func captureOutput(f func()) (stdout, stderr []string) { - oldStdout := os.Stdout - oldStderr := os.Stderr - rStdout, wStdout, err := os.Pipe() - if err != nil { - panic(err) - } - rStderr, wStderr, err := os.Pipe() - if err != nil { - panic(err) - } - os.Stdout = wStdout - os.Stderr = wStderr - - f() - - if err := wStdout.Close(); err != nil { - panic(err) - } - if err := wStderr.Close(); err != nil { - panic(err) - } - - os.Stdout = oldStdout - os.Stderr = oldStderr - - var stdoutBuf, stderrBuf bytes.Buffer - if _, err := io.Copy(&stdoutBuf, rStdout); err != nil { - panic(err) - } - if _, err := io.Copy(&stderrBuf, rStderr); err != nil { - panic(err) - } - - stdout = strings.Split(strings.TrimSuffix(stdoutBuf.String(), "\n"), "\n") - stderr = strings.Split(strings.TrimSuffix(stderrBuf.String(), "\n"), "\n") - - return -} - -var portsInUse = make(map[int]struct{}) -var portMutex = sync.Mutex{} - -// findFreePortInRange returns a free port in the range [minPort, maxPort]. -// The range of ports that are unfrequently used is [49152, 65535]. 
-func findFreePortInRange(t *testing.T, minPort, maxPort int) (int, error) { - if minPort < 1 || maxPort > 65535 || minPort > maxPort { - return 0, errors.New("invalid port range") - } - - const maxAttempts = 100 - for i := 0; i < maxAttempts; i++ { - port := rand.Intn(maxPort-minPort+1) + minPort - if _, ok := portsInUse[port]; ok { - continue - } - addr := fmt.Sprintf("127.0.0.1:%d", port) - listener, err := net.Listen("tcp", addr) - if err == nil { - portMutex.Lock() - portsInUse[port] = struct{}{} - portMutex.Unlock() - t.Cleanup(func() { - portMutex.Lock() - delete(portsInUse, port) - portMutex.Unlock() - }) - _ = listener.Close() - return port, nil - } - } - - return 0, errors.New("unable to find a free port") -} - -func assertContainsSubstring(t *testing.T, haystack []string, substring string) { - t.Helper() - if !containsSubstring(haystack, substring) { - t.Fatalf("expected %q to contain %q", haystack, substring) - } -} - -func assertNotContainsSubstring(t *testing.T, haystack []string, substring string) { - t.Helper() - if containsSubstring(haystack, substring) { - t.Fatalf("expected %q to not contain %q", haystack, substring) - } -} - -func containsSubstring(haystack []string, substring string) bool { - for _, s := range haystack { - if strings.Contains(s, substring) { - return true - } - } - return false -} - -func schemaFileFixture(t *testing.T, fname string, schema string) string { - absFname := filepath.Join(t.TempDir(), fname) - err := os.WriteFile(absFname, []byte(schema), 0644) - assert.NoError(t, err) - return absFname -} diff --git a/tests/integration/cli/version_test.go b/tests/integration/cli/version_test.go deleted file mode 100644 index bc9c2a7e25..0000000000 --- a/tests/integration/cli/version_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "encoding/json" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -// note: this assumes the version information *without* build-time info integrated. -func TestExecVersion(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, stderr := runDefraCommand(t, conf, []string{"version"}) - for _, line := range stderr { - assert.NotContains(t, line, "ERROR") - } - output := strings.Join(stdout, " ") - assert.Contains(t, output, "defradb") - assert.Contains(t, output, "built with Go") -} - -func TestExecVersionJSON(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, stderr := runDefraCommand(t, conf, []string{"version", "--format", "json"}) - for _, line := range stderr { - assert.NotContains(t, line, "ERROR") - } - output := strings.Join(stdout, " ") - assert.Contains(t, output, "go\":") - assert.Contains(t, output, "commit\":") - assert.Contains(t, output, "commitdate\":") - var data map[string]any - err := json.Unmarshal([]byte(output), &data) - assert.NoError(t, err) -} diff --git a/tests/integration/results.go b/tests/integration/results.go index 052de310c5..176b2e4cf2 100644 --- a/tests/integration/results.go +++ b/tests/integration/results.go @@ -28,7 +28,7 @@ type AnyOf []any // The comparison is relaxed when using client types other than goClientType. 
func assertResultsAnyOf(t *testing.T, client ClientType, expected AnyOf, actual any, msgAndArgs ...any) { switch client { - case httpClientType: + case httpClientType, cliClientType: if !areResultsAnyOf(expected, actual) { assert.Contains(t, expected, actual, msgAndArgs...) } @@ -42,7 +42,7 @@ func assertResultsAnyOf(t *testing.T, client ClientType, expected AnyOf, actual // The comparison is relaxed when using client types other than goClientType. func assertResultsEqual(t *testing.T, client ClientType, expected any, actual any, msgAndArgs ...any) { switch client { - case httpClientType: + case httpClientType, cliClientType: if !areResultsEqual(expected, actual) { assert.EqualValues(t, expected, actual, msgAndArgs...) } diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index f41e1a7485..420e7f4c9c 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -32,15 +32,17 @@ import ( "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" + "github.com/sourcenetwork/defradb/tests/clients/cli" + "github.com/sourcenetwork/defradb/tests/clients/http" ) const ( clientGoEnvName = "DEFRA_CLIENT_GO" clientHttpEnvName = "DEFRA_CLIENT_HTTP" + clientCliEnvName = "DEFRA_CLIENT_CLI" memoryBadgerEnvName = "DEFRA_BADGER_MEMORY" fileBadgerEnvName = "DEFRA_BADGER_FILE" fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH" @@ -65,6 +67,9 @@ const ( // httpClientType enables running the test suite using // the http implementation of the client.DB interface. httpClientType ClientType = "http" + // cliClientType enables running the test suite using + // the cli implementation of the client.DB interface. + cliClientType ClientType = "cli" ) // The MutationType that tests will run using. @@ -101,6 +106,7 @@ var ( inMemoryStore bool httpClient bool goClient bool + cliClient bool mutationType MutationType databaseDir string ) @@ -118,6 +124,7 @@ func init() { // that don't have the flag defined httpClient, _ = strconv.ParseBool(os.Getenv(clientHttpEnvName)) goClient, _ = strconv.ParseBool(os.Getenv(clientGoEnvName)) + cliClient, _ = strconv.ParseBool(os.Getenv(clientCliEnvName)) badgerFile, _ = strconv.ParseBool(os.Getenv(fileBadgerEnvName)) badgerInMemory, _ = strconv.ParseBool(os.Getenv(memoryBadgerEnvName)) inMemoryStore, _ = strconv.ParseBool(os.Getenv(inMemoryEnvName)) @@ -131,7 +138,7 @@ func init() { mutationType = CollectionSaveMutationType } - if !goClient && !httpClient { + if !goClient && !httpClient && !cliClient { // Default is to test go client type. goClient = true } @@ -162,8 +169,8 @@ func AssertPanic(t *testing.T, f assert.PanicTestFunc) bool { t.Skip("Assert panic with the change detector is not currently supported.") } - if httpClient { - // The http-client will return an error instead of panicing at the moment. + if httpClient || cliClient { + // The http / cli client will return an error instead of panicing at the moment. 
t.Skip("Assert panic with the http client is not currently supported.") } @@ -254,6 +261,9 @@ func GetDatabase(s *state) (cdb client.DB, path string, err error) { case httpClientType: cdb, err = http.NewWrapper(cdb) + case cliClientType: + cdb = cli.NewWrapper(cdb) + case goClientType: return @@ -288,6 +298,9 @@ func ExecuteTestCase( if goClient { clients = append(clients, goClientType) } + if cliClient { + clients = append(clients, cliClientType) + } var databases []DatabaseType if badgerInMemory { diff --git a/version/version.go b/version/version.go index a6fe7ea548..67538d302b 100644 --- a/version/version.go +++ b/version/version.go @@ -17,9 +17,9 @@ import ( "fmt" "strings" - "github.com/sourcenetwork/defradb/api/http" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core/net" + "github.com/sourcenetwork/defradb/http" ) const commitHashMaxLength = 8 From 3f9decc0844ca2485a04311c6fb0f27b690133ba Mon Sep 17 00:00:00 2001 From: Shahzad Lone Date: Mon, 2 Oct 2023 16:16:22 -0400 Subject: [PATCH 12/55] ci(i): Fix the pull_request trigger and ci badge (#1922) ## Relevant issue(s) Resolves #1921 ## Description - Fix workflow badge to the new workflow name - Fix the omitted pull_request trigger --- ...load-coverage.yml => test-and-upload-coverage.yml} | 11 ++++++++--- README.md | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) rename .github/workflows/{run-tests-and-upload-coverage.yml => test-and-upload-coverage.yml} (96%) diff --git a/.github/workflows/run-tests-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml similarity index 96% rename from .github/workflows/run-tests-and-upload-coverage.yml rename to .github/workflows/test-and-upload-coverage.yml index f1f8724ced..8c58e49bb4 100644 --- a/.github/workflows/run-tests-and-upload-coverage.yml +++ b/.github/workflows/test-and-upload-coverage.yml @@ -8,9 +8,14 @@ # by the Apache License, Version 2.0, included in the file # licenses/APL.txt. -name: Run Tests And Upload Coverage Workflow +name: Test And Upload Coverage Workflow on: + pull_request: + branches: + - master + - develop + push: tags: - 'v[0-9]+.[0-9]+.[0-9]+' @@ -80,9 +85,9 @@ jobs: name: Upload test code coverage job runs-on: ubuntu-latest - + needs: run-tests - + steps: - name: Checkout code into the directory uses: actions/checkout@v3 diff --git a/README.md b/README.md index 8428ebc77f..4d6afa6664 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![Tests Workflow](https://github.com/sourcenetwork/defradb/actions/workflows/run-tests.yml/badge.svg) +![Tests Workflow](https://github.com/sourcenetwork/defradb/actions/workflows/test-and-upload-coverage.yml/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/sourcenetwork/defradb)](https://goreportcard.com/report/github.com/sourcenetwork/defradb) [![codecov](https://codecov.io/gh/sourcenetwork/defradb/branch/develop/graph/badge.svg?token=RHAORX13PA)](https://codecov.io/gh/sourcenetwork/defradb) [![Discord](https://img.shields.io/discord/427944769851752448.svg?color=768AD4&label=discord&logo=https%3A%2F%2Fdiscordapp.com%2Fassets%2F8c9701b98ad4372b58f13fd9f65f966e.svg)](https://discord.source.network/) From 04a20e348749f49654bae1d214c01e7ad273f7d4 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Wed, 4 Oct 2023 14:17:47 -0400 Subject: [PATCH 13/55] fix(i): Update fetcher mock (#1926) ## Relevant issue(s) Resolves #1887 ## Description Updates the fetcher mock. 
--- db/fetcher/mocks/fetcher.go | 130 ------------------------------------ db/fetcher/mocks/utils.go | 6 -- 2 files changed, 136 deletions(-) diff --git a/db/fetcher/mocks/fetcher.go b/db/fetcher/mocks/fetcher.go index 12bb386024..79eefefc2b 100644 --- a/db/fetcher/mocks/fetcher.go +++ b/db/fetcher/mocks/fetcher.go @@ -133,136 +133,6 @@ func (_c *Fetcher_FetchNext_Call) RunAndReturn(run func(context.Context) (fetche return _c } -// FetchNextDecoded provides a mock function with given fields: ctx -func (_m *Fetcher) FetchNextDecoded(ctx context.Context) (*client.Document, fetcher.ExecInfo, error) { - ret := _m.Called(ctx) - - var r0 *client.Document - var r1 fetcher.ExecInfo - var r2 error - if rf, ok := ret.Get(0).(func(context.Context) (*client.Document, fetcher.ExecInfo, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *client.Document); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*client.Document) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) fetcher.ExecInfo); ok { - r1 = rf(ctx) - } else { - r1 = ret.Get(1).(fetcher.ExecInfo) - } - - if rf, ok := ret.Get(2).(func(context.Context) error); ok { - r2 = rf(ctx) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Fetcher_FetchNextDecoded_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FetchNextDecoded' -type Fetcher_FetchNextDecoded_Call struct { - *mock.Call -} - -// FetchNextDecoded is a helper method to define mock.On call -// - ctx context.Context -func (_e *Fetcher_Expecter) FetchNextDecoded(ctx interface{}) *Fetcher_FetchNextDecoded_Call { - return &Fetcher_FetchNextDecoded_Call{Call: _e.mock.On("FetchNextDecoded", ctx)} -} - -func (_c *Fetcher_FetchNextDecoded_Call) Run(run func(ctx context.Context)) *Fetcher_FetchNextDecoded_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *Fetcher_FetchNextDecoded_Call) Return(_a0 *client.Document, _a1 fetcher.ExecInfo, _a2 error) *Fetcher_FetchNextDecoded_Call { - _c.Call.Return(_a0, _a1, _a2) - return _c -} - -func (_c *Fetcher_FetchNextDecoded_Call) RunAndReturn(run func(context.Context) (*client.Document, fetcher.ExecInfo, error)) *Fetcher_FetchNextDecoded_Call { - _c.Call.Return(run) - return _c -} - -// FetchNextDoc provides a mock function with given fields: ctx, mapping -func (_m *Fetcher) FetchNextDoc(ctx context.Context, mapping *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error) { - ret := _m.Called(ctx, mapping) - - var r0 []byte - var r1 core.Doc - var r2 fetcher.ExecInfo - var r3 error - if rf, ok := ret.Get(0).(func(context.Context, *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error)); ok { - return rf(ctx, mapping) - } - if rf, ok := ret.Get(0).(func(context.Context, *core.DocumentMapping) []byte); ok { - r0 = rf(ctx, mapping) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *core.DocumentMapping) core.Doc); ok { - r1 = rf(ctx, mapping) - } else { - r1 = ret.Get(1).(core.Doc) - } - - if rf, ok := ret.Get(2).(func(context.Context, *core.DocumentMapping) fetcher.ExecInfo); ok { - r2 = rf(ctx, mapping) - } else { - r2 = ret.Get(2).(fetcher.ExecInfo) - } - - if rf, ok := ret.Get(3).(func(context.Context, *core.DocumentMapping) error); ok { - r3 = rf(ctx, mapping) - } else { - r3 = ret.Error(3) - } - - return r0, r1, r2, r3 -} - -// Fetcher_FetchNextDoc_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'FetchNextDoc' -type Fetcher_FetchNextDoc_Call struct { - *mock.Call -} - -// FetchNextDoc is a helper method to define mock.On call -// - ctx context.Context -// - mapping *core.DocumentMapping -func (_e *Fetcher_Expecter) FetchNextDoc(ctx interface{}, mapping interface{}) *Fetcher_FetchNextDoc_Call { - return &Fetcher_FetchNextDoc_Call{Call: _e.mock.On("FetchNextDoc", ctx, mapping)} -} - -func (_c *Fetcher_FetchNextDoc_Call) Run(run func(ctx context.Context, mapping *core.DocumentMapping)) *Fetcher_FetchNextDoc_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*core.DocumentMapping)) - }) - return _c -} - -func (_c *Fetcher_FetchNextDoc_Call) Return(_a0 []byte, _a1 core.Doc, _a2 fetcher.ExecInfo, _a3 error) *Fetcher_FetchNextDoc_Call { - _c.Call.Return(_a0, _a1, _a2, _a3) - return _c -} - -func (_c *Fetcher_FetchNextDoc_Call) RunAndReturn(run func(context.Context, *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error)) *Fetcher_FetchNextDoc_Call { - _c.Call.Return(run) - return _c -} - // Init provides a mock function with given fields: ctx, txn, col, fields, filter, docmapper, reverse, showDeleted func (_m *Fetcher) Init(ctx context.Context, txn datastore.Txn, col *client.CollectionDescription, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error { ret := _m.Called(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) diff --git a/db/fetcher/mocks/utils.go b/db/fetcher/mocks/utils.go index 3ffe12fce2..298d5b2ad6 100644 --- a/db/fetcher/mocks/utils.go +++ b/db/fetcher/mocks/utils.go @@ -13,9 +13,6 @@ package mocks import ( "testing" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/stretchr/testify/mock" ) @@ -33,9 +30,6 @@ func NewStubbedFetcher(t *testing.T) *Fetcher { ).Maybe().Return(nil) f.EXPECT().Start(mock.Anything, mock.Anything).Maybe().Return(nil) f.EXPECT().FetchNext(mock.Anything).Maybe().Return(nil, nil) - f.EXPECT().FetchNextDoc(mock.Anything, mock.Anything).Maybe(). - Return(NewEncodedDocument(t), core.Doc{}, nil) - f.EXPECT().FetchNextDecoded(mock.Anything).Maybe().Return(&client.Document{}, nil) f.EXPECT().Close().Maybe().Return(nil) return f } From d8466cf4063203abf598143a1a4f300cc93b81cd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 14:34:48 -0700 Subject: [PATCH 14/55] chore(i): Bump libp2p and suppress `pstoreds` warning (#1889) Bumps [golang.org/x/net](https://github.com/golang/net) from 0.14.0 to 0.15.0.
Commits:

- 2a0da8b go.mod: update golang.org/x dependencies
- 97384c1 quic: remove streams from the conn when done
- 03d5e62 http2: remove unused ClientConn.tconnClosed
- b82f062 quic: include ignored frames in test log output
- 7374d34 quic: don't block when closing read-only streams
- b4d09be dns/dnsmessage: compress all names while appending to a buffer
- 8b010a5 quic: fix race condition in runAsync test helper
- fe2abcb quic: validate stream limits in transport params
- d1b0a97 quic: avoid sending 1-RTT frames in initial/handshake packets
- 4332436 quic: send more transport parameters
- Additional commits viewable in compare view
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/net&package-manager=go_modules&previous-version=0.14.0&new-version=0.15.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

---
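Beyond the version bumps, this patch silences the `staticcheck` deprecation warning on the `pstoreds` peerstore import, which is still required until the migration tracked in issue #1902. For reference, here is the suppression pattern from the `net/node.go` hunk below, reproduced as a self-contained excerpt (the `var _` usage line is illustrative, standing in for the real uses elsewhere in `node.go`):

```go
package net

import (
	// @TODO: https://github.com/sourcenetwork/defradb/issues/1902
	//nolint:staticcheck
	"github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds"
)

// The //nolint:staticcheck directive scopes the suppression to this single
// deprecated import instead of disabling the check file- or repo-wide.
var _ = pstoreds.NewPeerstore
```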
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone --- go.mod | 32 +++++++++++++------------- go.sum | 65 +++++++++++++++++++++++++++-------------------------- net/node.go | 6 ++++- 3 files changed, 54 insertions(+), 49 deletions(-) diff --git a/go.mod b/go.mod index 83ee818703..0e75e2732b 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/graphql-go/graphql v0.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/iancoleman/strcase v0.3.0 - github.com/ipfs/boxo v0.12.0 + github.com/ipfs/boxo v0.13.1 github.com/ipfs/go-block-format v0.2.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 @@ -24,13 +24,13 @@ require ( github.com/ipfs/go-log/v2 v2.5.1 github.com/jbenet/goprocess v0.1.4 github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25 - github.com/libp2p/go-libp2p v0.29.2 + github.com/libp2p/go-libp2p v0.30.0 github.com/libp2p/go-libp2p-gostream v0.6.0 github.com/libp2p/go-libp2p-kad-dht v0.23.0 github.com/libp2p/go-libp2p-pubsub v0.9.3 github.com/libp2p/go-libp2p-record v0.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiformats/go-multiaddr v0.10.1 + github.com/multiformats/go-multiaddr v0.11.0 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 github.com/sourcenetwork/immutable v0.3.0 @@ -47,8 +47,8 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.40.0 go.uber.org/zap v1.25.0 golang.org/x/crypto v0.13.0 - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 - golang.org/x/net v0.14.0 + golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 + golang.org/x/net v0.15.0 google.golang.org/grpc v1.58.1 google.golang.org/protobuf v1.31.0 ) @@ -83,13 +83,14 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/google/flatbuffers v2.0.6+incompatible // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect + github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f // indirect github.com/google/uuid v1.3.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect - github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect + github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.5 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hsanjuan/ipfs-lite v1.4.1 // indirect github.com/huin/goupnp v1.2.0 // indirect @@ -132,7 +133,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-reuseport v0.3.0 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect @@ -151,21 +152,20 @@ require ( github.com/multiformats/go-multistream v0.4.1 // indirect github.com/multiformats/go-varint v0.0.7 // indirect github.com/onsi/ginkgo/v2 v2.11.0 // indirect - github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory 
v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-19 v0.3.3 // indirect - github.com/quic-go/qtls-go1-20 v0.2.3 // indirect - github.com/quic-go/quic-go v0.36.4 // indirect + github.com/quic-go/qtls-go1-20 v0.3.2 // indirect + github.com/quic-go/quic-go v0.38.0 // indirect github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect @@ -191,7 +191,7 @@ require ( golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.12.0 // indirect golang.org/x/text v0.13.0 // indirect - golang.org/x/tools v0.11.0 // indirect + golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.13.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect diff --git a/go.sum b/go.sum index e198ec35a4..2a805ae3f8 100644 --- a/go.sum +++ b/go.sum @@ -335,8 +335,8 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= -github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= +github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f h1:pDhu5sgp8yJlEF/g6osliIIpF9K4F5jvkULXa4daRDQ= +github.com/google/pprof v0.0.0-20230821062121-407c9e7a662f/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -393,8 +393,10 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= -github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod 
h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= +github.com/hashicorp/golang-lru/v2 v2.0.5 h1:wW7h1TG88eUIJ2i69gaE3uNVtEPIagzhGvHgwfx2Vm4= +github.com/hashicorp/golang-lru/v2 v2.0.5/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -420,8 +422,8 @@ github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.12.0 h1:AXHg/1ONZdRQHQLgG5JHsSC3XoE4DjCAMgK+asZvUcQ= -github.com/ipfs/boxo v0.12.0/go.mod h1:xAnfiU6PtxWCnRqu7dcXQ10bB5/kvI1kXRotuGqGBhg= +github.com/ipfs/boxo v0.13.1 h1:nQ5oQzcMZR3oL41REJDcTbrvDvuZh3J9ckc9+ILeRQI= +github.com/ipfs/boxo v0.13.1/go.mod h1:btrtHy0lmO1ODMECbbEY1pxNtrLilvKSYLoGQt1yYCk= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= @@ -660,8 +662,8 @@ github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xS github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.29.2 h1:uPw/c8hOxoLP/KhFnzlc5Ejqf+OmAL1dwIsqE31WBtY= -github.com/libp2p/go-libp2p v0.29.2/go.mod h1:OU7nSq0aEZMsV2wY8nXn1+XNNt9q2UiR8LjW3Kmp2UE= +github.com/libp2p/go-libp2p v0.30.0 h1:9EZwFtJPFBcs/yJTnP90TpN1hgrT/EsFfM+OZuwV87U= +github.com/libp2p/go-libp2p v0.30.0/go.mod h1:nr2g5V7lfftwgiJ78/HrID+pwvayLyqKCEirT2Y3Byg= github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= @@ -847,8 +849,8 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= -github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw= -github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= @@ -976,8 +978,8 @@ github.com/multiformats/go-multiaddr 
v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.10.1 h1:HghtFrWyZEPrpTvgAMFJi6gFdgHfs2cb0pyfDsk+lqU= -github.com/multiformats/go-multiaddr v0.10.1/go.mod h1:jLEZsA61rwWNZQTHHnqq2HNa+4os/Hz54eqiRnsRqYQ= +github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10= +github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1056,8 +1058,9 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -1101,8 +1104,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1117,8 +1120,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.10.0/go.mod 
h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1126,16 +1129,14 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE= -github.com/quic-go/qtls-go1-19 v0.3.3/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= -github.com/quic-go/qtls-go1-20 v0.2.3 h1:m575dovXn1y2ATOb1XrRFcrv0F+EQmlowTkoraNkDPI= -github.com/quic-go/qtls-go1-20 v0.2.3/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= -github.com/quic-go/quic-go v0.36.4 h1:CXn/ZLN5Vntlk53fjR+kUMC8Jt7flfQe+I5Ty5A+k0o= -github.com/quic-go/quic-go v0.36.4/go.mod h1:qxQumdeKw5GmWs1OsTZZnOxzSI+RJWuhf1O8FN35L2o= +github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= +github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.38.0 h1:T45lASr5q/TrVwt+jrVccmqHhPL2XuSyoCLVCpfOSLc= +github.com/quic-go/quic-go v0.38.0/go.mod h1:MPCuRq7KBK2hNcfKj/1iD1BGuN3eAYMeNxp3T42LRUg= github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -1143,7 +1144,7 @@ github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtD github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -1402,8 +1403,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= +golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1486,8 +1487,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1678,8 +1679,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= -golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= +golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/net/node.go b/net/node.go index 8f916cda16..04838641f2 100644 --- a/net/node.go +++ b/net/node.go @@ -39,10 +39,14 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" - "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" + "github.com/multiformats/go-multiaddr" "github.com/textileio/go-libp2p-pubsub-rpc/finalizer" + // @TODO: https://github.com/sourcenetwork/defradb/issues/1902 + //nolint:staticcheck + "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/logging" ) From 81fb509a9f52a2778722d3ee7447a8073b5bcd30 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 15:38:48 -0700 Subject: [PATCH 15/55] bot: Bump combined dependencies 19-09-2023 (#1931) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ✅ This PR was created by the Combine PRs action by combining the following PRs: #1930 bot: Bump postcss from 8.4.27 to 8.4.31 in /playground #1919 bot: Bump react-hook-form from 7.46.1 to 7.47.0 in /playground #1918 bot: Bump @typescript-eslint/parser from 6.7.0 to 6.7.3 in /playground #1917 bot: Bump @types/react from 18.2.21 to 18.2.24 in /playground #1910 bot: Bump github.com/libp2p/go-libp2p-kad-dht from 0.23.0 to 0.25.1 #1909 bot: Bump github.com/evanphx/json-patch/v5 from 5.6.0 to 5.7.0 #1908 bot: Bump github.com/go-errors/errors from 1.5.0 to 1.5.1 #1906 bot: Bump eslint from 8.49.0 to 8.50.0 in /playground ⚠️ The following PRs were left out due to merge conflicts: #1904 bot: Bump @vitejs/plugin-react-swc from 3.3.2 to 3.4.0 in /playground --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 7 +- go.sum | 14 ++-- playground/package-lock.json | 128 +++++++++++++++++++++++++++-------- playground/package.json | 8 +-- 4 files changed, 117 insertions(+), 40 deletions(-) diff --git a/go.mod b/go.mod index 0e75e2732b..6d82464e78 100644 --- a/go.mod +++ b/go.mod @@ -6,11 +6,11 @@ require ( github.com/bits-and-blooms/bitset v1.8.0 github.com/bxcodec/faker v2.0.1+incompatible github.com/dgraph-io/badger/v4 v4.1.0 - github.com/evanphx/json-patch/v5 v5.6.0 + github.com/evanphx/json-patch/v5 v5.7.0 github.com/fxamacker/cbor/v2 v2.5.0 github.com/go-chi/chi/v5 v5.0.10 github.com/go-chi/cors v1.2.1 - github.com/go-errors/errors v1.5.0 + github.com/go-errors/errors v1.5.1 github.com/gofrs/uuid/v5 v5.0.0 github.com/graphql-go/graphql v0.8.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 @@ -54,6 +54,7 @@ require ( ) require ( + github.com/Jorropo/jsync v1.0.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -129,7 +130,7 @@ require ( github.com/libp2p/go-libp2p-connmgr v0.4.0 // indirect github.com/libp2p/go-libp2p-core v0.20.0 // indirect github.com/libp2p/go-libp2p-kbucket v0.6.0 // indirect - github.com/libp2p/go-libp2p-routing-helpers v0.7.0 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.2 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect diff --git a/go.sum b/go.sum index 2a805ae3f8..15a808d5be 100644 --- a/go.sum +++ b/go.sum @@ -49,6 +49,8 @@ 
github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIo github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -196,8 +198,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc= +github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -223,8 +225,8 @@ github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNIT github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-errors/errors v1.5.0 h1:/EuijeGOu7ckFxzhkj4CXJ8JaenxK7bKUxpPYqeLHqQ= -github.com/go-errors/errors v1.5.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -771,8 +773,8 @@ github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7 github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= -github.com/libp2p/go-libp2p-routing-helpers v0.7.0 h1:sirOYVD0wGWjkDwHZvinunIpaqPLBXkcnXApVHwZFGA= 
-github.com/libp2p/go-libp2p-routing-helpers v0.7.0/go.mod h1:R289GUxUMzRXIbWGSuUUTPrlVJZ3Y/pPz495+qgXJX8= +github.com/libp2p/go-libp2p-routing-helpers v0.7.2 h1:xJMFyhQ3Iuqnk9Q2dYE1eUTzsah7NLw3Qs2zjUV78T0= +github.com/libp2p/go-libp2p-routing-helpers v0.7.2/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= diff --git a/playground/package-lock.json b/playground/package-lock.json index dfb073f155..8188921373 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -14,15 +14,15 @@ "graphql": "^16.8.1", "react": "^18.2.0", "react-dom": "^18.2.0", - "react-hook-form": "^7.46.1" + "react-hook-form": "^7.47.0" }, "devDependencies": { - "@types/react": "^18.2.21", + "@types/react": "^18.2.24", "@types/react-dom": "^18.2.7", "@typescript-eslint/eslint-plugin": "^6.7.0", - "@typescript-eslint/parser": "^6.7.0", + "@typescript-eslint/parser": "^6.7.3", "@vitejs/plugin-react-swc": "^3.0.0", - "eslint": "^8.49.0", + "eslint": "^8.50.0", "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.3", "typescript": "^5.2.2", @@ -495,9 +495,9 @@ } }, "node_modules/@eslint/js": { - "version": "8.49.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.49.0.tgz", - "integrity": "sha512-1S8uAY/MTJqVx0SC4epBq+N2yhuwtNwLbJYNZyhL2pO1ZVKn5HFXav5T41Ryzy9K9V7ZId2JB2oy/W4aCd9/2w==", + "version": "8.50.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.50.0.tgz", + "integrity": "sha512-NCC3zz2+nvYd+Ckfh87rA47zfu2QsQpvc6k1yzTk+b9KzRj0wkGa8LSoGOXN6Zv4lRf/EIoZ80biDh9HOI+RNQ==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -1488,9 +1488,9 @@ "devOptional": true }, "node_modules/@types/react": { - "version": "18.2.21", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.21.tgz", - "integrity": "sha512-neFKG/sBAwGxHgXiIxnbm3/AAVQ/cMRS93hvBpg8xYRbeQSPVABp9U2bRnPf0iI4+Ucdv3plSxKK+3CW2ENJxA==", + "version": "18.2.24", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.24.tgz", + "integrity": "sha512-Ee0Jt4sbJxMu1iDcetZEIKQr99J1Zfb6D4F3qfUWoR1JpInkY1Wdg4WwCyBjL257D0+jGqSl1twBjV8iCaC0Aw==", "devOptional": true, "dependencies": { "@types/prop-types": "*", @@ -1563,15 +1563,15 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.7.0.tgz", - "integrity": "sha512-jZKYwqNpNm5kzPVP5z1JXAuxjtl2uG+5NpaMocFPTNC2EdYIgbXIPImObOkhbONxtFTTdoZstLZefbaK+wXZng==", + "version": "6.7.3", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.7.3.tgz", + "integrity": "sha512-TlutE+iep2o7R8Lf+yoer3zU6/0EAUc8QIBB3GYBc1KGz4c4TRm83xwXUZVPlZ6YCLss4r77jbu6j3sendJoiQ==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "6.7.0", - "@typescript-eslint/types": "6.7.0", - "@typescript-eslint/typescript-estree": "6.7.0", - "@typescript-eslint/visitor-keys": "6.7.0", + "@typescript-eslint/scope-manager": "6.7.3", + "@typescript-eslint/types": "6.7.3", + "@typescript-eslint/typescript-estree": "6.7.3", + "@typescript-eslint/visitor-keys": "6.7.3", "debug": "^4.3.4" }, "engines": { @@ -1590,6 +1590,80 @@ } } }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { + 
"version": "6.7.3", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.3.tgz", + "integrity": "sha512-wOlo0QnEou9cHO2TdkJmzF7DFGvAKEnB82PuPNHpT8ZKKaZu6Bm63ugOTn9fXNJtvuDPanBc78lGUGGytJoVzQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.7.3", + "@typescript-eslint/visitor-keys": "6.7.3" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { + "version": "6.7.3", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.3.tgz", + "integrity": "sha512-4g+de6roB2NFcfkZb439tigpAMnvEIg3rIjWQ+EM7IBaYt/CdJt6em9BJ4h4UpdgaBWdmx2iWsafHTrqmgIPNw==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { + "version": "6.7.3", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.3.tgz", + "integrity": "sha512-YLQ3tJoS4VxLFYHTw21oe1/vIZPRqAO91z6Uv0Ss2BKm/Ag7/RVQBcXTGcXhgJMdA4U+HrKuY5gWlJlvoaKZ5g==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.7.3", + "@typescript-eslint/visitor-keys": "6.7.3", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { + "version": "6.7.3", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.3.tgz", + "integrity": "sha512-HEVXkU9IB+nk9o63CeICMHxFWbHWr3E1mpilIQBe9+7L/lH97rleFLVtYsfnWB+JVMaiFnEaxvknvmIzX+CqVg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.7.3", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/scope-manager": { "version": "6.7.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.0.tgz", @@ -2069,15 +2143,15 @@ } }, "node_modules/eslint": { - "version": "8.49.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.49.0.tgz", - "integrity": "sha512-jw03ENfm6VJI0jA9U+8H5zfl5b+FvuU3YYvZRdZHOlU2ggJkxrlkJH4HcDrZpj6YwD8kuYqvQM8LyesoazrSOQ==", + "version": "8.50.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.50.0.tgz", + "integrity": "sha512-FOnOGSuFuFLv/Sa+FDVRZl4GGVAAFFi8LecRsI5a1tMO5HIE8nCm4ivAlzt4dT3ol/PaaGC0rJEEXQmHJBGoOg==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", "@eslint/eslintrc": "^2.1.2", - "@eslint/js": "8.49.0", + "@eslint/js": "8.50.0", "@humanwhocodes/config-array": "^0.11.11", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", @@ -3012,9 +3086,9 @@ } }, "node_modules/postcss": { - "version": "8.4.27", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.27.tgz", - "integrity": 
"sha512-gY/ACJtJPSmUFPDCHtX78+01fHa64FaU4zaaWfuh1MhGJISufJAH4cun6k/8fwsHYeK4UQmENQK+tRLCFJE8JQ==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "dev": true, "funding": [ { @@ -3101,9 +3175,9 @@ } }, "node_modules/react-hook-form": { - "version": "7.46.1", - "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.46.1.tgz", - "integrity": "sha512-0GfI31LRTBd5tqbXMGXT1Rdsv3rnvy0FjEk8Gn9/4tp6+s77T7DPZuGEpBRXOauL+NhyGT5iaXzdIM2R6F/E+w==", + "version": "7.47.0", + "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.47.0.tgz", + "integrity": "sha512-F/TroLjTICipmHeFlMrLtNLceO2xr1jU3CyiNla5zdwsGUGu2UOxxR4UyJgLlhMwLW/Wzp4cpJ7CPfgJIeKdSg==", "engines": { "node": ">=12.22.0" }, diff --git a/playground/package.json b/playground/package.json index f5d9767dea..948f40311e 100644 --- a/playground/package.json +++ b/playground/package.json @@ -16,15 +16,15 @@ "graphql": "^16.8.1", "react": "^18.2.0", "react-dom": "^18.2.0", - "react-hook-form": "^7.46.1" + "react-hook-form": "^7.47.0" }, "devDependencies": { - "@types/react": "^18.2.21", + "@types/react": "^18.2.24", "@types/react-dom": "^18.2.7", "@typescript-eslint/eslint-plugin": "^6.7.0", - "@typescript-eslint/parser": "^6.7.0", + "@typescript-eslint/parser": "^6.7.3", "@vitejs/plugin-react-swc": "^3.0.0", - "eslint": "^8.49.0", + "eslint": "^8.50.0", "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.3", "typescript": "^5.2.2", From 1b79fb57902cdf99e97d3511c232549f750265d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 15:58:14 -0700 Subject: [PATCH 16/55] bot: Bump @vitejs/plugin-react-swc from 3.3.2 to 3.4.0 in /playground (#1904) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react-swc) from 3.3.2 to 3.4.0.
Release notes

Sourced from @vitejs/plugin-react-swc's releases.

v3.4.0

  • Add devTarget option (fixes #141); a usage sketch follows the commit list below
  • Disable Fast Refresh based on config.server.hmr === false instead of process.env.TEST
  • Warn when plugin is in WebContainers (see #118)
  • Better invalidation message when an export is added & fix HMR for export of nullish values (#143)
Changelog

Sourced from @vitejs/plugin-react-swc's changelog.

3.4.0

  • Add devTarget option (fixes #141)
  • Disable Fast Refresh based on config.server.hmr === false instead of process.env.TEST
  • Warn when plugin is in WebContainers (see #118)
  • Better invalidation message when an export is added & fix HMR for export of nullish values (#143)
Commits
  • 39ed814 release: v3.4.0
  • bc2bf2f feat: add devTarget option (fixes #141) (#149)
  • de5993e chore(deps): update all non-major dependencies (#150)
  • 27e3854 fix(deps): update all non-major dependencies (#146)
  • 5a9bdca chore(deps): update actions/checkout action to v4 (#147)
  • 3297b1c fix(deps): update all non-major dependencies (#144)
  • 733010c fix: better invalidation message when an export is added & fix HMR for export...
  • 684c3c0 Fix tsc error display
  • 179c561 fix(deps): update all non-major dependencies (#142)
  • 2982aaa chore(deps): update all non-major dependencies (#139)
  • Additional commits viewable in compare view
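
To make the `devTarget` entry above concrete, here is a minimal sketch of wiring the new option into a Vite config. It is an illustration only: the file name `vite.config.ts` and the `es2022` value are assumptions, not part of this patch.

```ts
// vite.config.ts (assumed name): minimal sketch of the 3.4.0 devTarget option.
import { defineConfig } from "vite";
import react from "@vitejs/plugin-react-swc";

export default defineConfig({
  plugins: [
    react({
      // New in 3.4.0: sets the SWC transform target for dev serving only;
      // production output is still governed by Vite's build.target.
      devTarget: "es2022", // assumed value, for illustration
    }),
  ],
});
```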

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@vitejs/plugin-react-swc&package-manager=npm_and_yarn&previous-version=3.3.2&new-version=3.4.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 202 +++++++++++++++++++++++++++++++---- playground/package.json | 2 +- 2 files changed, 182 insertions(+), 22 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index 8188921373..01f1238f69 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -21,7 +21,7 @@ "@types/react-dom": "^18.2.7", "@typescript-eslint/eslint-plugin": "^6.7.0", "@typescript-eslint/parser": "^6.7.3", - "@vitejs/plugin-react-swc": "^3.0.0", + "@vitejs/plugin-react-swc": "^3.4.0", "eslint": "^8.50.0", "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.3", @@ -1378,11 +1378,15 @@ } }, "node_modules/@swc/core": { - "version": "1.3.62", - "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.3.62.tgz", - "integrity": "sha512-J58hWY+/G8vOr4J6ZH9hLg0lMSijZtqIIf4HofZezGog/pVX6sJyBJ40dZ1ploFkDIlWTWvJyqtpesBKS73gkQ==", + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.3.91.tgz", + "integrity": "sha512-r950d0fdlZ8qbSDyvApn3HyCojiZE8xpgJzQvypeMi32dalYwugdJKWyLB55JIGMRGJ8+lmVvY4MPGkSR3kXgA==", "dev": true, "hasInstallScript": true, + "dependencies": { + "@swc/counter": "^0.1.1", + "@swc/types": "^0.1.5" + }, "engines": { "node": ">=10" }, @@ -1391,16 +1395,16 @@ "url": "https://opencollective.com/swc" }, "optionalDependencies": { - "@swc/core-darwin-arm64": "1.3.62", - "@swc/core-darwin-x64": "1.3.62", - "@swc/core-linux-arm-gnueabihf": "1.3.62", - "@swc/core-linux-arm64-gnu": "1.3.62", - "@swc/core-linux-arm64-musl": "1.3.62", - "@swc/core-linux-x64-gnu": "1.3.62", - "@swc/core-linux-x64-musl": "1.3.62", - "@swc/core-win32-arm64-msvc": "1.3.62", - "@swc/core-win32-ia32-msvc": "1.3.62", - "@swc/core-win32-x64-msvc": "1.3.62" + "@swc/core-darwin-arm64": "1.3.91", + "@swc/core-darwin-x64": "1.3.91", + "@swc/core-linux-arm-gnueabihf": "1.3.91", + "@swc/core-linux-arm64-gnu": "1.3.91", + "@swc/core-linux-arm64-musl": "1.3.91", + "@swc/core-linux-x64-gnu": "1.3.91", + "@swc/core-linux-x64-musl": "1.3.91", + "@swc/core-win32-arm64-msvc": "1.3.91", + "@swc/core-win32-ia32-msvc": "1.3.91", + "@swc/core-win32-x64-msvc": "1.3.91" }, "peerDependencies": { "@swc/helpers": "^0.5.0" @@ -1412,9 +1416,9 @@ } }, "node_modules/@swc/core-darwin-arm64": { - "version": "1.3.62", - "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.62.tgz", - "integrity": "sha512-MmGilibITz68LEje6vJlKzc2gUUSgzvB3wGLSjEORikTNeM7P8jXVxE4A8fgZqDeudJUm9HVWrxCV+pHDSwXhA==", + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.91.tgz", + "integrity": "sha512-7kHGiQ1he5khcEeJuHDmLZPM3rRL/ith5OTmV6bOPsoHi46kLeixORW+ts1opC3tC9vu6xbk16xgX0QAJchc1w==", "cpu": [ "arm64" ], @@ -1427,6 +1431,162 @@ "node": ">=10" } }, + "node_modules/@swc/core-darwin-x64": { + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.3.91.tgz", + "integrity": "sha512-8SpU18FbFpZDVzsHsAwdI1thF/picQGxq9UFxa8W+T9SDnbsqwFJv/6RqKJeJoDV6qFdl2OLjuO0OL7xrp0qnQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm-gnueabihf": { + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.3.91.tgz", + "integrity": 
"sha512-fOq4Cy8UbwX1yf0WB0d8hWZaIKCnPtPGguRqdXGLfwvhjZ9SIErT6PnmGTGRbQCNCIkOZWHKyTU0r8t2dN3haQ==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-gnu": { + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.3.91.tgz", + "integrity": "sha512-fki4ioRP/Esy4vdp8T34RCV+V9dqkRmOt763pf74pdiyFV2dPLXa5lnw/XvR1RTfPGknrYgjEQLCfZlReTryRw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-musl": { + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.3.91.tgz", + "integrity": "sha512-XrG+DUUqNtfVLcJ20imby7fpBwQNG5VsEQBzQndSonPyUOa2YkTbBb60YDondfQGDABopuHH8gHN8o2H2/VCnQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-gnu": { + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.3.91.tgz", + "integrity": "sha512-d11bYhX+YPBr/Frcjc6eVn3C0LuS/9U1Li9EmQ+6s9EpYtYRl2ygSlC8eueLbaiazBnCVYFnc8bU4o0kc5B9sw==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-musl": { + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.3.91.tgz", + "integrity": "sha512-2SRp5Dke2P4jCQePkDx9trkkTstnRpZJVw5r3jvYdk0zeO6iC4+ZPvvoWXJLigqQv/fZnIiSUfJ6ssOoaEqTzQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-arm64-msvc": { + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.3.91.tgz", + "integrity": "sha512-l9qKXikOxj42UIjbeZpz9xtBmr736jOMqInNP8mVF2/U+ws5sI8zJjcOFFtfis4ru7vWCXhB1wtltdlJYO2vGA==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-ia32-msvc": { + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.3.91.tgz", + "integrity": "sha512-+s+52O0QVPmzOgjEe/rcb0AK6q/J7EHKwAyJCu/FaYO9df5ovE0HJjSKP6HAF0dGPO5hkENrXuNGujofUH9vtQ==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-x64-msvc": { + "version": "1.3.91", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.3.91.tgz", + "integrity": "sha512-7u9HDQhjUC3Gv43EFW84dZtduWCSa4MgltK+Sp9zEGti6WXqDPu/ESjvDsQEVYTBEMEvZs/xVAXPgLVHorV5nQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.2.tgz", + "integrity": "sha512-9F4ys4C74eSTEUNndnER3VJ15oru2NumfQxS8geE+f3eB5xvfxpWyqE5XlVnxb/R14uoXi6SLbBwwiDSkv+XEw==", + "dev": true + }, + "node_modules/@swc/types": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.5.tgz", + "integrity": 
"sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw==", + "dev": true + }, "node_modules/@tanstack/query-core": { "version": "4.35.3", "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-4.35.3.tgz", @@ -1791,12 +1951,12 @@ } }, "node_modules/@vitejs/plugin-react-swc": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.3.2.tgz", - "integrity": "sha512-VJFWY5sfoZerQRvJrh518h3AcQt6f/yTuWn4/TRB+dqmYU0NX1qz7qM5Wfd+gOQqUzQW4gxKqKN3KpE/P3+zrA==", + "version": "3.4.0", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.4.0.tgz", + "integrity": "sha512-m7UaA4Uvz82N/0EOVpZL4XsFIakRqrFKeSNxa1FBLSXGvWrWRBwmZb4qxk+ZIVAZcW3c3dn5YosomDgx62XWcQ==", "dev": true, "dependencies": { - "@swc/core": "^1.3.61" + "@swc/core": "^1.3.85" }, "peerDependencies": { "vite": "^4" diff --git a/playground/package.json b/playground/package.json index 948f40311e..ac44cf3836 100644 --- a/playground/package.json +++ b/playground/package.json @@ -23,7 +23,7 @@ "@types/react-dom": "^18.2.7", "@typescript-eslint/eslint-plugin": "^6.7.0", "@typescript-eslint/parser": "^6.7.3", - "@vitejs/plugin-react-swc": "^3.0.0", + "@vitejs/plugin-react-swc": "^3.4.0", "eslint": "^8.50.0", "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.3", From 85b4e27793f6861ac8061239fcefa77dbf5a2ed3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 16:19:17 -0700 Subject: [PATCH 17/55] bot: Bump @typescript-eslint/parser from 6.7.3 to 6.7.4 in /playground (#1933) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@typescript-eslint/parser](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/parser) from 6.7.3 to 6.7.4.
Release notes

Sourced from @typescript-eslint/parser's releases.

v6.7.4

6.7.4 (2023-10-02)

Note: Version bump only for package @typescript-eslint/typescript-eslint

You can read about our versioning strategy and releases on our website.

Changelog

Sourced from @typescript-eslint/parser's changelog.

6.7.4 (2023-10-02)

Note: Version bump only for package @typescript-eslint/parser

You can read about our versioning strategy and releases on our website.

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@typescript-eslint/parser&package-manager=npm_and_yarn&previous-version=6.7.3&new-version=6.7.4)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 50 ++++++++++++++++++------------------ playground/package.json | 2 +- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index 01f1238f69..46e988e8c4 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -20,7 +20,7 @@ "@types/react": "^18.2.24", "@types/react-dom": "^18.2.7", "@typescript-eslint/eslint-plugin": "^6.7.0", - "@typescript-eslint/parser": "^6.7.3", + "@typescript-eslint/parser": "^6.7.4", "@vitejs/plugin-react-swc": "^3.4.0", "eslint": "^8.50.0", "eslint-plugin-react-hooks": "^4.6.0", @@ -1723,15 +1723,15 @@ } }, "node_modules/@typescript-eslint/parser": { - "version": "6.7.3", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.7.3.tgz", - "integrity": "sha512-TlutE+iep2o7R8Lf+yoer3zU6/0EAUc8QIBB3GYBc1KGz4c4TRm83xwXUZVPlZ6YCLss4r77jbu6j3sendJoiQ==", + "version": "6.7.4", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.7.4.tgz", + "integrity": "sha512-I5zVZFY+cw4IMZUeNCU7Sh2PO5O57F7Lr0uyhgCJmhN/BuTlnc55KxPonR4+EM3GBdfiCyGZye6DgMjtubQkmA==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "6.7.3", - "@typescript-eslint/types": "6.7.3", - "@typescript-eslint/typescript-estree": "6.7.3", - "@typescript-eslint/visitor-keys": "6.7.3", + "@typescript-eslint/scope-manager": "6.7.4", + "@typescript-eslint/types": "6.7.4", + "@typescript-eslint/typescript-estree": "6.7.4", + "@typescript-eslint/visitor-keys": "6.7.4", "debug": "^4.3.4" }, "engines": { @@ -1751,13 +1751,13 @@ } }, "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { - "version": "6.7.3", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.3.tgz", - "integrity": "sha512-wOlo0QnEou9cHO2TdkJmzF7DFGvAKEnB82PuPNHpT8ZKKaZu6Bm63ugOTn9fXNJtvuDPanBc78lGUGGytJoVzQ==", + "version": "6.7.4", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.4.tgz", + "integrity": "sha512-SdGqSLUPTXAXi7c3Ob7peAGVnmMoGzZ361VswK2Mqf8UOYcODiYvs8rs5ILqEdfvX1lE7wEZbLyELCW+Yrql1A==", "dev": true, "dependencies": { - "@typescript-eslint/types": "6.7.3", - "@typescript-eslint/visitor-keys": "6.7.3" + "@typescript-eslint/types": "6.7.4", + "@typescript-eslint/visitor-keys": "6.7.4" }, "engines": { "node": "^16.0.0 || >=18.0.0" @@ -1768,9 +1768,9 @@ } }, "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { - "version": "6.7.3", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.3.tgz", - "integrity": "sha512-4g+de6roB2NFcfkZb439tigpAMnvEIg3rIjWQ+EM7IBaYt/CdJt6em9BJ4h4UpdgaBWdmx2iWsafHTrqmgIPNw==", + "version": "6.7.4", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.4.tgz", + "integrity": "sha512-o9XWK2FLW6eSS/0r/tgjAGsYasLAnOWg7hvZ/dGYSSNjCh+49k5ocPN8OmG5aZcSJ8pclSOyVKP2x03Sj+RrCA==", "dev": true, "engines": { "node": "^16.0.0 || >=18.0.0" @@ -1781,13 +1781,13 @@ } }, "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { - "version": "6.7.3", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.3.tgz", - "integrity": "sha512-YLQ3tJoS4VxLFYHTw21oe1/vIZPRqAO91z6Uv0Ss2BKm/Ag7/RVQBcXTGcXhgJMdA4U+HrKuY5gWlJlvoaKZ5g==", + "version": "6.7.4", + 
"resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.4.tgz", + "integrity": "sha512-ty8b5qHKatlNYd9vmpHooQz3Vki3gG+3PchmtsA4TgrZBKWHNjWfkQid7K7xQogBqqc7/BhGazxMD5vr6Ha+iQ==", "dev": true, "dependencies": { - "@typescript-eslint/types": "6.7.3", - "@typescript-eslint/visitor-keys": "6.7.3", + "@typescript-eslint/types": "6.7.4", + "@typescript-eslint/visitor-keys": "6.7.4", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -1808,12 +1808,12 @@ } }, "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { - "version": "6.7.3", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.3.tgz", - "integrity": "sha512-HEVXkU9IB+nk9o63CeICMHxFWbHWr3E1mpilIQBe9+7L/lH97rleFLVtYsfnWB+JVMaiFnEaxvknvmIzX+CqVg==", + "version": "6.7.4", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.4.tgz", + "integrity": "sha512-pOW37DUhlTZbvph50x5zZCkFn3xzwkGtNoJHzIM3svpiSkJzwOYr/kVBaXmf+RAQiUDs1AHEZVNPg6UJCJpwRA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "6.7.3", + "@typescript-eslint/types": "6.7.4", "eslint-visitor-keys": "^3.4.1" }, "engines": { diff --git a/playground/package.json b/playground/package.json index ac44cf3836..9b2c2ecc9f 100644 --- a/playground/package.json +++ b/playground/package.json @@ -22,7 +22,7 @@ "@types/react": "^18.2.24", "@types/react-dom": "^18.2.7", "@typescript-eslint/eslint-plugin": "^6.7.0", - "@typescript-eslint/parser": "^6.7.3", + "@typescript-eslint/parser": "^6.7.4", "@vitejs/plugin-react-swc": "^3.4.0", "eslint": "^8.50.0", "eslint-plugin-react-hooks": "^4.6.0", From 34457234f2a790b4dd61fe25a476e639b6f6d1d0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 4 Oct 2023 18:43:54 -0700 Subject: [PATCH 18/55] bot: Bump @types/react from 18.2.24 to 18.2.25 in /playground (#1932) Bumps [@types/react](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/react) from 18.2.24 to 18.2.25.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@types/react&package-manager=npm_and_yarn&previous-version=18.2.24&new-version=18.2.25)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone --- playground/package-lock.json | 8 ++++---- playground/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index 46e988e8c4..7f12ea0b48 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -17,7 +17,7 @@ "react-hook-form": "^7.47.0" }, "devDependencies": { - "@types/react": "^18.2.24", + "@types/react": "^18.2.25", "@types/react-dom": "^18.2.7", "@typescript-eslint/eslint-plugin": "^6.7.0", "@typescript-eslint/parser": "^6.7.4", @@ -1648,9 +1648,9 @@ "devOptional": true }, "node_modules/@types/react": { - "version": "18.2.24", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.24.tgz", - "integrity": "sha512-Ee0Jt4sbJxMu1iDcetZEIKQr99J1Zfb6D4F3qfUWoR1JpInkY1Wdg4WwCyBjL257D0+jGqSl1twBjV8iCaC0Aw==", + "version": "18.2.25", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.25.tgz", + "integrity": "sha512-24xqse6+VByVLIr+xWaQ9muX1B4bXJKXBbjszbld/UEDslGLY53+ZucF44HCmLbMPejTzGG9XgR+3m2/Wqu1kw==", "devOptional": true, "dependencies": { "@types/prop-types": "*", diff --git a/playground/package.json b/playground/package.json index 9b2c2ecc9f..faa50751ab 100644 --- a/playground/package.json +++ b/playground/package.json @@ -19,7 +19,7 @@ "react-hook-form": "^7.47.0" }, "devDependencies": { - "@types/react": "^18.2.24", + "@types/react": "^18.2.25", "@types/react-dom": "^18.2.7", "@typescript-eslint/eslint-plugin": "^6.7.0", "@typescript-eslint/parser": "^6.7.4", From 28d8260b188b8e03325daace0b4b2c050d190d86 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 18:18:36 -0700 Subject: [PATCH 19/55] bot: Bump combined dependencies 09-10-2023 (#1951) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ✅ This PR was created by the Combine PRs action by combining the following PRs: #1949 bot: Bump google.golang.org/grpc from 1.58.1 to 1.58.2 #1948 bot: Bump github.com/bits-and-blooms/bitset from 1.8.0 to 1.9.0 #1946 bot: Bump golang.org/x/crypto from 0.13.0 to 0.14.0 #1945 bot: Bump go.opentelemetry.io/otel/sdk/metric from 0.40.0 to 1.19.0 #1944 bot: Bump vite from 4.4.9 to 4.4.11 in /playground #1943 bot: Bump @types/react-dom from 18.2.7 to 18.2.11 in /playground #1942 bot: Bump @tanstack/react-query from 4.35.3 to 4.36.1 in /playground #1941 bot: Bump eslint from 8.50.0 to 8.51.0 in /playground ⚠️ The following PRs were merged/resolved manually as they had conflicts: #1947 bot: Bump golang.org/x/net from 0.15.0 to 0.16.0 #1940 bot: Bump @typescript-eslint/eslint-plugin from 6.7.0 to 6.7.4 in /playground --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Shahzad Lone --- go.mod | 20 ++-- go.sum | 40 +++---- playground/package-lock.json | 198 +++++++++++------------------------ playground/package.json | 10 +- 4 files changed, 97 insertions(+), 171 deletions(-) diff --git a/go.mod b/go.mod index 6d82464e78..8489f17281 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/sourcenetwork/defradb go 1.20 require ( - github.com/bits-and-blooms/bitset v1.8.0 + github.com/bits-and-blooms/bitset v1.9.0 github.com/bxcodec/faker v2.0.1+incompatible github.com/dgraph-io/badger/v4 v4.1.0 
github.com/evanphx/json-patch/v5 v5.7.0 @@ -43,13 +43,13 @@ require ( github.com/ugorji/go/codec v1.2.11 github.com/valyala/fastjson v1.6.4 github.com/vito/go-sse v1.0.0 - go.opentelemetry.io/otel/metric v1.18.0 - go.opentelemetry.io/otel/sdk/metric v0.40.0 + go.opentelemetry.io/otel/metric v1.19.0 + go.opentelemetry.io/otel/sdk/metric v1.19.0 go.uber.org/zap v1.25.0 - golang.org/x/crypto v0.13.0 + golang.org/x/crypto v0.14.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 - golang.org/x/net v0.15.0 - google.golang.org/grpc v1.58.1 + google.golang.org/grpc v1.58.2 + golang.org/x/net v0.16.0 google.golang.org/protobuf v1.31.0 ) @@ -182,15 +182,15 @@ require ( github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/x448/float16 v0.8.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.18.0 // indirect - go.opentelemetry.io/otel/sdk v1.17.0 // indirect - go.opentelemetry.io/otel/trace v1.18.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect go.uber.org/dig v1.17.0 // indirect go.uber.org/fx v1.20.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.12.0 // indirect + golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect diff --git a/go.sum b/go.sum index 15a808d5be..a7a1f36c26 100644 --- a/go.sum +++ b/go.sum @@ -86,8 +86,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= -github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.9.0 h1:g1YivPG8jOtrN013Fe8OBXubkiTwvm7/vG2vXz03ANU= +github.com/bits-and-blooms/bitset v1.9.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -1322,16 +1322,16 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.18.0 h1:TgVozPGZ01nHyDZxK5WGPFB9QexeTMXEH7+tIClWfzs= -go.opentelemetry.io/otel v1.18.0/go.mod h1:9lWqYO0Db579XzVuCKFNPDl4s73Voa+zEck3wHaAYQI= -go.opentelemetry.io/otel/metric v1.18.0 h1:JwVzw94UYmbx3ej++CwLUQZxEODDj/pOuTCvzhtRrSQ= -go.opentelemetry.io/otel/metric v1.18.0/go.mod h1:nNSpsVDjWGfb7chbRLUNW+PBNdcSTHD4Uu5pfFMOI0k= -go.opentelemetry.io/otel/sdk v1.17.0 h1:FLN2X66Ke/k5Sg3V623Q7h7nt3cHXaW1FOvKKrW0IpE= -go.opentelemetry.io/otel/sdk 
v1.17.0/go.mod h1:U87sE0f5vQB7hwUoW98pW5Rz4ZDuCFBZFNUBlSgmDFQ= -go.opentelemetry.io/otel/sdk/metric v0.40.0 h1:qOM29YaGcxipWjL5FzpyZDpCYrDREvX0mVlmXdOjCHU= -go.opentelemetry.io/otel/sdk/metric v0.40.0/go.mod h1:dWxHtdzdJvg+ciJUKLTKwrMe5P6Dv3FyDbh8UkfgkVs= -go.opentelemetry.io/otel/trace v1.18.0 h1:NY+czwbHbmndxojTEKiSMHkG2ClNH2PwmcHrdo0JY10= -go.opentelemetry.io/otel/trace v1.18.0/go.mod h1:T2+SGJGuYZY3bjj5rgh/hN7KIrlpWC5nS8Mjvzckz+0= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= +go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1393,8 +1393,8 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1489,8 +1489,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.15.0 h1:ugBLEUaxABaB5AJqW9enI0ACdci2RUd4eP51NTBvuJ8= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= +golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1601,8 +1601,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1791,8 +1791,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.58.1 h1:OL+Vz23DTtrrldqHK49FUOPHyY75rvFqJfXC84NYW58= -google.golang.org/grpc v1.58.1/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= +google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/playground/package-lock.json b/playground/package-lock.json index 7f12ea0b48..190d10d434 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -8,7 +8,7 @@ "name": "playground", "version": "0.0.0", "dependencies": { - "@tanstack/react-query": "^4.35.3", + "@tanstack/react-query": "^4.36.1", "fast-json-patch": "^3.1.1", "graphiql": "^3.0.6", "graphql": "^16.8.1", @@ -18,15 +18,15 @@ }, "devDependencies": { "@types/react": "^18.2.25", - "@types/react-dom": "^18.2.7", - "@typescript-eslint/eslint-plugin": "^6.7.0", + "@types/react-dom": "^18.2.11", + "@typescript-eslint/eslint-plugin": "^6.7.4", "@typescript-eslint/parser": "^6.7.4", "@vitejs/plugin-react-swc": "^3.4.0", - "eslint": "^8.50.0", + "eslint": "^8.51.0", "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.3", "typescript": "^5.2.2", - "vite": "^4.4.9" + "vite": "^4.4.11" } }, "node_modules/@aashutoshrathi/word-wrap": { @@ -495,9 +495,9 @@ } }, "node_modules/@eslint/js": { - "version": "8.50.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.50.0.tgz", - "integrity": "sha512-NCC3zz2+nvYd+Ckfh87rA47zfu2QsQpvc6k1yzTk+b9KzRj0wkGa8LSoGOXN6Zv4lRf/EIoZ80biDh9HOI+RNQ==", + "version": "8.51.0", + "resolved": 
"https://registry.npmjs.org/@eslint/js/-/js-8.51.0.tgz", + "integrity": "sha512-HxjQ8Qn+4SI3/AFv6sOrDB+g6PpUTDwSJiQqOrnneEk8L71161srI9gjzzZvYVbzHiVg/BvcH95+cK/zfIt4pg==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" @@ -1588,20 +1588,20 @@ "dev": true }, "node_modules/@tanstack/query-core": { - "version": "4.35.3", - "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-4.35.3.tgz", - "integrity": "sha512-PS+WEjd9wzKTyNjjQymvcOe1yg8f3wYc6mD+vb6CKyZAKvu4sIJwryfqfBULITKCla7P9C4l5e9RXePHvZOZeQ==", + "version": "4.36.1", + "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-4.36.1.tgz", + "integrity": "sha512-DJSilV5+ytBP1FbFcEJovv4rnnm/CokuVvrBEtW/Va9DvuJ3HksbXUJEpI0aV1KtuL4ZoO9AVE6PyNLzF7tLeA==", "funding": { "type": "github", "url": "https://github.com/sponsors/tannerlinsley" } }, "node_modules/@tanstack/react-query": { - "version": "4.35.3", - "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-4.35.3.tgz", - "integrity": "sha512-UgTPioip/rGG3EQilXfA2j4BJkhEQsR+KAbF+KIuvQ7j4MkgnTCJF01SfRpIRNtQTlEfz/+IL7+jP8WA8bFbsw==", + "version": "4.36.1", + "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-4.36.1.tgz", + "integrity": "sha512-y7ySVHFyyQblPl3J3eQBWpXZkliroki3ARnBKsdJchlgt7yJLRDUcf4B8soufgiYt3pEQIkBWBx1N9/ZPIeUWw==", "dependencies": { - "@tanstack/query-core": "4.35.3", + "@tanstack/query-core": "4.36.1", "use-sync-external-store": "^1.2.0" }, "funding": { @@ -1636,9 +1636,9 @@ "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==" }, "node_modules/@types/json-schema": { - "version": "7.0.12", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz", - "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==", + "version": "7.0.13", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.13.tgz", + "integrity": "sha512-RbSSoHliUbnXj3ny0CNFOoxrIDV6SUGyStHsvDqosw6CkdPV8TtWGlfecuK4ToyMEAql6pzNxgCFKanovUzlgQ==", "dev": true }, "node_modules/@types/prop-types": { @@ -1659,9 +1659,9 @@ } }, "node_modules/@types/react-dom": { - "version": "18.2.7", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.7.tgz", - "integrity": "sha512-GRaAEriuT4zp9N4p1i8BDBYmEyfo+xQ3yHjJU4eiK5NDa1RmUZG+unZABUTK4/Ox/M+GaHwb6Ow8rUITrtjszA==", + "version": "18.2.11", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.11.tgz", + "integrity": "sha512-zq6Dy0EiCuF9pWFW6I6k6W2LdpUixLE4P6XjXU1QHLfak3GPACQfLwEuHzY5pOYa4hzj1d0GxX/P141aFjZsyg==", "devOptional": true, "dependencies": { "@types/react": "*" @@ -1674,9 +1674,9 @@ "devOptional": true }, "node_modules/@types/semver": { - "version": "7.5.2", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.2.tgz", - "integrity": "sha512-7aqorHYgdNO4DM36stTiGO3DvKoex9TQRwsJU6vMaFGyqpBA1MNZkz+PG3gaNUPpTAOYhT1WR7M1JyA3fbS9Cw==", + "version": "7.5.3", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.3.tgz", + "integrity": "sha512-OxepLK9EuNEIPxWNME+C6WwbRAOOI2o2BaQEGzz5Lu2e4Z5eDnEo+/aVEDMIXywoJitJ7xWd641wrGLZdtwRyw==", "dev": true }, "node_modules/@types/tern": { @@ -1688,16 +1688,16 @@ } }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.7.0.tgz", - "integrity": 
"sha512-gUqtknHm0TDs1LhY12K2NA3Rmlmp88jK9Tx8vGZMfHeNMLE3GH2e9TRub+y+SOjuYgtOmok+wt1AyDPZqxbNag==", + "version": "6.7.4", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.7.4.tgz", + "integrity": "sha512-DAbgDXwtX+pDkAHwiGhqP3zWUGpW49B7eqmgpPtg+BKJXwdct79ut9+ifqOFPJGClGKSHXn2PTBatCnldJRUoA==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.5.1", - "@typescript-eslint/scope-manager": "6.7.0", - "@typescript-eslint/type-utils": "6.7.0", - "@typescript-eslint/utils": "6.7.0", - "@typescript-eslint/visitor-keys": "6.7.0", + "@typescript-eslint/scope-manager": "6.7.4", + "@typescript-eslint/type-utils": "6.7.4", + "@typescript-eslint/utils": "6.7.4", + "@typescript-eslint/visitor-keys": "6.7.4", "debug": "^4.3.4", "graphemer": "^1.4.0", "ignore": "^5.2.4", @@ -1750,7 +1750,7 @@ } } }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { + "node_modules/@typescript-eslint/scope-manager": { "version": "6.7.4", "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.4.tgz", "integrity": "sha512-SdGqSLUPTXAXi7c3Ob7peAGVnmMoGzZ361VswK2Mqf8UOYcODiYvs8rs5ILqEdfvX1lE7wEZbLyELCW+Yrql1A==", @@ -1767,88 +1767,14 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.4.tgz", - "integrity": "sha512-o9XWK2FLW6eSS/0r/tgjAGsYasLAnOWg7hvZ/dGYSSNjCh+49k5ocPN8OmG5aZcSJ8pclSOyVKP2x03Sj+RrCA==", - "dev": true, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.4.tgz", - "integrity": "sha512-ty8b5qHKatlNYd9vmpHooQz3Vki3gG+3PchmtsA4TgrZBKWHNjWfkQid7K7xQogBqqc7/BhGazxMD5vr6Ha+iQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.7.4", - "@typescript-eslint/visitor-keys": "6.7.4", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.4.tgz", - "integrity": "sha512-pOW37DUhlTZbvph50x5zZCkFn3xzwkGtNoJHzIM3svpiSkJzwOYr/kVBaXmf+RAQiUDs1AHEZVNPg6UJCJpwRA==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.7.4", - "eslint-visitor-keys": "^3.4.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/scope-manager": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.0.tgz", - "integrity": "sha512-lAT1Uau20lQyjoLUQ5FUMSX/dS07qux9rYd5FGzKz/Kf8W8ccuvMyldb8hadHdK/qOI7aikvQWqulnEq2nCEYA==", - "dev": true, - "dependencies": { - 
"@typescript-eslint/types": "6.7.0", - "@typescript-eslint/visitor-keys": "6.7.0" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@typescript-eslint/type-utils": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.7.0.tgz", - "integrity": "sha512-f/QabJgDAlpSz3qduCyQT0Fw7hHpmhOzY/Rv6zO3yO+HVIdPfIWhrQoAyG+uZVtWAIS85zAyzgAFfyEr+MgBpg==", + "version": "6.7.4", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.7.4.tgz", + "integrity": "sha512-n+g3zi1QzpcAdHFP9KQF+rEFxMb2KxtnJGID3teA/nxKHOVi3ylKovaqEzGBbVY2pBttU6z85gp0D00ufLzViQ==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "6.7.0", - "@typescript-eslint/utils": "6.7.0", + "@typescript-eslint/typescript-estree": "6.7.4", + "@typescript-eslint/utils": "6.7.4", "debug": "^4.3.4", "ts-api-utils": "^1.0.1" }, @@ -1869,9 +1795,9 @@ } }, "node_modules/@typescript-eslint/types": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.0.tgz", - "integrity": "sha512-ihPfvOp7pOcN/ysoj0RpBPOx3HQTJTrIN8UZK+WFd3/iDeFHHqeyYxa4hQk4rMhsz9H9mXpR61IzwlBVGXtl9Q==", + "version": "6.7.4", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.4.tgz", + "integrity": "sha512-o9XWK2FLW6eSS/0r/tgjAGsYasLAnOWg7hvZ/dGYSSNjCh+49k5ocPN8OmG5aZcSJ8pclSOyVKP2x03Sj+RrCA==", "dev": true, "engines": { "node": "^16.0.0 || >=18.0.0" @@ -1882,13 +1808,13 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.0.tgz", - "integrity": "sha512-dPvkXj3n6e9yd/0LfojNU8VMUGHWiLuBZvbM6V6QYD+2qxqInE7J+J/ieY2iGwR9ivf/R/haWGkIj04WVUeiSQ==", + "version": "6.7.4", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.4.tgz", + "integrity": "sha512-ty8b5qHKatlNYd9vmpHooQz3Vki3gG+3PchmtsA4TgrZBKWHNjWfkQid7K7xQogBqqc7/BhGazxMD5vr6Ha+iQ==", "dev": true, "dependencies": { - "@typescript-eslint/types": "6.7.0", - "@typescript-eslint/visitor-keys": "6.7.0", + "@typescript-eslint/types": "6.7.4", + "@typescript-eslint/visitor-keys": "6.7.4", "debug": "^4.3.4", "globby": "^11.1.0", "is-glob": "^4.0.3", @@ -1909,17 +1835,17 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.7.0.tgz", - "integrity": "sha512-MfCq3cM0vh2slSikQYqK2Gq52gvOhe57vD2RM3V4gQRZYX4rDPnKLu5p6cm89+LJiGlwEXU8hkYxhqqEC/V3qA==", + "version": "6.7.4", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.7.4.tgz", + "integrity": "sha512-PRQAs+HUn85Qdk+khAxsVV+oULy3VkbH3hQ8hxLRJXWBEd7iI+GbQxH5SEUSH7kbEoTp6oT1bOwyga24ELALTA==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", "@types/json-schema": "^7.0.12", "@types/semver": "^7.5.0", - "@typescript-eslint/scope-manager": "6.7.0", - "@typescript-eslint/types": "6.7.0", - "@typescript-eslint/typescript-estree": "6.7.0", + "@typescript-eslint/scope-manager": "6.7.4", + "@typescript-eslint/types": "6.7.4", + "@typescript-eslint/typescript-estree": "6.7.4", "semver": "^7.5.4" }, "engines": { @@ -1934,12 +1860,12 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "6.7.0", - "resolved": 
"https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.0.tgz", - "integrity": "sha512-/C1RVgKFDmGMcVGeD8HjKv2bd72oI1KxQDeY8uc66gw9R0OK0eMq48cA+jv9/2Ag6cdrsUGySm1yzYmfz0hxwQ==", + "version": "6.7.4", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.4.tgz", + "integrity": "sha512-pOW37DUhlTZbvph50x5zZCkFn3xzwkGtNoJHzIM3svpiSkJzwOYr/kVBaXmf+RAQiUDs1AHEZVNPg6UJCJpwRA==", "dev": true, "dependencies": { - "@typescript-eslint/types": "6.7.0", + "@typescript-eslint/types": "6.7.4", "eslint-visitor-keys": "^3.4.1" }, "engines": { @@ -2303,15 +2229,15 @@ } }, "node_modules/eslint": { - "version": "8.50.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.50.0.tgz", - "integrity": "sha512-FOnOGSuFuFLv/Sa+FDVRZl4GGVAAFFi8LecRsI5a1tMO5HIE8nCm4ivAlzt4dT3ol/PaaGC0rJEEXQmHJBGoOg==", + "version": "8.51.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.51.0.tgz", + "integrity": "sha512-2WuxRZBrlwnXi+/vFSJyjMqrNjtJqiasMzehF0shoLaW7DzS3/9Yvrmq5JiT66+pNjiX4UBnLDiKHcWAr/OInA==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", "@eslint/eslintrc": "^2.1.2", - "@eslint/js": "8.50.0", + "@eslint/js": "8.51.0", "@humanwhocodes/config-array": "^0.11.11", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", @@ -3765,9 +3691,9 @@ } }, "node_modules/vite": { - "version": "4.4.9", - "resolved": "https://registry.npmjs.org/vite/-/vite-4.4.9.tgz", - "integrity": "sha512-2mbUn2LlUmNASWwSCNSJ/EG2HuSRTnVNaydp6vMCm5VIqJsjMfbIWtbH2kDuwUVW5mMUKKZvGPX/rqeqVvv1XA==", + "version": "4.4.11", + "resolved": "https://registry.npmjs.org/vite/-/vite-4.4.11.tgz", + "integrity": "sha512-ksNZJlkcU9b0lBwAGZGGaZHCMqHsc8OpgtoYhsQ4/I2v5cnpmmmqe5pM4nv/4Hn6G/2GhTdj0DhZh2e+Er1q5A==", "dev": true, "dependencies": { "esbuild": "^0.18.10", diff --git a/playground/package.json b/playground/package.json index faa50751ab..95925b05b8 100644 --- a/playground/package.json +++ b/playground/package.json @@ -10,7 +10,7 @@ "preview": "vite preview" }, "dependencies": { - "@tanstack/react-query": "^4.35.3", + "@tanstack/react-query": "^4.36.1", "fast-json-patch": "^3.1.1", "graphiql": "^3.0.6", "graphql": "^16.8.1", @@ -20,14 +20,14 @@ }, "devDependencies": { "@types/react": "^18.2.25", - "@types/react-dom": "^18.2.7", - "@typescript-eslint/eslint-plugin": "^6.7.0", + "@types/react-dom": "^18.2.11", + "@typescript-eslint/eslint-plugin": "^6.7.4", "@typescript-eslint/parser": "^6.7.4", "@vitejs/plugin-react-swc": "^3.4.0", - "eslint": "^8.50.0", + "eslint": "^8.51.0", "eslint-plugin-react-hooks": "^4.6.0", "eslint-plugin-react-refresh": "^0.4.3", "typescript": "^5.2.2", - "vite": "^4.4.9" + "vite": "^4.4.11" } } From 281a34e6a1c261a6607bf4188695e1fa50187357 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 18:48:33 -0700 Subject: [PATCH 20/55] bot: Bump @typescript-eslint/eslint-plugin from 6.7.4 to 6.7.5 in /playground (#1953) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [@typescript-eslint/eslint-plugin](https://github.com/typescript-eslint/typescript-eslint/tree/HEAD/packages/eslint-plugin) from 6.7.4 to 6.7.5.
Release notes

Sourced from @typescript-eslint/eslint-plugin's releases.

v6.7.5

6.7.5 (2023-10-09)

Bug Fixes

- eslint-plugin: [prefer-string-starts-ends-with] only report slice/substring with correct range (#7712) (db40a0a)

You can read about our versioning strategy and releases on our website.

Changelog

Sourced from @typescript-eslint/eslint-plugin's changelog.

6.7.5 (2023-10-09)

Bug Fixes

- eslint-plugin: [prefer-string-starts-ends-with] only report slice/substring with correct range (#7712) (db40a0a)

You can read about our versioning strategy and releases on our website.

Commits
- 36aecb6 chore: publish v6.7.5
- db40a0a fix(eslint-plugin): [prefer-string-starts-ends-with] only report slice/substr...
- 8f4d939 docs: fix prefer-optional-chain example for the unsafe fixes option (#7711)
- See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@typescript-eslint/eslint-plugin&package-manager=npm_and_yarn&previous-version=6.7.4&new-version=6.7.5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 216 ++++++++++++++++++++++++++++++++--- playground/package.json | 2 +- 2 files changed, 198 insertions(+), 20 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index 190d10d434..b9625316f4 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -19,7 +19,7 @@ "devDependencies": { "@types/react": "^18.2.25", "@types/react-dom": "^18.2.11", - "@typescript-eslint/eslint-plugin": "^6.7.4", + "@typescript-eslint/eslint-plugin": "^6.7.5", "@typescript-eslint/parser": "^6.7.4", "@vitejs/plugin-react-swc": "^3.4.0", "eslint": "^8.51.0", @@ -1688,16 +1688,16 @@ } }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.7.4.tgz", - "integrity": "sha512-DAbgDXwtX+pDkAHwiGhqP3zWUGpW49B7eqmgpPtg+BKJXwdct79ut9+ifqOFPJGClGKSHXn2PTBatCnldJRUoA==", + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.7.5.tgz", + "integrity": "sha512-JhtAwTRhOUcP96D0Y6KYnwig/MRQbOoLGXTON2+LlyB/N35SP9j1boai2zzwXb7ypKELXMx3DVk9UTaEq1vHEw==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.5.1", - "@typescript-eslint/scope-manager": "6.7.4", - "@typescript-eslint/type-utils": "6.7.4", - "@typescript-eslint/utils": "6.7.4", - "@typescript-eslint/visitor-keys": "6.7.4", + "@typescript-eslint/scope-manager": "6.7.5", + "@typescript-eslint/type-utils": "6.7.5", + "@typescript-eslint/utils": "6.7.5", + "@typescript-eslint/visitor-keys": "6.7.5", "debug": "^4.3.4", "graphemer": "^1.4.0", "ignore": "^5.2.4", @@ -1722,6 +1722,53 @@ } } }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.5.tgz", + "integrity": "sha512-GAlk3eQIwWOJeb9F7MKQ6Jbah/vx1zETSDw8likab/eFcqkjSD7BI75SDAeC5N2L0MmConMoPvTsmkrg71+B1A==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.7.5", + "@typescript-eslint/visitor-keys": "6.7.5" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.5.tgz", + "integrity": "sha512-WboQBlOXtdj1tDFPyIthpKrUb+kZf2VroLZhxKa/VlwLlLyqv/PwUNgL30BlTVZV1Wu4Asu2mMYPqarSO4L5ZQ==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.5.tgz", + "integrity": "sha512-3MaWdDZtLlsexZzDSdQWsFQ9l9nL8B80Z4fImSpyllFC/KLqWQRdEcB+gGGO+N3Q2uL40EsG66wZLsohPxNXvg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.7.5", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, 
"node_modules/@typescript-eslint/parser": { "version": "6.7.4", "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.7.4.tgz", @@ -1768,13 +1815,13 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.7.4.tgz", - "integrity": "sha512-n+g3zi1QzpcAdHFP9KQF+rEFxMb2KxtnJGID3teA/nxKHOVi3ylKovaqEzGBbVY2pBttU6z85gp0D00ufLzViQ==", + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.7.5.tgz", + "integrity": "sha512-Gs0qos5wqxnQrvpYv+pf3XfcRXW6jiAn9zE/K+DlmYf6FcpxeNYN0AIETaPR7rHO4K2UY+D0CIbDP9Ut0U4m1g==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "6.7.4", - "@typescript-eslint/utils": "6.7.4", + "@typescript-eslint/typescript-estree": "6.7.5", + "@typescript-eslint/utils": "6.7.5", "debug": "^4.3.4", "ts-api-utils": "^1.0.1" }, @@ -1794,6 +1841,63 @@ } } }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.5.tgz", + "integrity": "sha512-WboQBlOXtdj1tDFPyIthpKrUb+kZf2VroLZhxKa/VlwLlLyqv/PwUNgL30BlTVZV1Wu4Asu2mMYPqarSO4L5ZQ==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.5.tgz", + "integrity": "sha512-NhJiJ4KdtwBIxrKl0BqG1Ur+uw7FiOnOThcYx9DpOGJ/Abc9z2xNzLeirCG02Ig3vkvrc2qFLmYSSsaITbKjlg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.7.5", + "@typescript-eslint/visitor-keys": "6.7.5", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.5.tgz", + "integrity": "sha512-3MaWdDZtLlsexZzDSdQWsFQ9l9nL8B80Z4fImSpyllFC/KLqWQRdEcB+gGGO+N3Q2uL40EsG66wZLsohPxNXvg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.7.5", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/types": { "version": "6.7.4", "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.4.tgz", @@ -1835,17 +1939,17 @@ } }, "node_modules/@typescript-eslint/utils": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.7.4.tgz", - "integrity": "sha512-PRQAs+HUn85Qdk+khAxsVV+oULy3VkbH3hQ8hxLRJXWBEd7iI+GbQxH5SEUSH7kbEoTp6oT1bOwyga24ELALTA==", + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.7.5.tgz", + "integrity": 
"sha512-pfRRrH20thJbzPPlPc4j0UNGvH1PjPlhlCMq4Yx7EGjV7lvEeGX0U6MJYe8+SyFutWgSHsdbJ3BXzZccYggezA==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", "@types/json-schema": "^7.0.12", "@types/semver": "^7.5.0", - "@typescript-eslint/scope-manager": "6.7.4", - "@typescript-eslint/types": "6.7.4", - "@typescript-eslint/typescript-estree": "6.7.4", + "@typescript-eslint/scope-manager": "6.7.5", + "@typescript-eslint/types": "6.7.5", + "@typescript-eslint/typescript-estree": "6.7.5", "semver": "^7.5.4" }, "engines": { @@ -1859,6 +1963,80 @@ "eslint": "^7.0.0 || ^8.0.0" } }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.5.tgz", + "integrity": "sha512-GAlk3eQIwWOJeb9F7MKQ6Jbah/vx1zETSDw8likab/eFcqkjSD7BI75SDAeC5N2L0MmConMoPvTsmkrg71+B1A==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.7.5", + "@typescript-eslint/visitor-keys": "6.7.5" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.5.tgz", + "integrity": "sha512-WboQBlOXtdj1tDFPyIthpKrUb+kZf2VroLZhxKa/VlwLlLyqv/PwUNgL30BlTVZV1Wu4Asu2mMYPqarSO4L5ZQ==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.5.tgz", + "integrity": "sha512-NhJiJ4KdtwBIxrKl0BqG1Ur+uw7FiOnOThcYx9DpOGJ/Abc9z2xNzLeirCG02Ig3vkvrc2qFLmYSSsaITbKjlg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.7.5", + "@typescript-eslint/visitor-keys": "6.7.5", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.5.tgz", + "integrity": "sha512-3MaWdDZtLlsexZzDSdQWsFQ9l9nL8B80Z4fImSpyllFC/KLqWQRdEcB+gGGO+N3Q2uL40EsG66wZLsohPxNXvg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.7.5", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, "node_modules/@typescript-eslint/visitor-keys": { "version": "6.7.4", "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.4.tgz", diff --git a/playground/package.json b/playground/package.json index 95925b05b8..606f69abfd 100644 --- a/playground/package.json +++ b/playground/package.json @@ -21,7 +21,7 @@ "devDependencies": { "@types/react": "^18.2.25", "@types/react-dom": "^18.2.11", - 
"@typescript-eslint/eslint-plugin": "^6.7.4", + "@typescript-eslint/eslint-plugin": "^6.7.5", "@typescript-eslint/parser": "^6.7.4", "@vitejs/plugin-react-swc": "^3.4.0", "eslint": "^8.51.0", From 56dceda00ceede29897c67b90d0118bc74bf95c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 19:29:23 -0700 Subject: [PATCH 21/55] bot: Bump @types/react-dom from 18.2.11 to 18.2.12 in /playground (#1952) Bumps [@types/react-dom](https://github.com/DefinitelyTyped/DefinitelyTyped/tree/HEAD/types/react-dom) from 18.2.11 to 18.2.12.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=@types/react-dom&package-manager=npm_and_yarn&previous-version=18.2.11&new-version=18.2.12)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- playground/package-lock.json | 8 ++++---- playground/package.json | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/playground/package-lock.json b/playground/package-lock.json index b9625316f4..edf9e8c1cc 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -18,7 +18,7 @@ }, "devDependencies": { "@types/react": "^18.2.25", - "@types/react-dom": "^18.2.11", + "@types/react-dom": "^18.2.12", "@typescript-eslint/eslint-plugin": "^6.7.5", "@typescript-eslint/parser": "^6.7.4", "@vitejs/plugin-react-swc": "^3.4.0", @@ -1659,9 +1659,9 @@ } }, "node_modules/@types/react-dom": { - "version": "18.2.11", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.11.tgz", - "integrity": "sha512-zq6Dy0EiCuF9pWFW6I6k6W2LdpUixLE4P6XjXU1QHLfak3GPACQfLwEuHzY5pOYa4hzj1d0GxX/P141aFjZsyg==", + "version": "18.2.12", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.12.tgz", + "integrity": "sha512-QWZuiA/7J/hPIGocXreCRbx7wyoeet9ooxfbSA+zbIWqyQEE7GMtRn4A37BdYyksnN+/NDnWgfxZH9UVGDw1hg==", "devOptional": true, "dependencies": { "@types/react": "*" diff --git a/playground/package.json b/playground/package.json index 606f69abfd..245101f0cb 100644 --- a/playground/package.json +++ b/playground/package.json @@ -20,7 +20,7 @@ }, "devDependencies": { "@types/react": "^18.2.25", - "@types/react-dom": "^18.2.11", + "@types/react-dom": "^18.2.12", "@typescript-eslint/eslint-plugin": "^6.7.5", "@typescript-eslint/parser": "^6.7.4", "@vitejs/plugin-react-swc": "^3.4.0", From ce7d7786ef9eedfa994446bcb620a85290553ac2 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Tue, 10 Oct 2023 09:41:50 -0700 Subject: [PATCH 22/55] fix: Node private key requires data directory (#1938) ## Relevant issue(s) N/A ## Description This PR adds an option to set the p2p host private key. When no key is set, a new one will be generated. This fixes various tests that had to manually set a data directory even in memory only mode. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? `make test` Manually ran in both badger and memory mode to test key generation. 
Specify the platform(s) on which this was tested: - MacOS --- cli/start.go | 21 ++++++--- cli/utils.go | 38 +++++++++++++++++ net/config.go | 10 ++--- net/config_test.go | 11 +++-- net/dialer_test.go | 12 ------ net/node.go | 54 ++++------------------- net/node_test.go | 85 ------------------------------------- net/peer_test.go | 13 ------ tests/integration/state.go | 6 +++ tests/integration/utils2.go | 13 +++--- 10 files changed, 88 insertions(+), 175 deletions(-) diff --git a/cli/start.go b/cli/start.go index c3b869fbf8..2962aa45e6 100644 --- a/cli/start.go +++ b/cli/start.go @@ -17,6 +17,7 @@ import ( "net/http" "os" "os/signal" + "path/filepath" "strings" "syscall" @@ -239,12 +240,22 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { // init the p2p node var n *net.Node if !cfg.Net.P2PDisabled { - log.FeedbackInfo(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) - n, err = net.NewNode( - ctx, - db, + nodeOpts := []net.NodeOpt{ net.WithConfig(cfg), - ) + } + if cfg.Datastore.Store == badgerDatastoreName { + // It would be ideal to not have the key path tied to the datastore. + // Running with memory store mode will always generate a random key. + // Adding support for an ephemeral mode and moving the key to the + // config would solve both of these issues. + key, err := loadOrGeneratePrivateKey(filepath.Join(cfg.Rootdir, "data", "key")) + if err != nil { + return nil, err + } + nodeOpts = append(nodeOpts, net.WithPrivateKey(key)) + } + log.FeedbackInfo(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) + n, err = net.NewNode(ctx, db, nodeOpts...) if err != nil { db.Close(ctx) return nil, errors.Wrap("failed to start P2P node", err) diff --git a/cli/utils.go b/cli/utils.go index b9e4d1a710..d45808145e 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -13,7 +13,9 @@ package cli import ( "context" "encoding/json" + "os" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/spf13/cobra" "github.com/sourcenetwork/defradb/client" @@ -105,6 +107,42 @@ func createConfig(cfg *config.Config) error { return cfg.CreateRootDirAndConfigFile() } +// loadOrGeneratePrivateKey loads the private key from the given path +// or generates a new key and writes it to a file at the given path. +func loadOrGeneratePrivateKey(path string) (crypto.PrivKey, error) { + key, err := loadPrivateKey(path) + if err == nil { + return key, nil + } + if os.IsNotExist(err) { + return generatePrivateKey(path) + } + return nil, err +} + +// generatePrivateKey generates a new private key and writes it +// to a file at the given path. +func generatePrivateKey(path string) (crypto.PrivKey, error) { + key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) + if err != nil { + return nil, err + } + data, err := crypto.MarshalPrivateKey(key) + if err != nil { + return nil, err + } + return key, os.WriteFile(path, data, 0644) +} + +// loadPrivateKey reads the private key from the file at the given path. 
+func loadPrivateKey(path string) (crypto.PrivKey, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return crypto.UnmarshalPrivateKey(data) +} + func writeJSON(cmd *cobra.Command, out any) error { enc := json.NewEncoder(cmd.OutOrStdout()) enc.SetIndent("", " ") diff --git a/net/config.go b/net/config.go index 28fd73f25e..1055d0b1c8 100644 --- a/net/config.go +++ b/net/config.go @@ -16,6 +16,7 @@ import ( "time" cconnmgr "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/p2p/net/connmgr" ma "github.com/multiformats/go-multiaddr" "google.golang.org/grpc" @@ -27,7 +28,7 @@ import ( type Options struct { ListenAddrs []ma.Multiaddr TCPAddr ma.Multiaddr - DataPath string + PrivateKey crypto.PrivKey EnablePubSub bool EnableRelay bool GRPCServerOptions []grpc.ServerOption @@ -74,7 +75,6 @@ func WithConfig(cfg *config.Config) NodeOpt { } opt.EnableRelay = cfg.Net.RelayEnabled opt.EnablePubSub = cfg.Net.PubSubEnabled - opt.DataPath = cfg.Datastore.Badger.Path opt.ConnManager, err = NewConnManager(100, 400, time.Second*20) if err != nil { return err @@ -83,10 +83,10 @@ func WithConfig(cfg *config.Config) NodeOpt { } } -// DataPath sets the data path. -func WithDataPath(path string) NodeOpt { +// WithPrivateKey sets the p2p host private key. +func WithPrivateKey(priv crypto.PrivKey) NodeOpt { return func(opt *Options) error { - opt.DataPath = path + opt.PrivateKey = priv return nil } } diff --git a/net/config_test.go b/net/config_test.go index bffc19aead..7c2b01dd06 100644 --- a/net/config_test.go +++ b/net/config_test.go @@ -14,6 +14,7 @@ import ( "testing" "time" + "github.com/libp2p/go-libp2p/core/crypto" ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" @@ -65,12 +66,14 @@ func TestWithConfigWitTCPAddressError(t *testing.T) { require.Contains(t, err.Error(), "failed to parse multiaddr") } -func TestWithDataPath(t *testing.T) { - path := "test/path" - opt, err := NewMergedOptions(WithDataPath(path)) +func TestWithPrivateKey(t *testing.T) { + key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) + require.NoError(t, err) + + opt, err := NewMergedOptions(WithPrivateKey(key)) require.NoError(t, err) require.NotNil(t, opt) - require.Equal(t, path, opt.DataPath) + require.Equal(t, key, opt.PrivateKey) } func TestWithPubSub(t *testing.T) { diff --git a/net/dialer_test.go b/net/dialer_test.go index 5e01b2384f..58bdf44665 100644 --- a/net/dialer_test.go +++ b/net/dialer_test.go @@ -27,16 +27,12 @@ func TestDial_WithConnectedPeer_NoError(t *testing.T) { ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) n2, err := NewNode( ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) @@ -55,16 +51,12 @@ func TestDial_WithConnectedPeerAndSecondConnection_NoError(t *testing.T) { ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) n2, err := NewNode( ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required 
option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) @@ -86,16 +78,12 @@ func TestDial_WithConnectedPeerAndSecondConnectionWithConnectionShutdown_Closing ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) n2, err := NewNode( ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) diff --git a/net/node.go b/net/node.go index 04838641f2..248650f5f2 100644 --- a/net/node.go +++ b/net/node.go @@ -19,8 +19,6 @@ package net import ( "context" "fmt" - "os" - "path/filepath" "sync" "sync/atomic" "time" @@ -96,9 +94,13 @@ func NewNode( } fin.Add(peerstore) - hostKey, err := getHostKey(options.DataPath) - if err != nil { - return nil, fin.Cleanup(err) + if options.PrivateKey == nil { + // generate an ephemeral private key + key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) + if err != nil { + return nil, fin.Cleanup(err) + } + options.PrivateKey = key } var ddht *dualdht.DHT @@ -106,7 +108,7 @@ func NewNode( libp2pOpts := []libp2p.Option{ libp2p.ConnectionManager(options.ConnManager), libp2p.DefaultTransports, - libp2p.Identity(hostKey), + libp2p.Identity(options.PrivateKey), libp2p.ListenAddrs(options.ListenAddrs...), libp2p.Peerstore(peerstore), libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { @@ -381,46 +383,6 @@ func (n *Node) WaitForPushLogFromPeerEvent(id peer.ID) error { } } -// replace with proper keystore -func getHostKey(keypath string) (crypto.PrivKey, error) { - // If a local datastore is used, the key is written to a file - pth := filepath.Join(keypath, "key") - _, err := os.Stat(pth) - if os.IsNotExist(err) { - key, bytes, err := newHostKey() - if err != nil { - return nil, err - } - if err := os.MkdirAll(keypath, os.ModePerm); err != nil { - return nil, err - } - if err = os.WriteFile(pth, bytes, 0400); err != nil { - return nil, err - } - return key, nil - } else if err != nil { - return nil, err - } else { - bytes, err := os.ReadFile(pth) - if err != nil { - return nil, err - } - return crypto.UnmarshalPrivateKey(bytes) - } -} - -func newHostKey() (crypto.PrivKey, []byte, error) { - priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) - if err != nil { - return nil, nil, err - } - key, err := crypto.MarshalPrivateKey(priv) - if err != nil { - return nil, nil, err - } - return priv, key, nil -} - func newDHT(ctx context.Context, h host.Host, dsb ds.Batching) (*dualdht.DHT, error) { dhtOpts := []dualdht.Option{ dualdht.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})), diff --git a/net/node_test.go b/net/node_test.go index c622be18ca..b099e9282c 100644 --- a/net/node_test.go +++ b/net/node_test.go @@ -55,8 +55,6 @@ func TestNewNode_WithEnableRelay_NoError(t *testing.T) { context.Background(), db, WithEnableRelay(true), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) } @@ -70,8 +68,6 @@ func TestNewNode_WithInvalidListenTCPAddrString_ParseError(t *testing.T) { context.Background(), db, 
WithListenTCPAddrString("/ip4/碎片整理"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") } @@ -85,8 +81,6 @@ func TestNewNode_WithDBClosed_NoError(t *testing.T) { _, err = NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.ErrorContains(t, err, "datastore closed") } @@ -100,8 +94,6 @@ func TestNewNode_NoPubSub_NoError(t *testing.T) { context.Background(), db, WithPubSub(false), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) require.Nil(t, n.ps) @@ -117,8 +109,6 @@ func TestNewNode_WithPubSub_NoError(t *testing.T) { ctx, db, WithPubSub(true), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -126,20 +116,6 @@ func TestNewNode_WithPubSub_NoError(t *testing.T) { require.NotNil(t, n.ps) } -func TestNewNode_WithPubSub_FailsWithoutWithDataPath(t *testing.T) { - ctx := context.Background() - store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) - require.NoError(t, err) - - _, err = NewNode( - ctx, - db, - WithPubSub(true), - ) - require.EqualError(t, err, "1 error occurred:\n\t* mkdir : no such file or directory\n\n") -} - func TestNodeClose_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) @@ -148,8 +124,6 @@ func TestNodeClose_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) err = n.Close() @@ -166,8 +140,6 @@ func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) n1.Boostrap([]peer.AddrInfo{}) @@ -183,16 +155,12 @@ func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) n2, err := NewNode( ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) @@ -212,16 +180,12 @@ func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) n2, err := NewNode( ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) addrs, err := netutils.ParsePeers([]string{ @@ -257,8 +221,6 @@ func TestListenAddrs_WithListenP2PAddrStrings_NoError(t 
*testing.T) { context.Background(), db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -273,8 +235,6 @@ func TestWithListenTCPAddrString_WithInvalidListenTCPAddrString_ParseError(t *te } func TestNodeConfig_NoError(t *testing.T) { - tempDir := t.TempDir() - cfg := config.DefaultConfig() cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/9179" cfg.Net.TCPAddress = "/ip4/0.0.0.0/tcp/9169" @@ -282,7 +242,6 @@ func TestNodeConfig_NoError(t *testing.T) { cfg.Net.RPCMaxConnectionIdle = "111s" cfg.Net.RelayEnabled = true cfg.Net.PubSubEnabled = true - cfg.Datastore.Badger.Path = tempDir configOpt := WithConfig(cfg) options, err := NewMergedOptions(configOpt) @@ -298,7 +257,6 @@ func TestNodeConfig_NoError(t *testing.T) { expectedOptions := Options{ ListenAddrs: []ma.Multiaddr{p2pAddr}, TCPAddr: tcpAddr, - DataPath: tempDir, EnablePubSub: true, EnableRelay: true, ConnManager: connManager, @@ -308,7 +266,6 @@ func TestNodeConfig_NoError(t *testing.T) { require.Equal(t, expectedOptions.ListenAddrs[k], v) } require.Equal(t, expectedOptions.TCPAddr.String(), options.TCPAddr.String()) - require.Equal(t, expectedOptions.DataPath, options.DataPath) require.Equal(t, expectedOptions.EnablePubSub, options.EnablePubSub) require.Equal(t, expectedOptions.EnableRelay, options.EnableRelay) } @@ -318,8 +275,6 @@ func TestSubscribeToPeerConnectionEvents_SubscriptionError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -352,8 +307,6 @@ func TestPeerConnectionEventEmitter_SingleEvent_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -369,8 +322,6 @@ func TestPeerConnectionEventEmitter_MultiEvent_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -389,8 +340,6 @@ func TestSubscribeToPubSubEvents_SubscriptionError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -423,8 +372,6 @@ func TestPubSubEventEmitter_SingleEvent_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -440,8 +387,6 @@ func TestPubSubEventEmitter_MultiEvent_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -460,8 +405,6 @@ func TestSubscribeToPushLogEvents_SubscriptionError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -494,8 +437,6 @@ func TestPushLogEventEmitter_SingleEvent_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required 
option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -511,8 +452,6 @@ func TestPushLogEventEmitter_MultiEvent_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -531,8 +470,6 @@ func TestWaitForPeerConnectionEvent_WithSamePeer_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -557,8 +494,6 @@ func TestWaitForPeerConnectionEvent_WithDifferentPeer_TimeoutError(t *testing.T) n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -577,8 +512,6 @@ func TestWaitForPeerConnectionEvent_WithDifferentPeerAndContextClosed_NoError(t n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -599,8 +532,6 @@ func TestWaitForPubSubEvent_WithSamePeer_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -625,8 +556,6 @@ func TestWaitForPubSubEvent_WithDifferentPeer_TimeoutError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -645,8 +574,6 @@ func TestWaitForPubSubEvent_WithDifferentPeerAndContextClosed_NoError(t *testing n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -667,8 +594,6 @@ func TestWaitForPushLogByPeerEvent_WithSamePeer_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -693,8 +618,6 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -713,8 +636,6 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeerAndContextClosed_NoError(t * n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -735,8 +656,6 @@ func TestWaitForPushLogFromPeerEvent_WithSamePeer_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -761,8 +680,6 @@ func TestWaitForPushLogFromPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ 
-781,8 +698,6 @@ func TestWaitForPushLogFromPeerEvent_WithDifferentPeerAndContextClosed_NoError(t n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) diff --git a/net/peer_test.go b/net/peer_test.go index 092e908cd2..dc6fbb4793 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -133,7 +133,6 @@ func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { ctx, db, WithConfig(cfg), - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -226,16 +225,12 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { ctx, db1, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) n2, err := NewNode( ctx, db2, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -266,16 +261,12 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { ctx, db1, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) n2, err := NewNode( ctx, db2, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -327,8 +318,6 @@ func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { ctx, db, WithPubSub(true), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -348,8 +337,6 @@ func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { ctx, db, WithPubSub(true), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) diff --git a/tests/integration/state.go b/tests/integration/state.go index 69bd65e2b5..4b48494c32 100644 --- a/tests/integration/state.go +++ b/tests/integration/state.go @@ -14,6 +14,8 @@ import ( "context" "testing" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/datastore" @@ -50,6 +52,9 @@ type state struct { // These synchronisation channels allow async actions to track their completion. syncChans []chan struct{} + // The private keys for any nodes. + nodePrivateKeys []crypto.PrivKey + // The addresses of any nodes configured. 
nodeAddresses []string @@ -99,6 +104,7 @@ func newState( allActionsDone: make(chan struct{}), subscriptionResultsChans: []chan func(){}, syncChans: []chan struct{}{}, + nodePrivateKeys: []crypto.PrivKey{}, nodeAddresses: []string{}, nodeConfigs: []config.Config{}, nodes: []*net.Node{}, diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 420e7f4c9c..1108608ef8 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -22,6 +22,7 @@ import ( "time" badger "github.com/dgraph-io/badger/v4" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -714,6 +715,7 @@ func restartNodes( continue } + key := s.nodePrivateKeys[i] cfg := s.nodeConfigs[i] // We need to make sure the node is configured with its old address, otherwise // a new one may be selected and reconnnection to it will fail. @@ -723,6 +725,7 @@ func restartNodes( s.ctx, db, net.WithConfig(&cfg), + net.WithPrivateKey(key), ) require.NoError(s.t, err) @@ -814,20 +817,19 @@ func configureNode( } cfg := action() - // WARNING: This is a horrible hack both deduplicates/randomizes peer IDs - // And affects where libp2p(?) stores some values on the file system, even when using - // an in memory store. - cfg.Datastore.Badger.Path = s.t.TempDir() - db, path, err := GetDatabase(s) //disable change dector, or allow it? require.NoError(s.t, err) + privateKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) + require.NoError(s.t, err) + var n *net.Node log.Info(s.ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) n, err = net.NewNode( s.ctx, db, net.WithConfig(&cfg), + net.WithPrivateKey(privateKey), ) require.NoError(s.t, err) @@ -842,6 +844,7 @@ func configureNode( address := fmt.Sprintf("%s/p2p/%s", n.ListenAddrs()[0].String(), n.PeerID()) s.nodeAddresses = append(s.nodeAddresses, address) s.nodeConfigs = append(s.nodeConfigs, cfg) + s.nodePrivateKeys = append(s.nodePrivateKeys, privateKey) s.nodes = append(s.nodes, n) s.dbPaths = append(s.dbPaths, path) From bc4c704fb6a37c7082a0665acd77b9249169d088 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Tue, 10 Oct 2023 13:39:20 -0700 Subject: [PATCH 23/55] refactor: Remove net GRPC API (#1927) ## Relevant issue(s) N/A ## Description This PR removes the GRPC API from the net package. The HTTP and CLI interfaces now include this functionality. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? 
`make test` Specify the platform(s) on which this was tested: - MacOS --- cli/p2p_collection_add.go | 25 +- cli/p2p_collection_remove.go | 25 +- cli/start.go | 57 +- client/p2p.go | 16 +- config/config.go | 61 +- config/config_test.go | 52 - config/configfile_yaml.gotmpl | 8 - db/p2p_collection_test.go | 27 +- db/txn_db.go | 66 +- go.mod | 1 - go.sum | 7 - http/client.go | 20 +- http/handler.go | 4 +- http/handler_store.go | 16 +- net/config.go | 17 - net/config_test.go | 25 - net/dag_test.go | 4 +- net/dialer_test.go | 6 +- net/node.go | 15 +- net/node_test.go | 150 +- net/pb/Makefile | 5 + net/pb/net.pb.go | 1251 +------- net/pb/net.proto | 79 - net/pb/net_grpc.pb.go | 283 +- net/pb/net_vtproto.pb.go | 2805 ++--------------- net/peer.go | 233 +- net/peer_test.go | 315 +- net/server_test.go | 46 +- tests/clients/cli/wrapper.go | 8 +- tests/clients/http/wrapper.go | 8 +- tests/integration/net/order/utils.go | 23 +- .../replicator/with_create_update_test.go | 11 +- tests/integration/p2p.go | 68 +- tests/integration/utils2.go | 2 +- 34 files changed, 760 insertions(+), 4979 deletions(-) diff --git a/cli/p2p_collection_add.go b/cli/p2p_collection_add.go index 6970e8daec..86e0d8f6f9 100644 --- a/cli/p2p_collection_add.go +++ b/cli/p2p_collection_add.go @@ -11,19 +11,38 @@ package cli import ( + "strings" + "github.com/spf13/cobra" ) func MakeP2PCollectionAddCommand() *cobra.Command { var cmd = &cobra.Command{ - Use: "add [collectionID]", + Use: "add [collectionIDs]", Short: "Add P2P collections", Long: `Add P2P collections to the synchronized pubsub topics. -The collections are synchronized between nodes of a pubsub network.`, +The collections are synchronized between nodes of a pubsub network. + +Example: add single collection + defradb client p2p collection add bae123 + +Example: add multiple collections + defradb client p2p collection add bae123,bae456 + `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { store := mustGetStoreContext(cmd) - return store.AddP2PCollection(cmd.Context(), args[0]) + + var collectionIDs []string + for _, id := range strings.Split(args[0], ",") { + id = strings.TrimSpace(id) + if id == "" { + continue + } + collectionIDs = append(collectionIDs, id) + } + + return store.AddP2PCollections(cmd.Context(), collectionIDs) }, } return cmd diff --git a/cli/p2p_collection_remove.go b/cli/p2p_collection_remove.go index ed67f5e7c6..0c4d14effd 100644 --- a/cli/p2p_collection_remove.go +++ b/cli/p2p_collection_remove.go @@ -11,19 +11,38 @@ package cli import ( + "strings" + "github.com/spf13/cobra" ) func MakeP2PCollectionRemoveCommand() *cobra.Command { var cmd = &cobra.Command{ - Use: "remove [collectionID]", + Use: "remove [collectionIDs]", Short: "Remove P2P collections", Long: `Remove P2P collections from the followed pubsub topics. -The removed collections will no longer be synchronized between nodes.`, +The removed collections will no longer be synchronized between nodes. 
+ +Example: remove single collection + defradb client p2p collection remove bae123 + +Example: remove multiple collections + defradb client p2p collection remove bae123,bae456 + `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { store := mustGetStoreContext(cmd) - return store.RemoveP2PCollection(cmd.Context(), args[0]) + + var collectionIDs []string + for _, id := range strings.Split(args[0], ",") { + id = strings.TrimSpace(id) + if id == "" { + continue + } + collectionIDs = append(collectionIDs, id) + } + + return store.RemoveP2PCollections(cmd.Context(), collectionIDs) }, } return cmd diff --git a/cli/start.go b/cli/start.go index 2962aa45e6..f0f8b19a8a 100644 --- a/cli/start.go +++ b/cli/start.go @@ -13,7 +13,6 @@ package cli import ( "context" "fmt" - gonet "net" "net/http" "os" "os/signal" @@ -22,12 +21,7 @@ import ( "syscall" badger "github.com/dgraph-io/badger/v4" - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - ma "github.com/multiformats/go-multiaddr" "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" @@ -38,7 +32,6 @@ import ( httpapi "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" - netpb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" ) @@ -114,15 +107,6 @@ func MakeStartCommand(cfg *config.Config) *cobra.Command { log.FeedbackFatalE(context.Background(), "Could not bind net.p2paddress", err) } - cmd.Flags().String( - "tcpaddr", cfg.Net.TCPAddress, - "Listener address for the tcp gRPC server (formatted as a libp2p MultiAddr)", - ) - err = cfg.BindFlag("net.tcpaddress", cmd.Flags().Lookup("tcpaddr")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind net.tcpaddress", err) - } - cmd.Flags().Bool( "no-p2p", cfg.Net.P2PDisabled, "Disable the peer-to-peer network synchronization system", @@ -269,7 +253,7 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { return nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", cfg.Net.Peers), err) } log.Debug(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) - n.Boostrap(addrs) + n.Bootstrap(addrs) } if err := n.Start(); err != nil { @@ -279,45 +263,6 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { db.Close(ctx) return nil, errors.Wrap("failed to start P2P listeners", err) } - - MtcpAddr, err := ma.NewMultiaddr(cfg.Net.TCPAddress) - if err != nil { - return nil, errors.Wrap("failed to parse multiaddress", err) - } - addr, err := netutils.TCPAddrFromMultiAddr(MtcpAddr) - if err != nil { - return nil, errors.Wrap("failed to parse TCP address", err) - } - - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return nil, errors.Wrap("failed to parse RPC timeout duration", err) - } - - server := grpc.NewServer( - grpc.UnaryInterceptor( - grpc_middleware.ChainUnaryServer( - grpc_recovery.UnaryServerInterceptor(), - ), - ), - grpc.KeepaliveParams( - keepalive.ServerParameters{ - MaxConnectionIdle: rpcTimeoutDuration, - }, - ), - ) - tcplistener, err := gonet.Listen("tcp", addr) - if err != nil { - return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", addr), err) - } - - go func() { - log.FeedbackInfo(ctx, "Started RPC server", 
logging.NewKV("Address", addr)) - netpb.RegisterCollectionServer(server, n.Peer) - if err := server.Serve(tcplistener); err != nil && !errors.Is(err, grpc.ErrServerStopped) { - log.FeedbackFatalE(ctx, "Failed to start RPC server", err) - } - }() } sOpt := []func(*httpapi.Server){ diff --git a/client/p2p.go b/client/p2p.go index 5f864fcb9d..800b946240 100644 --- a/client/p2p.go +++ b/client/p2p.go @@ -25,15 +25,15 @@ type P2P interface { // subscribed schemas. GetAllReplicators(ctx context.Context) ([]Replicator, error) - // AddP2PCollection adds the given collection ID that the P2P system - // subscribes to to the the persisted list. It will error if the provided - // collection ID is invalid. - AddP2PCollection(ctx context.Context, collectionID string) error + // AddP2PCollections adds the given collection IDs to the P2P system and + // subscribes to their topics. It will error if any of the provided + // collection IDs are invalid. + AddP2PCollections(ctx context.Context, collectionIDs []string) error - // RemoveP2PCollection removes the given collection ID that the P2P system - // subscribes to from the the persisted list. It will error if the provided - // collection ID is invalid. - RemoveP2PCollection(ctx context.Context, collectionID string) error + // RemoveP2PCollections removes the given collection IDs from the P2P system and + // unsubscribes from their topics. It will error if the provided + // collection IDs are invalid. + RemoveP2PCollections(ctx context.Context, collectionIDs []string) error // GetAllP2PCollections returns the list of persisted collection IDs that // the P2P system subscribes to. diff --git a/config/config.go b/config/config.go index 9832a92818..3b2a212c0a 100644 --- a/config/config.go +++ b/config/config.go @@ -51,7 +51,6 @@ import ( "strconv" "strings" "text/template" - "time" "github.com/mitchellh/mapstructure" ma "github.com/multiformats/go-multiaddr" @@ -350,48 +349,28 @@ func (apicfg *APIConfig) AddressToURL() string { // NetConfig configures aspects of network and peer-to-peer. 
type NetConfig struct { - P2PAddress string - P2PDisabled bool - Peers string - PubSubEnabled bool `mapstructure:"pubsub"` - RelayEnabled bool `mapstructure:"relay"` - RPCAddress string - RPCMaxConnectionIdle string - RPCTimeout string - TCPAddress string + P2PAddress string + P2PDisabled bool + Peers string + PubSubEnabled bool `mapstructure:"pubsub"` + RelayEnabled bool `mapstructure:"relay"` } func defaultNetConfig() *NetConfig { return &NetConfig{ - P2PAddress: "/ip4/0.0.0.0/tcp/9171", - P2PDisabled: false, - Peers: "", - PubSubEnabled: true, - RelayEnabled: false, - RPCAddress: "0.0.0.0:9161", - RPCMaxConnectionIdle: "5m", - RPCTimeout: "10s", - TCPAddress: "/ip4/0.0.0.0/tcp/9161", + P2PAddress: "/ip4/0.0.0.0/tcp/9171", + P2PDisabled: false, + Peers: "", + PubSubEnabled: true, + RelayEnabled: false, } } func (netcfg *NetConfig) validate() error { - _, err := time.ParseDuration(netcfg.RPCTimeout) - if err != nil { - return NewErrInvalidRPCTimeout(err, netcfg.RPCTimeout) - } - _, err = time.ParseDuration(netcfg.RPCMaxConnectionIdle) - if err != nil { - return NewErrInvalidRPCMaxConnectionIdle(err, netcfg.RPCMaxConnectionIdle) - } - _, err = ma.NewMultiaddr(netcfg.P2PAddress) + _, err := ma.NewMultiaddr(netcfg.P2PAddress) if err != nil { return NewErrInvalidP2PAddress(err, netcfg.P2PAddress) } - _, err = net.ResolveTCPAddr("tcp", netcfg.RPCAddress) - if err != nil { - return NewErrInvalidRPCAddress(err, netcfg.RPCAddress) - } if len(netcfg.Peers) > 0 { peers := strings.Split(netcfg.Peers, ",") maddrs := make([]ma.Multiaddr, len(peers)) @@ -405,24 +384,6 @@ func (netcfg *NetConfig) validate() error { return nil } -// RPCTimeoutDuration gives the RPC timeout as a time.Duration. -func (netcfg *NetConfig) RPCTimeoutDuration() (time.Duration, error) { - d, err := time.ParseDuration(netcfg.RPCTimeout) - if err != nil { - return d, NewErrInvalidRPCTimeout(err, netcfg.RPCTimeout) - } - return d, nil -} - -// RPCMaxConnectionIdleDuration gives the RPC MaxConnectionIdle as a time.Duration. -func (netcfg *NetConfig) RPCMaxConnectionIdleDuration() (time.Duration, error) { - d, err := time.ParseDuration(netcfg.RPCMaxConnectionIdle) - if err != nil { - return d, NewErrInvalidRPCMaxConnectionIdle(err, netcfg.RPCMaxConnectionIdle) - } - return d, nil -} - // LogConfig configures output and logger. 
type LoggingConfig struct { Level string diff --git a/config/config_test.go b/config/config_test.go index b7ff295efa..e29ef8aa81 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -15,7 +15,6 @@ import ( "os" "path/filepath" "testing" - "time" "github.com/stretchr/testify/assert" ) @@ -26,8 +25,6 @@ var envVarsDifferent = map[string]string{ "DEFRA_API_ADDRESS": "localhost:9999", "DEFRA_NET_P2PDISABLED": "true", "DEFRA_NET_P2PADDRESS": "/ip4/0.0.0.0/tcp/9876", - "DEFRA_NET_RPCADDRESS": "localhost:7777", - "DEFRA_NET_RPCTIMEOUT": "90s", "DEFRA_NET_PUBSUB": "false", "DEFRA_NET_RELAY": "false", "DEFRA_LOG_LEVEL": "error", @@ -41,8 +38,6 @@ var envVarsInvalid = map[string]string{ "DEFRA_API_ADDRESS": "^=+()&**()*(&))", "DEFRA_NET_P2PDISABLED": "^=+()&**()*(&))", "DEFRA_NET_P2PADDRESS": "^=+()&**()*(&))", - "DEFRA_NET_RPCADDRESS": "^=+()&**()*(&))", - "DEFRA_NET_RPCTIMEOUT": "^=+()&**()*(&))", "DEFRA_NET_PUBSUB": "^=+()&**()*(&))", "DEFRA_NET_RELAY": "^=+()&**()*(&))", "DEFRA_LOG_LEVEL": "^=+()&**()*(&))", @@ -178,8 +173,6 @@ func TestEnvVariablesAllConsidered(t *testing.T) { assert.Equal(t, "memory", cfg.Datastore.Store) assert.Equal(t, true, cfg.Net.P2PDisabled) assert.Equal(t, "/ip4/0.0.0.0/tcp/9876", cfg.Net.P2PAddress) - assert.Equal(t, "localhost:7777", cfg.Net.RPCAddress) - assert.Equal(t, "90s", cfg.Net.RPCTimeout) assert.Equal(t, false, cfg.Net.PubSubEnabled) assert.Equal(t, false, cfg.Net.RelayEnabled) assert.Equal(t, "error", cfg.Log.Level) @@ -390,51 +383,6 @@ func TestValidationInvalidNetConfigPeers(t *testing.T) { assert.ErrorIs(t, err, ErrFailedToValidateConfig) } -func TestValidationInvalidRPCMaxConnectionIdle(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCMaxConnectionIdle = "123123" - err := cfg.validate() - assert.ErrorIs(t, err, ErrFailedToValidateConfig) -} - -func TestValidationInvalidRPCTimeout(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCTimeout = "123123" - err := cfg.validate() - assert.ErrorIs(t, err, ErrFailedToValidateConfig) -} - -func TestValidationRPCTimeoutDuration(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCTimeout = "1s" - err := cfg.validate() - assert.NoError(t, err) -} - -func TestValidationInvalidRPCTimeoutDuration(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCTimeout = "123123" - err := cfg.validate() - assert.ErrorIs(t, err, ErrInvalidRPCTimeout) -} - -func TestValidationRPCMaxConnectionIdleDuration(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCMaxConnectionIdle = "1s" - err := cfg.validate() - assert.NoError(t, err) - duration, err := cfg.Net.RPCMaxConnectionIdleDuration() - assert.NoError(t, err) - assert.Equal(t, duration, 1*time.Second) -} - -func TestValidationInvalidMaxConnectionIdleDuration(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCMaxConnectionIdle = "*ˆ&%*&%" - err := cfg.validate() - assert.ErrorIs(t, err, ErrInvalidRPCMaxConnectionIdle) -} - func TestValidationInvalidLoggingConfig(t *testing.T) { cfg := DefaultConfig() cfg.Log.Level = "546578" diff --git a/config/configfile_yaml.gotmpl b/config/configfile_yaml.gotmpl index 8e011658e9..5346e41378 100644 --- a/config/configfile_yaml.gotmpl +++ b/config/configfile_yaml.gotmpl @@ -37,20 +37,12 @@ net: p2pdisabled: {{ .Net.P2PDisabled }} # Listening address of the P2P network p2paddress: {{ .Net.P2PAddress }} - # Listening address of the RPC endpoint - rpcaddress: {{ .Net.RPCAddress }} - # gRPC server address - tcpaddress: {{ .Net.TCPAddress }} - # Time duration after which a RPC connection to a peer times out - rpctimeout: {{ 
.Net.RPCTimeout }} # Whether the node has pubsub enabled or not pubsub: {{ .Net.PubSubEnabled }} # Enable libp2p's Circuit relay transport protocol https://docs.libp2p.io/concepts/circuit-relay/ relay: {{ .Net.RelayEnabled }} # List of peers to boostrap with, specified as multiaddresses (https://docs.libp2p.io/concepts/addressing/) peers: {{ .Net.Peers }} - # Amount of time after which an idle RPC connection would be closed - RPCMaxConnectionIdle: {{ .Net.RPCMaxConnectionIdle }} log: # Log level. Options are debug, info, error, fatal diff --git a/db/p2p_collection_test.go b/db/p2p_collection_test.go index db6b1e6417..acd80bd041 100644 --- a/db/p2p_collection_test.go +++ b/db/p2p_collection_test.go @@ -63,7 +63,7 @@ func TestAddP2PCollection(t *testing.T) { col := newTestCollection(t, ctx, db, "test") - err = db.AddP2PCollection(ctx, col.SchemaID()) + err = db.AddP2PCollections(ctx, []string{col.SchemaID()}) require.NoError(t, err) } @@ -74,20 +74,16 @@ func TestGetAllP2PCollection(t *testing.T) { defer db.Close(ctx) col1 := newTestCollection(t, ctx, db, "test1") - err = db.AddP2PCollection(ctx, col1.SchemaID()) - require.NoError(t, err) - col2 := newTestCollection(t, ctx, db, "test2") - err = db.AddP2PCollection(ctx, col2.SchemaID()) - require.NoError(t, err) - col3 := newTestCollection(t, ctx, db, "test3") - err = db.AddP2PCollection(ctx, col3.SchemaID()) + + collectionIDs := []string{col1.SchemaID(), col2.SchemaID(), col3.SchemaID()} + err = db.AddP2PCollections(ctx, collectionIDs) require.NoError(t, err) collections, err := db.GetAllP2PCollections(ctx) require.NoError(t, err) - require.ElementsMatch(t, collections, []string{col1.SchemaID(), col2.SchemaID(), col3.SchemaID()}) + require.ElementsMatch(t, collections, collectionIDs) } func TestRemoveP2PCollection(t *testing.T) { @@ -97,18 +93,15 @@ func TestRemoveP2PCollection(t *testing.T) { defer db.Close(ctx) col1 := newTestCollection(t, ctx, db, "test1") - err = db.AddP2PCollection(ctx, col1.SchemaID()) - require.NoError(t, err) - col2 := newTestCollection(t, ctx, db, "test2") - err = db.AddP2PCollection(ctx, col2.SchemaID()) - require.NoError(t, err) - col3 := newTestCollection(t, ctx, db, "test3") - err = db.AddP2PCollection(ctx, col3.SchemaID()) + + collectionIDs := []string{col1.SchemaID(), col2.SchemaID(), col3.SchemaID()} + + err = db.AddP2PCollections(ctx, collectionIDs) require.NoError(t, err) - err = db.RemoveP2PCollection(ctx, col2.SchemaID()) + err = db.RemoveP2PCollections(ctx, []string{col2.SchemaID()}) require.NoError(t, err) collections, err := db.GetAllP2PCollections(ctx) diff --git a/db/txn_db.go b/db/txn_db.go index b4cc32dee1..e996d9a9c8 100644 --- a/db/txn_db.go +++ b/db/txn_db.go @@ -121,54 +121,68 @@ func (db *explicitTxnDB) GetCollectionByVersionID( return db.getCollectionByVersionID(ctx, db.txn, schemaVersionID) } -// AddP2PCollection adds the given collection ID that the P2P system -// subscribes to to the the persisted list. It will error if the provided -// collection ID is invalid. -func (db *implicitTxnDB) AddP2PCollection(ctx context.Context, collectionID string) error { +// AddP2PCollections adds the given collection IDs to the P2P system and +// subscribes to their topics. It will error if any of the provided +// collection IDs are invalid. 
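+//
+// The whole batch is handled in a single transaction, so one invalid ID
+// aborts every subscription in the call. A minimal illustrative use (the
+// IDs are placeholders, not values from this change):
+//
+//	err := db.AddP2PCollections(ctx, []string{schemaID1, schemaID2})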
+func (db *implicitTxnDB) AddP2PCollections(ctx context.Context, collectionIDs []string) error { txn, err := db.NewTxn(ctx, false) if err != nil { return err } defer txn.Discard(ctx) - err = db.addP2PCollection(ctx, txn, collectionID) - if err != nil { - return err + for _, collectionID := range collectionIDs { + err = db.addP2PCollection(ctx, txn, collectionID) + if err != nil { + return err + } } - return txn.Commit(ctx) } -// AddP2PCollection adds the given collection ID that the P2P system -// subscribes to to the the persisted list. It will error if the provided -// collection ID is invalid. -func (db *explicitTxnDB) AddP2PCollection(ctx context.Context, collectionID string) error { - return db.addP2PCollection(ctx, db.txn, collectionID) +// AddP2PCollections adds the given collection IDs to the P2P system and +// subscribes to their topics. It will error if any of the provided +// collection IDs are invalid. +func (db *explicitTxnDB) AddP2PCollections(ctx context.Context, collectionIDs []string) error { + for _, collectionID := range collectionIDs { + err := db.addP2PCollection(ctx, db.txn, collectionID) + if err != nil { + return err + } + } + return nil } -// RemoveP2PCollection removes the given collection ID that the P2P system -// subscribes to from the the persisted list. It will error if the provided -// collection ID is invalid. -func (db *implicitTxnDB) RemoveP2PCollection(ctx context.Context, collectionID string) error { +// RemoveP2PCollections removes the given collection IDs from the P2P system and +// unsubscribes from their topics. It will error if the provided +// collection IDs are invalid. +func (db *implicitTxnDB) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { txn, err := db.NewTxn(ctx, false) if err != nil { return err } defer txn.Discard(ctx) - err = db.removeP2PCollection(ctx, txn, collectionID) - if err != nil { - return err + for _, collectionID := range collectionIDs { + err = db.removeP2PCollection(ctx, txn, collectionID) + if err != nil { + return err + } } - return txn.Commit(ctx) } -// RemoveP2PCollection removes the given collection ID that the P2P system -// subscribes to from the the persisted list. It will error if the provided -// collection ID is invalid. -func (db *explicitTxnDB) RemoveP2PCollection(ctx context.Context, collectionID string) error { - return db.removeP2PCollection(ctx, db.txn, collectionID) +// RemoveP2PCollections removes the given collection IDs from the P2P system and +// unsubscribes from their topics. It will error if the provided +// collection IDs are invalid. +func (db *explicitTxnDB) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { + for _, collectionID := range collectionIDs { + err := db.removeP2PCollection(ctx, db.txn, collectionID) + if err != nil { + return err + } + } + return nil } // GetAllCollections gets all the currently defined collections. 
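The batched methods above replace the per-ID `AddP2PCollection`/`RemoveP2PCollection` calls. A minimal sketch of the intended call pattern against the `client.Store` interface (the helper name and variables are illustrative, not part of this change); on the implicit-transaction path the batch commits atomically, so a failing ID discards the whole call:

	import (
		"context"

		"github.com/sourcenetwork/defradb/client"
	)

	// syncTopics subscribes to the added collection IDs and unsubscribes
	// from the removed ones, one batched call each.
	func syncTopics(ctx context.Context, store client.Store, added, removed []string) error {
		if err := store.AddP2PCollections(ctx, added); err != nil {
			return err // nothing from this batch was committed
		}
		return store.RemoveP2PCollections(ctx, removed)
	}
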
diff --git a/go.mod b/go.mod index 8489f17281..2659ca7667 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,6 @@ require ( github.com/go-errors/errors v1.5.1 github.com/gofrs/uuid/v5 v5.0.0 github.com/graphql-go/graphql v0.8.1 - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/iancoleman/strcase v0.3.0 github.com/ipfs/boxo v0.13.1 github.com/ipfs/go-block-format v0.2.0 diff --git a/go.sum b/go.sum index a7a1f36c26..34d3e57a48 100644 --- a/go.sum +++ b/go.sum @@ -233,7 +233,6 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -365,8 +364,6 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -1343,7 +1340,6 @@ go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -1358,7 +1354,6 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= @@ -1596,7 +1591,6 @@ golang.org/x/sys 
v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1749,7 +1743,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= diff --git a/http/client.go b/http/client.go index 9dd7b7b065..79ff9e559b 100644 --- a/http/client.go +++ b/http/client.go @@ -131,10 +131,14 @@ func (c *Client) GetAllReplicators(ctx context.Context) ([]client.Replicator, er return reps, nil } -func (c *Client) AddP2PCollection(ctx context.Context, collectionID string) error { - methodURL := c.http.baseURL.JoinPath("p2p", "collections", collectionID) +func (c *Client) AddP2PCollections(ctx context.Context, collectionIDs []string) error { + methodURL := c.http.baseURL.JoinPath("p2p", "collections") - req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), nil) + body, err := json.Marshal(collectionIDs) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body)) if err != nil { return err } @@ -142,10 +146,14 @@ func (c *Client) AddP2PCollection(ctx context.Context, collectionID string) erro return err } -func (c *Client) RemoveP2PCollection(ctx context.Context, collectionID string) error { - methodURL := c.http.baseURL.JoinPath("p2p", "collections", collectionID) +func (c *Client) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { + methodURL := c.http.baseURL.JoinPath("p2p", "collections") - req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), nil) + body, err := json.Marshal(collectionIDs) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body)) if err != nil { return err } diff --git a/http/handler.go b/http/handler.go index 242dc5938c..d8cd33c444 100644 --- a/http/handler.go +++ b/http/handler.go @@ -107,8 +107,8 @@ func NewHandler(db client.DB, opts ServerOptions) *Handler { }) p2p.Route("/collections", 
func(p2p_collections chi.Router) { p2p_collections.Get("/", store_handler.GetAllP2PCollections) - p2p_collections.Post("/{id}", store_handler.AddP2PCollection) - p2p_collections.Delete("/{id}", store_handler.RemoveP2PCollection) + p2p_collections.Post("/", store_handler.AddP2PCollection) + p2p_collections.Delete("/", store_handler.RemoveP2PCollection) }) }) api.Route("/debug", func(debug chi.Router) { diff --git a/http/handler_store.go b/http/handler_store.go index 120b9f9018..93563c2f90 100644 --- a/http/handler_store.go +++ b/http/handler_store.go @@ -17,8 +17,6 @@ import ( "io" "net/http" - "github.com/go-chi/chi/v5" - "github.com/sourcenetwork/defradb/client" ) @@ -70,7 +68,12 @@ func (s *storeHandler) GetAllReplicators(rw http.ResponseWriter, req *http.Reque func (s *storeHandler) AddP2PCollection(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) - err := store.AddP2PCollection(req.Context(), chi.URLParam(req, "id")) + var collectionIDs []string + if err := requestJSON(req, &collectionIDs); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err := store.AddP2PCollections(req.Context(), collectionIDs) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -81,7 +84,12 @@ func (s *storeHandler) AddP2PCollection(rw http.ResponseWriter, req *http.Reques func (s *storeHandler) RemoveP2PCollection(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) - err := store.RemoveP2PCollection(req.Context(), chi.URLParam(req, "id")) + var collectionIDs []string + if err := requestJSON(req, &collectionIDs); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err := store.RemoveP2PCollections(req.Context(), collectionIDs) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return diff --git a/net/config.go b/net/config.go index 1055d0b1c8..4c33dc02a6 100644 --- a/net/config.go +++ b/net/config.go @@ -27,7 +27,6 @@ import ( // Options is the node options. type Options struct { ListenAddrs []ma.Multiaddr - TCPAddr ma.Multiaddr PrivateKey crypto.PrivKey EnablePubSub bool EnableRelay bool @@ -69,10 +68,6 @@ func WithConfig(cfg *config.Config) NodeOpt { if err != nil { return err } - err = WithListenTCPAddrString(cfg.Net.TCPAddress)(opt) - if err != nil { - return err - } opt.EnableRelay = cfg.Net.RelayEnabled opt.EnablePubSub = cfg.Net.PubSubEnabled opt.ConnManager, err = NewConnManager(100, 400, time.Second*20) @@ -121,18 +116,6 @@ func WithListenP2PAddrStrings(addrs ...string) NodeOpt { } } -// ListenTCPAddrString sets the TCP address to listen on, as Multiaddr. -func WithListenTCPAddrString(addr string) NodeOpt { - return func(opt *Options) error { - a, err := ma.NewMultiaddr(addr) - if err != nil { - return err - } - opt.TCPAddr = a - return nil - } -} - // ListenAddrs sets the address to listen on given as MultiAddr(s). 
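//
// For example (the address is illustrative):
//
//	addr, _ := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/9171")
//	opts, err := NewMergedOptions(WithListenAddrs(addr))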
func WithListenAddrs(addrs ...ma.Multiaddr) NodeOpt { return func(opt *Options) error { diff --git a/net/config_test.go b/net/config_test.go index 7c2b01dd06..6f306c29ed 100644 --- a/net/config_test.go +++ b/net/config_test.go @@ -55,17 +55,6 @@ func TestWithConfigWithP2PAddressError(t *testing.T) { require.Contains(t, err.Error(), "failed to parse multiaddr") } -func TestWithConfigWitTCPAddressError(t *testing.T) { - cfg := config.Config{ - Net: &config.NetConfig{ - P2PAddress: "/ip4/0.0.0.0/tcp/9999", - TCPAddress: "/willerror/0.0.0.0/tcp/9999", - }, - } - err := WithConfig(&cfg)(&Options{}) - require.Contains(t, err.Error(), "failed to parse multiaddr") -} - func TestWithPrivateKey(t *testing.T) { key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) require.NoError(t, err) @@ -104,20 +93,6 @@ func TestWithListenP2PAddrStrings(t *testing.T) { require.Equal(t, addr, opt.ListenAddrs[0].String()) } -func TestWithListenTCPAddrStringWithError(t *testing.T) { - addr := "/willerror/0.0.0.0/tcp/9999" - _, err := NewMergedOptions(WithListenTCPAddrString(addr)) - require.Contains(t, err.Error(), "failed to parse multiaddr") -} - -func TestWithListenTCPAddrString(t *testing.T) { - addr := "/ip4/0.0.0.0/tcp/9999" - opt, err := NewMergedOptions(WithListenTCPAddrString(addr)) - require.NoError(t, err) - require.NotNil(t, opt) - require.Equal(t, addr, opt.TCPAddr.String()) -} - func TestWithListenAddrs(t *testing.T) { addr := "/ip4/0.0.0.0/tcp/9999" a, err := ma.NewMultiaddr(addr) diff --git a/net/dag_test.go b/net/dag_test.go index d0e9a18ce7..7373967a76 100644 --- a/net/dag_test.go +++ b/net/dag_test.go @@ -190,7 +190,7 @@ func TestSendJobWorker_WithPeerAndNoChildren_NoError(t *testing.T) { addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) require.NoError(t, err) - n2.Boostrap(addrs) + n2.Bootstrap(addrs) done := make(chan struct{}) go func() { @@ -268,7 +268,7 @@ func TestSendJobWorker_WithPeerAndChildren_NoError(t *testing.T) { addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) require.NoError(t, err) - n2.Boostrap(addrs) + n2.Bootstrap(addrs) done := make(chan struct{}) go func() { diff --git a/net/dialer_test.go b/net/dialer_test.go index 58bdf44665..d092602490 100644 --- a/net/dialer_test.go +++ b/net/dialer_test.go @@ -39,7 +39,7 @@ func TestDial_WithConnectedPeer_NoError(t *testing.T) { if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) + n2.Bootstrap(addrs) _, err = n1.server.dial(n2.PeerID()) require.NoError(t, err) } @@ -63,7 +63,7 @@ func TestDial_WithConnectedPeerAndSecondConnection_NoError(t *testing.T) { if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) + n2.Bootstrap(addrs) _, err = n1.server.dial(n2.PeerID()) require.NoError(t, err) @@ -90,7 +90,7 @@ func TestDial_WithConnectedPeerAndSecondConnectionWithConnectionShutdown_Closing if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) + n2.Bootstrap(addrs) _, err = n1.server.dial(n2.PeerID()) require.NoError(t, err) diff --git a/net/node.go b/net/node.go index 248650f5f2..392267fefa 100644 --- a/net/node.go +++ b/net/node.go @@ -54,7 +54,7 @@ var evtWaitTimeout = 10 * time.Second // Node is a networked peer instance of DefraDB. 
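//
// A construction sketch (the option and addresses are illustrative):
//
//	n, err := NewNode(ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"))
//	if err != nil {
//		// handle error
//	}
//	n.Bootstrap(addrs)   // connect to known peers (renamed from Boostrap)
//	info := n.PeerInfo() // the node's ID and listen addresses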
type Node struct { // embed the DB interface into the node - client.DB + DB client.DB *Peer @@ -160,7 +160,6 @@ func NewNode( h, ddht, ps, - options.TCPAddr, options.GRPCServerOptions, options.GRPCDialOptions, ) @@ -192,8 +191,8 @@ func NewNode( return n, nil } -// Boostrap connects to the given peers. -func (n *Node) Boostrap(addrs []peer.AddrInfo) { +// Bootstrap connects to the given peers. +func (n *Node) Bootstrap(addrs []peer.AddrInfo) { var connected uint64 var wg sync.WaitGroup @@ -234,6 +233,14 @@ func (n *Node) PeerID() peer.ID { return n.host.ID() } +// PeerInfo returns the node's peer id and listening addresses. +func (n *Node) PeerInfo() peer.AddrInfo { + return peer.AddrInfo{ + ID: n.host.ID(), + Addrs: n.host.Addrs(), + } +} + // subscribeToPeerConnectionEvents subscribes the node to the event bus for a peer connection change. func (n *Node) subscribeToPeerConnectionEvents() { sub, err := n.host.EventBus().Subscribe(new(event.EvtPeerConnectednessChanged)) diff --git a/net/node_test.go b/net/node_test.go index b099e9282c..941d171726 100644 --- a/net/node_test.go +++ b/net/node_test.go @@ -11,7 +11,6 @@ package net import ( - "bytes" "context" "testing" "time" @@ -27,7 +26,6 @@ import ( badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/db" - "github.com/sourcenetwork/defradb/logging" netutils "github.com/sourcenetwork/defradb/net/utils" ) @@ -59,19 +57,6 @@ func TestNewNode_WithEnableRelay_NoError(t *testing.T) { require.NoError(t, err) } -func TestNewNode_WithInvalidListenTCPAddrString_ParseError(t *testing.T) { - ctx := context.Background() - store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) - require.NoError(t, err) - _, err = NewNode( - context.Background(), - db, - WithListenTCPAddrString("/ip4/碎片整理"), - ) - require.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") -} - func TestNewNode_WithDBClosed_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) @@ -142,7 +127,7 @@ func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), ) require.NoError(t, err) - n1.Boostrap([]peer.AddrInfo{}) + n1.Bootstrap([]peer.AddrInfo{}) } func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { @@ -167,7 +152,7 @@ func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) + n2.Bootstrap(addrs) } func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing.T) { @@ -195,21 +180,7 @@ func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing "/ip4/0.0.0.0/tcp/1236/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci4", }) require.NoError(t, err) - n2.Boostrap(addrs) -} - -func mergeOptions(nodeOpts ...NodeOpt) (Options, error) { - var options Options - var nodeOpt NodeOpt - for _, opt := range append(nodeOpts, nodeOpt) { - if opt == nil { - continue - } - if err := opt(&options); err != nil { - return options, err - } - } - return options, nil + n2.Bootstrap(addrs) } func TestListenAddrs_WithListenP2PAddrStrings_NoError(t *testing.T) { @@ -227,19 +198,9 @@ func TestListenAddrs_WithListenP2PAddrStrings_NoError(t *testing.T) { require.Contains(t, n.ListenAddrs()[0].String(), "/tcp/") } -func TestWithListenTCPAddrString_WithInvalidListenTCPAddrString_ParseError(t *testing.T) { - opt 
:= WithListenTCPAddrString("/ip4/碎片整理") - options, err := mergeOptions(opt) - require.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") - require.Equal(t, Options{}, options) -} - func TestNodeConfig_NoError(t *testing.T) { cfg := config.DefaultConfig() cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/9179" - cfg.Net.TCPAddress = "/ip4/0.0.0.0/tcp/9169" - cfg.Net.RPCTimeout = "100s" - cfg.Net.RPCMaxConnectionIdle = "111s" cfg.Net.RelayEnabled = true cfg.Net.PubSubEnabled = true @@ -250,13 +211,10 @@ func TestNodeConfig_NoError(t *testing.T) { // confirming it provides the same config as a manually constructed node.Options p2pAddr, err := ma.NewMultiaddr(cfg.Net.P2PAddress) require.NoError(t, err) - tcpAddr, err := ma.NewMultiaddr(cfg.Net.TCPAddress) - require.NoError(t, err) connManager, err := NewConnManager(100, 400, time.Second*20) require.NoError(t, err) expectedOptions := Options{ ListenAddrs: []ma.Multiaddr{p2pAddr}, - TCPAddr: tcpAddr, EnablePubSub: true, EnableRelay: true, ConnManager: connManager, @@ -265,58 +223,11 @@ func TestNodeConfig_NoError(t *testing.T) { for k, v := range options.ListenAddrs { require.Equal(t, expectedOptions.ListenAddrs[k], v) } - require.Equal(t, expectedOptions.TCPAddr.String(), options.TCPAddr.String()) + require.Equal(t, expectedOptions.EnablePubSub, options.EnablePubSub) require.Equal(t, expectedOptions.EnableRelay, options.EnableRelay) } -func TestSubscribeToPeerConnectionEvents_SubscriptionError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - - n.Peer.host = &mockHost{n.Peer.host} - - n.subscribeToPeerConnectionEvents() - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - require.Equal(t, "failed to subscribe to peer connectedness changed event: mock error", logLines[0]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") -} - -func TestPeerConnectionEventEmitter_SingleEvent_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - - emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) - require.NoError(t, err) - - err = emitter.Emit(event.EvtPeerConnectednessChanged{}) - require.NoError(t, err) -} - func TestPeerConnectionEventEmitter_MultiEvent_NoError(t *testing.T) { db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( @@ -343,43 +254,9 @@ func TestSubscribeToPubSubEvents_SubscriptionError(t *testing.T) { ) require.NoError(t, err) - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - n.Peer.host = &mockHost{n.Peer.host} n.subscribeToPubSubEvents() - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - require.Equal(t, "failed to subscribe to pubsub event: mock error", logLines[0]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") -} - -func TestPubSubEventEmitter_SingleEvent_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - ) - require.NoError(t, err) - - emitter, err := 
n.host.EventBus().Emitter(new(EvtPubSub)) - require.NoError(t, err) - - err = emitter.Emit(EvtPubSub{}) - require.NoError(t, err) } func TestPubSubEventEmitter_MultiEvent_NoError(t *testing.T) { @@ -408,28 +285,9 @@ func TestSubscribeToPushLogEvents_SubscriptionError(t *testing.T) { ) require.NoError(t, err) - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - n.Peer.host = &mockHost{n.Peer.host} n.subscribeToPushLogEvents() - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - require.Equal(t, "failed to subscribe to push log event: mock error", logLines[0]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") } func TestPushLogEventEmitter_SingleEvent_NoError(t *testing.T) { diff --git a/net/pb/Makefile b/net/pb/Makefile index 62eef77354..233665c334 100644 --- a/net/pb/Makefile +++ b/net/pb/Makefile @@ -3,6 +3,11 @@ GO = $(PB:.proto=.pb.go) all: $(GO) +deps: + go install google.golang.org/protobuf/cmd/protoc-gen-go@latest + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest + go install github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto@latest + %.pb.go: %.proto protoc \ --go_out=. --plugin protoc-gen-go="${GOBIN}/protoc-gen-go" \ diff --git a/net/pb/net.pb.go b/net/pb/net.pb.go index 70daae73a7..c1a3cf3212 100644 --- a/net/pb/net.pb.go +++ b/net/pb/net.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.9 +// protoc-gen-go v1.31.0 +// protoc v4.24.3 // source: net.proto package net_pb @@ -467,727 +467,33 @@ func (*GetHeadLogReply) Descriptor() ([]byte, []int) { return file_net_proto_rawDescGZIP(), []int{10} } -type SetReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` - Addr []byte `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` -} - -func (x *SetReplicatorRequest) Reset() { - *x = SetReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetReplicatorRequest) ProtoMessage() {} - -func (x *SetReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetReplicatorRequest.ProtoReflect.Descriptor instead. 
-func (*SetReplicatorRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{11} -} - -func (x *SetReplicatorRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -func (x *SetReplicatorRequest) GetAddr() []byte { - if x != nil { - return x.Addr - } - return nil -} - -type SetReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` -} - -func (x *SetReplicatorReply) Reset() { - *x = SetReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetReplicatorReply) ProtoMessage() {} - -func (x *SetReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetReplicatorReply.ProtoReflect.Descriptor instead. -func (*SetReplicatorReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{12} -} - -func (x *SetReplicatorReply) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -type DeleteReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` - Collections []string `protobuf:"bytes,2,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *DeleteReplicatorRequest) Reset() { - *x = DeleteReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteReplicatorRequest) ProtoMessage() {} - -func (x *DeleteReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteReplicatorRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteReplicatorRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{13} -} - -func (x *DeleteReplicatorRequest) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -func (x *DeleteReplicatorRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -type DeleteReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` -} - -func (x *DeleteReplicatorReply) Reset() { - *x = DeleteReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteReplicatorReply) ProtoMessage() {} - -func (x *DeleteReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteReplicatorReply.ProtoReflect.Descriptor instead. -func (*DeleteReplicatorReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{14} -} - -func (x *DeleteReplicatorReply) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -type GetAllReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetAllReplicatorRequest) Reset() { - *x = GetAllReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorRequest) ProtoMessage() {} - -func (x *GetAllReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorRequest.ProtoReflect.Descriptor instead. 
-func (*GetAllReplicatorRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{15} -} - -type GetAllReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Replicators []*GetAllReplicatorReply_Replicators `protobuf:"bytes,1,rep,name=replicators,proto3" json:"replicators,omitempty"` -} - -func (x *GetAllReplicatorReply) Reset() { - *x = GetAllReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorReply) ProtoMessage() {} - -func (x *GetAllReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorReply.ProtoReflect.Descriptor instead. -func (*GetAllReplicatorReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{16} -} - -func (x *GetAllReplicatorReply) GetReplicators() []*GetAllReplicatorReply_Replicators { - if x != nil { - return x.Replicators - } - return nil -} - -type AddP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *AddP2PCollectionsRequest) Reset() { - *x = AddP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddP2PCollectionsRequest) ProtoMessage() {} - -func (x *AddP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
-func (*AddP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{17} -} - -func (x *AddP2PCollectionsRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -type AddP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` -} - -func (x *AddP2PCollectionsReply) Reset() { - *x = AddP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddP2PCollectionsReply) ProtoMessage() {} - -func (x *AddP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddP2PCollectionsReply.ProtoReflect.Descriptor instead. -func (*AddP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{18} -} - -func (x *AddP2PCollectionsReply) GetErr() string { - if x != nil { - return x.Err - } - return "" -} - -type RemoveP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *RemoveP2PCollectionsRequest) Reset() { - *x = RemoveP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveP2PCollectionsRequest) ProtoMessage() {} - -func (x *RemoveP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
-func (*RemoveP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{19} -} - -func (x *RemoveP2PCollectionsRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -type RemoveP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` -} - -func (x *RemoveP2PCollectionsReply) Reset() { - *x = RemoveP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveP2PCollectionsReply) ProtoMessage() {} - -func (x *RemoveP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveP2PCollectionsReply.ProtoReflect.Descriptor instead. -func (*RemoveP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{20} -} - -func (x *RemoveP2PCollectionsReply) GetErr() string { - if x != nil { - return x.Err - } - return "" -} - -type GetAllP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetAllP2PCollectionsRequest) Reset() { - *x = GetAllP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsRequest) ProtoMessage() {} - -func (x *GetAllP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
-func (*GetAllP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{21} -} - -type GetAllP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []*GetAllP2PCollectionsReply_Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *GetAllP2PCollectionsReply) Reset() { - *x = GetAllP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsReply) ProtoMessage() {} - -func (x *GetAllP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsReply.ProtoReflect.Descriptor instead. -func (*GetAllP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{22} -} - -func (x *GetAllP2PCollectionsReply) GetCollections() []*GetAllP2PCollectionsReply_Collection { - if x != nil { - return x.Collections - } - return nil -} - -// Record is a thread record containing link data. -type Document_Log struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // block is the top-level node's raw data as an ipld.Block. - Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` -} - -func (x *Document_Log) Reset() { - *x = Document_Log{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Document_Log) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Document_Log) ProtoMessage() {} - -func (x *Document_Log) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Document_Log.ProtoReflect.Descriptor instead. -func (*Document_Log) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *Document_Log) GetBlock() []byte { - if x != nil { - return x.Block - } - return nil -} - -type PushLogRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // docKey is the DocKey of the document that is affected by the log. - DocKey []byte `protobuf:"bytes,1,opt,name=docKey,proto3" json:"docKey,omitempty"` - // cid is the CID of the composite of the document. - Cid []byte `protobuf:"bytes,2,opt,name=cid,proto3" json:"cid,omitempty"` - // schemaID is the SchemaID of the collection that the document resides in. - SchemaID []byte `protobuf:"bytes,3,opt,name=schemaID,proto3" json:"schemaID,omitempty"` - // creator is the PeerID of the peer that created the log. - Creator string `protobuf:"bytes,4,opt,name=creator,proto3" json:"creator,omitempty"` - // log hold the block that represent version of the document. 
- Log *Document_Log `protobuf:"bytes,6,opt,name=log,proto3" json:"log,omitempty"` -} - -func (x *PushLogRequest_Body) Reset() { - *x = PushLogRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PushLogRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PushLogRequest_Body) ProtoMessage() {} - -func (x *PushLogRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PushLogRequest_Body.ProtoReflect.Descriptor instead. -func (*PushLogRequest_Body) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{7, 0} -} - -func (x *PushLogRequest_Body) GetDocKey() []byte { - if x != nil { - return x.DocKey - } - return nil -} - -func (x *PushLogRequest_Body) GetCid() []byte { - if x != nil { - return x.Cid - } - return nil -} - -func (x *PushLogRequest_Body) GetSchemaID() []byte { - if x != nil { - return x.SchemaID - } - return nil -} - -func (x *PushLogRequest_Body) GetCreator() string { - if x != nil { - return x.Creator - } - return "" -} - -func (x *PushLogRequest_Body) GetLog() *Document_Log { - if x != nil { - return x.Log - } - return nil -} - -type GetAllReplicatorReply_Replicators struct { +// Record is a thread record containing link data. +type Document_Log struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Info *GetAllReplicatorReply_Replicators_Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` - Schemas []string `protobuf:"bytes,2,rep,name=schemas,proto3" json:"schemas,omitempty"` + // block is the top-level node's raw data as an ipld.Block. + Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` } -func (x *GetAllReplicatorReply_Replicators) Reset() { - *x = GetAllReplicatorReply_Replicators{} +func (x *Document_Log) Reset() { + *x = Document_Log{} if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[25] + mi := &file_net_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetAllReplicatorReply_Replicators) String() string { +func (x *Document_Log) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetAllReplicatorReply_Replicators) ProtoMessage() {} +func (*Document_Log) ProtoMessage() {} -func (x *GetAllReplicatorReply_Replicators) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[25] +func (x *Document_Log) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1198,51 +504,52 @@ func (x *GetAllReplicatorReply_Replicators) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use GetAllReplicatorReply_Replicators.ProtoReflect.Descriptor instead. 
-func (*GetAllReplicatorReply_Replicators) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{16, 0} -} - -func (x *GetAllReplicatorReply_Replicators) GetInfo() *GetAllReplicatorReply_Replicators_Info { - if x != nil { - return x.Info - } - return nil +// Deprecated: Use Document_Log.ProtoReflect.Descriptor instead. +func (*Document_Log) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{0, 0} } -func (x *GetAllReplicatorReply_Replicators) GetSchemas() []string { +func (x *Document_Log) GetBlock() []byte { if x != nil { - return x.Schemas + return x.Block } return nil } -type GetAllReplicatorReply_Replicators_Info struct { +type PushLogRequest_Body struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Addrs []byte `protobuf:"bytes,2,opt,name=addrs,proto3" json:"addrs,omitempty"` + // docKey is the DocKey of the document that is affected by the log. + DocKey []byte `protobuf:"bytes,1,opt,name=docKey,proto3" json:"docKey,omitempty"` + // cid is the CID of the composite of the document. + Cid []byte `protobuf:"bytes,2,opt,name=cid,proto3" json:"cid,omitempty"` + // schemaID is the SchemaID of the collection that the document resides in. + SchemaID []byte `protobuf:"bytes,3,opt,name=schemaID,proto3" json:"schemaID,omitempty"` + // creator is the PeerID of the peer that created the log. + Creator string `protobuf:"bytes,4,opt,name=creator,proto3" json:"creator,omitempty"` + // log hold the block that represent version of the document. + Log *Document_Log `protobuf:"bytes,6,opt,name=log,proto3" json:"log,omitempty"` } -func (x *GetAllReplicatorReply_Replicators_Info) Reset() { - *x = GetAllReplicatorReply_Replicators_Info{} +func (x *PushLogRequest_Body) Reset() { + *x = PushLogRequest_Body{} if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[26] + mi := &file_net_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetAllReplicatorReply_Replicators_Info) String() string { +func (x *PushLogRequest_Body) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetAllReplicatorReply_Replicators_Info) ProtoMessage() {} +func (*PushLogRequest_Body) ProtoMessage() {} -func (x *GetAllReplicatorReply_Replicators_Info) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[26] +func (x *PushLogRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1253,78 +560,44 @@ func (x *GetAllReplicatorReply_Replicators_Info) ProtoReflect() protoreflect.Mes return mi.MessageOf(x) } -// Deprecated: Use GetAllReplicatorReply_Replicators_Info.ProtoReflect.Descriptor instead. -func (*GetAllReplicatorReply_Replicators_Info) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{16, 0, 0} +// Deprecated: Use PushLogRequest_Body.ProtoReflect.Descriptor instead. 
+func (*PushLogRequest_Body) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{7, 0} } -func (x *GetAllReplicatorReply_Replicators_Info) GetId() []byte { +func (x *PushLogRequest_Body) GetDocKey() []byte { if x != nil { - return x.Id + return x.DocKey } return nil } -func (x *GetAllReplicatorReply_Replicators_Info) GetAddrs() []byte { +func (x *PushLogRequest_Body) GetCid() []byte { if x != nil { - return x.Addrs + return x.Cid } return nil } -type GetAllP2PCollectionsReply_Collection struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetAllP2PCollectionsReply_Collection) Reset() { - *x = GetAllP2PCollectionsReply_Collection{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsReply_Collection) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsReply_Collection) ProtoMessage() {} - -func (x *GetAllP2PCollectionsReply_Collection) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *PushLogRequest_Body) GetSchemaID() []byte { + if x != nil { + return x.SchemaID } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsReply_Collection.ProtoReflect.Descriptor instead. -func (*GetAllP2PCollectionsReply_Collection) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{22, 0} + return nil } -func (x *GetAllP2PCollectionsReply_Collection) GetId() string { +func (x *PushLogRequest_Body) GetCreator() string { if x != nil { - return x.Id + return x.Creator } return "" } -func (x *GetAllP2PCollectionsReply_Collection) GetName() string { +func (x *PushLogRequest_Body) GetLog() *Document_Log { if x != nil { - return x.Name + return x.Log } - return "" + return nil } var File_net_proto protoreflect.FileDescriptor @@ -1360,124 +633,30 @@ var file_net_proto_rawDesc = []byte{ 0x6c, 0x6f, 0x67, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x11, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x48, - 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x4c, 0x0a, 0x14, 0x53, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x22, 0x2c, 0x0a, 0x12, 0x53, 0x65, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, 0x53, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x52, 
0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, - 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2f, 0x0a, 0x15, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, 0x19, 0x0a, - 0x17, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, - 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x4b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, - 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x73, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x1a, - 0x99, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, - 0x42, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, - 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, - 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x1a, 0x2c, 0x0a, - 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x22, 0x3c, 0x0a, 0x18, 0x41, - 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, - 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2a, 0x0a, 0x16, 0x41, 0x64, 0x64, - 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x3f, 0x0a, 0x1b, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, - 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 
0x22, 0x2d, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, - 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, - 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, - 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x1a, 0x30, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xd1, 0x02, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x45, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, - 0x1a, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, - 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x6e, 0x65, - 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, 0x44, - 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x1b, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, - 0x2e, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, - 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, - 0x00, 0x12, 0x36, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x12, 0x15, 0x2e, 0x6e, 0x65, - 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, - 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x07, 0x50, 0x75, 0x73, - 0x68, 0x4c, 0x6f, 0x67, 0x12, 0x16, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, - 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x6e, - 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, - 0x6f, 0x67, 0x12, 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x48, - 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, - 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x32, 0xa3, 
0x04, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, - 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, - 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x74, 0x2e, - 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x11, 0x47, 0x65, - 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, - 0x1f, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, - 0x00, 0x12, 0x57, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, - 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, - 0x62, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, - 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, - 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, - 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6e, 0x65, 0x74, 0x2e, - 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, - 0x5a, 0x08, 0x2f, 0x3b, 0x6e, 0x65, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, 
0xd1, 0x02, 0x0a, 0x07, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x44, 0x6f, + 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x1a, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, + 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x48, + 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x1b, + 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, + 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6e, 0x65, + 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, + 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x4c, + 0x6f, 0x67, 0x12, 0x15, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, + 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x6e, 0x65, 0x74, 0x2e, + 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, + 0x12, 0x39, 0x0a, 0x07, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x12, 0x16, 0x2e, 0x6e, 0x65, + 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, + 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0a, 0x47, + 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x12, 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, + 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, + 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, + 0x0a, 0x5a, 0x08, 0x2f, 0x3b, 0x6e, 0x65, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1492,70 +671,40 @@ func file_net_proto_rawDescGZIP() []byte { return file_net_proto_rawDescData } -var file_net_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_net_proto_msgTypes = make([]protoimpl.MessageInfo, 13) var file_net_proto_goTypes = []interface{}{ - (*Document)(nil), // 0: net.pb.Document - (*GetDocGraphRequest)(nil), // 1: net.pb.GetDocGraphRequest - (*GetDocGraphReply)(nil), // 2: net.pb.GetDocGraphReply - (*PushDocGraphRequest)(nil), // 3: net.pb.PushDocGraphRequest - (*PushDocGraphReply)(nil), // 4: net.pb.PushDocGraphReply - (*GetLogRequest)(nil), // 5: net.pb.GetLogRequest - (*GetLogReply)(nil), // 6: net.pb.GetLogReply - (*PushLogRequest)(nil), // 7: net.pb.PushLogRequest - (*GetHeadLogRequest)(nil), // 8: net.pb.GetHeadLogRequest - (*PushLogReply)(nil), // 9: net.pb.PushLogReply - (*GetHeadLogReply)(nil), // 10: net.pb.GetHeadLogReply - (*SetReplicatorRequest)(nil), // 11: net.pb.SetReplicatorRequest - (*SetReplicatorReply)(nil), // 12: net.pb.SetReplicatorReply - (*DeleteReplicatorRequest)(nil), // 13: net.pb.DeleteReplicatorRequest - (*DeleteReplicatorReply)(nil), // 14: net.pb.DeleteReplicatorReply - (*GetAllReplicatorRequest)(nil), // 15: net.pb.GetAllReplicatorRequest - (*GetAllReplicatorReply)(nil), // 16: net.pb.GetAllReplicatorReply - (*AddP2PCollectionsRequest)(nil), // 17: 
net.pb.AddP2PCollectionsRequest - (*AddP2PCollectionsReply)(nil), // 18: net.pb.AddP2PCollectionsReply - (*RemoveP2PCollectionsRequest)(nil), // 19: net.pb.RemoveP2PCollectionsRequest - (*RemoveP2PCollectionsReply)(nil), // 20: net.pb.RemoveP2PCollectionsReply - (*GetAllP2PCollectionsRequest)(nil), // 21: net.pb.GetAllP2PCollectionsRequest - (*GetAllP2PCollectionsReply)(nil), // 22: net.pb.GetAllP2PCollectionsReply - (*Document_Log)(nil), // 23: net.pb.Document.Log - (*PushLogRequest_Body)(nil), // 24: net.pb.PushLogRequest.Body - (*GetAllReplicatorReply_Replicators)(nil), // 25: net.pb.GetAllReplicatorReply.Replicators - (*GetAllReplicatorReply_Replicators_Info)(nil), // 26: net.pb.GetAllReplicatorReply.Replicators.Info - (*GetAllP2PCollectionsReply_Collection)(nil), // 27: net.pb.GetAllP2PCollectionsReply.Collection + (*Document)(nil), // 0: net.pb.Document + (*GetDocGraphRequest)(nil), // 1: net.pb.GetDocGraphRequest + (*GetDocGraphReply)(nil), // 2: net.pb.GetDocGraphReply + (*PushDocGraphRequest)(nil), // 3: net.pb.PushDocGraphRequest + (*PushDocGraphReply)(nil), // 4: net.pb.PushDocGraphReply + (*GetLogRequest)(nil), // 5: net.pb.GetLogRequest + (*GetLogReply)(nil), // 6: net.pb.GetLogReply + (*PushLogRequest)(nil), // 7: net.pb.PushLogRequest + (*GetHeadLogRequest)(nil), // 8: net.pb.GetHeadLogRequest + (*PushLogReply)(nil), // 9: net.pb.PushLogReply + (*GetHeadLogReply)(nil), // 10: net.pb.GetHeadLogReply + (*Document_Log)(nil), // 11: net.pb.Document.Log + (*PushLogRequest_Body)(nil), // 12: net.pb.PushLogRequest.Body } var file_net_proto_depIdxs = []int32{ - 24, // 0: net.pb.PushLogRequest.body:type_name -> net.pb.PushLogRequest.Body - 25, // 1: net.pb.GetAllReplicatorReply.replicators:type_name -> net.pb.GetAllReplicatorReply.Replicators - 27, // 2: net.pb.GetAllP2PCollectionsReply.collections:type_name -> net.pb.GetAllP2PCollectionsReply.Collection - 23, // 3: net.pb.PushLogRequest.Body.log:type_name -> net.pb.Document.Log - 26, // 4: net.pb.GetAllReplicatorReply.Replicators.info:type_name -> net.pb.GetAllReplicatorReply.Replicators.Info - 1, // 5: net.pb.Service.GetDocGraph:input_type -> net.pb.GetDocGraphRequest - 3, // 6: net.pb.Service.PushDocGraph:input_type -> net.pb.PushDocGraphRequest - 5, // 7: net.pb.Service.GetLog:input_type -> net.pb.GetLogRequest - 7, // 8: net.pb.Service.PushLog:input_type -> net.pb.PushLogRequest - 8, // 9: net.pb.Service.GetHeadLog:input_type -> net.pb.GetHeadLogRequest - 11, // 10: net.pb.Collection.SetReplicator:input_type -> net.pb.SetReplicatorRequest - 13, // 11: net.pb.Collection.DeleteReplicator:input_type -> net.pb.DeleteReplicatorRequest - 15, // 12: net.pb.Collection.GetAllReplicators:input_type -> net.pb.GetAllReplicatorRequest - 17, // 13: net.pb.Collection.AddP2PCollections:input_type -> net.pb.AddP2PCollectionsRequest - 19, // 14: net.pb.Collection.RemoveP2PCollections:input_type -> net.pb.RemoveP2PCollectionsRequest - 21, // 15: net.pb.Collection.GetAllP2PCollections:input_type -> net.pb.GetAllP2PCollectionsRequest - 2, // 16: net.pb.Service.GetDocGraph:output_type -> net.pb.GetDocGraphReply - 4, // 17: net.pb.Service.PushDocGraph:output_type -> net.pb.PushDocGraphReply - 6, // 18: net.pb.Service.GetLog:output_type -> net.pb.GetLogReply - 9, // 19: net.pb.Service.PushLog:output_type -> net.pb.PushLogReply - 10, // 20: net.pb.Service.GetHeadLog:output_type -> net.pb.GetHeadLogReply - 12, // 21: net.pb.Collection.SetReplicator:output_type -> net.pb.SetReplicatorReply - 14, // 22: net.pb.Collection.DeleteReplicator:output_type -> 
net.pb.DeleteReplicatorReply - 16, // 23: net.pb.Collection.GetAllReplicators:output_type -> net.pb.GetAllReplicatorReply - 18, // 24: net.pb.Collection.AddP2PCollections:output_type -> net.pb.AddP2PCollectionsReply - 20, // 25: net.pb.Collection.RemoveP2PCollections:output_type -> net.pb.RemoveP2PCollectionsReply - 22, // 26: net.pb.Collection.GetAllP2PCollections:output_type -> net.pb.GetAllP2PCollectionsReply - 16, // [16:27] is the sub-list for method output_type - 5, // [5:16] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 12, // 0: net.pb.PushLogRequest.body:type_name -> net.pb.PushLogRequest.Body + 11, // 1: net.pb.PushLogRequest.Body.log:type_name -> net.pb.Document.Log + 1, // 2: net.pb.Service.GetDocGraph:input_type -> net.pb.GetDocGraphRequest + 3, // 3: net.pb.Service.PushDocGraph:input_type -> net.pb.PushDocGraphRequest + 5, // 4: net.pb.Service.GetLog:input_type -> net.pb.GetLogRequest + 7, // 5: net.pb.Service.PushLog:input_type -> net.pb.PushLogRequest + 8, // 6: net.pb.Service.GetHeadLog:input_type -> net.pb.GetHeadLogRequest + 2, // 7: net.pb.Service.GetDocGraph:output_type -> net.pb.GetDocGraphReply + 4, // 8: net.pb.Service.PushDocGraph:output_type -> net.pb.PushDocGraphReply + 6, // 9: net.pb.Service.GetLog:output_type -> net.pb.GetLogReply + 9, // 10: net.pb.Service.PushLog:output_type -> net.pb.PushLogReply + 10, // 11: net.pb.Service.GetHeadLog:output_type -> net.pb.GetHeadLogReply + 7, // [7:12] is the sub-list for method output_type + 2, // [2:7] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_net_proto_init() } @@ -1697,150 +846,6 @@ func file_net_proto_init() { } } file_net_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { 
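// Each Exporter closure hands the protoimpl runtime a pointer to one of a
// message's unexported bookkeeping fields (state, sizeCache, unknownFields),
// which the runtime falls back on when its unsafe fast path is unavailable.
// With the Collection service gone, the message table shrinks from 28 to 13
// entries, so the surviving nested types are renumbered: indices 11 and 12
// now map to Document_Log and PushLogRequest_Body.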
- switch v := v.(*AddP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Document_Log); i { case 0: return &v.state @@ -1852,7 +857,7 @@ func file_net_proto_init() { return nil } } - file_net_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_net_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PushLogRequest_Body); i { case 0: return &v.state @@ -1864,42 +869,6 @@ func file_net_proto_init() { return nil } } - file_net_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply_Replicators); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply_Replicators_Info); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsReply_Collection); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } } type x struct{} out := protoimpl.TypeBuilder{ @@ -1907,9 +876,9 @@ func file_net_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_net_proto_rawDesc, NumEnums: 0, - NumMessages: 28, + NumMessages: 13, NumExtensions: 0, - NumServices: 2, + NumServices: 1, }, GoTypes: file_net_proto_goTypes, DependencyIndexes: file_net_proto_depIdxs, diff --git a/net/pb/net.proto b/net/pb/net.proto index a4799a1d89..04bea485c8 100644 --- a/net/pb/net.proto +++ b/net/pb/net.proto @@ -65,82 +65,3 @@ service Service { // GetHeadLog from this peer rpc GetHeadLog(GetHeadLogRequest) returns (GetHeadLogReply) {} } - -message SetReplicatorRequest { - repeated string collections = 1; - bytes addr = 2; -} - -message SetReplicatorReply { - bytes 
peerID = 1; -} - -message DeleteReplicatorRequest { - bytes peerID = 1; - repeated string collections = 2; -} - -message DeleteReplicatorReply { - bytes peerID = 1; -} - -message GetAllReplicatorRequest {} - -message GetAllReplicatorReply { - message Replicators { - message Info { - bytes id = 1; - bytes addrs = 2; - } - Info info = 1; - repeated string schemas = 2; - } - - repeated Replicators replicators = 1; - -} - -message AddP2PCollectionsRequest { - repeated string collections = 1; -} - -message AddP2PCollectionsReply { - string err = 1; -} - -message RemoveP2PCollectionsRequest { - repeated string collections = 1; -} - -message RemoveP2PCollectionsReply { - string err = 1; -} - -message GetAllP2PCollectionsRequest {} - -message GetAllP2PCollectionsReply { - message Collection { - string id = 1; - string name = 2; - } - repeated Collection collections = 1; -} - - -// Collection is the peer-to-peer network API for document sync by replication and subscription to collections -service Collection { - // SetReplicator for this peer - rpc SetReplicator(SetReplicatorRequest) returns (SetReplicatorReply) {} - - // DeleteReplicator for this peer - rpc DeleteReplicator(DeleteReplicatorRequest) returns (DeleteReplicatorReply) {} - - // DeleteReplicator for this peer - rpc GetAllReplicators(GetAllReplicatorRequest) returns (GetAllReplicatorReply) {} - - rpc AddP2PCollections(AddP2PCollectionsRequest) returns (AddP2PCollectionsReply) {} - - rpc RemoveP2PCollections(RemoveP2PCollectionsRequest) returns (RemoveP2PCollectionsReply) {} - - rpc GetAllP2PCollections(GetAllP2PCollectionsRequest) returns (GetAllP2PCollectionsReply) {} -} \ No newline at end of file diff --git a/net/pb/net_grpc.pb.go b/net/pb/net_grpc.pb.go index e50cbec859..c42b111148 100644 --- a/net/pb/net_grpc.pb.go +++ b/net/pb/net_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v3.21.9 +// - protoc v4.24.3 // source: net.proto package net_pb @@ -265,284 +265,3 @@ var Service_ServiceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "net.proto", } - -const ( - Collection_SetReplicator_FullMethodName = "/net.pb.Collection/SetReplicator" - Collection_DeleteReplicator_FullMethodName = "/net.pb.Collection/DeleteReplicator" - Collection_GetAllReplicators_FullMethodName = "/net.pb.Collection/GetAllReplicators" - Collection_AddP2PCollections_FullMethodName = "/net.pb.Collection/AddP2PCollections" - Collection_RemoveP2PCollections_FullMethodName = "/net.pb.Collection/RemoveP2PCollections" - Collection_GetAllP2PCollections_FullMethodName = "/net.pb.Collection/GetAllP2PCollections" -) - -// CollectionClient is the client API for Collection service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
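// The generated client is a thin wrapper: each RPC method allocates the
// reply message and performs a unary Invoke on the underlying
// grpc.ClientConnInterface against the full method name (for example
// "/net.pb.Collection/SetReplicator"), returning the decoded reply or the
// transport error.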
-type CollectionClient interface { - // SetReplicator for this peer - SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) - // DeleteReplicator for this peer - DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) - // DeleteReplicator for this peer - GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) - AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) - RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) - GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) -} - -type collectionClient struct { - cc grpc.ClientConnInterface -} - -func NewCollectionClient(cc grpc.ClientConnInterface) CollectionClient { - return &collectionClient{cc} -} - -func (c *collectionClient) SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) { - out := new(SetReplicatorReply) - err := c.cc.Invoke(ctx, Collection_SetReplicator_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *collectionClient) DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) { - out := new(DeleteReplicatorReply) - err := c.cc.Invoke(ctx, Collection_DeleteReplicator_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *collectionClient) GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) { - out := new(GetAllReplicatorReply) - err := c.cc.Invoke(ctx, Collection_GetAllReplicators_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *collectionClient) AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) { - out := new(AddP2PCollectionsReply) - err := c.cc.Invoke(ctx, Collection_AddP2PCollections_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *collectionClient) RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) { - out := new(RemoveP2PCollectionsReply) - err := c.cc.Invoke(ctx, Collection_RemoveP2PCollections_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *collectionClient) GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) { - out := new(GetAllP2PCollectionsReply) - err := c.cc.Invoke(ctx, Collection_GetAllP2PCollections_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// CollectionServer is the server API for Collection service. 
-// All implementations must embed UnimplementedCollectionServer -// for forward compatibility -type CollectionServer interface { - // SetReplicator for this peer - SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) - // DeleteReplicator for this peer - DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) - // DeleteReplicator for this peer - GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) - AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) - RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) - GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) - mustEmbedUnimplementedCollectionServer() -} - -// UnimplementedCollectionServer must be embedded to have forward compatible implementations. -type UnimplementedCollectionServer struct { -} - -func (UnimplementedCollectionServer) SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetReplicator not implemented") -} -func (UnimplementedCollectionServer) DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteReplicator not implemented") -} -func (UnimplementedCollectionServer) GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAllReplicators not implemented") -} -func (UnimplementedCollectionServer) AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddP2PCollections not implemented") -} -func (UnimplementedCollectionServer) RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveP2PCollections not implemented") -} -func (UnimplementedCollectionServer) GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAllP2PCollections not implemented") -} -func (UnimplementedCollectionServer) mustEmbedUnimplementedCollectionServer() {} - -// UnsafeCollectionServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to CollectionServer will -// result in compilation errors. 
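// Embedding UnimplementedCollectionServer keeps user implementations forward
// compatible: any RPC added to the service later falls back to one of its
// codes.Unimplemented stubs instead of breaking the build, and the unexported
// mustEmbedUnimplementedCollectionServer method makes the embedding
// mandatory. UnsafeCollectionServer, below, is the documented opt-out.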
-type UnsafeCollectionServer interface { - mustEmbedUnimplementedCollectionServer() -} - -func RegisterCollectionServer(s grpc.ServiceRegistrar, srv CollectionServer) { - s.RegisterService(&Collection_ServiceDesc, srv) -} - -func _Collection_SetReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).SetReplicator(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_SetReplicator_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CollectionServer).SetReplicator(ctx, req.(*SetReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Collection_DeleteReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).DeleteReplicator(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_DeleteReplicator_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CollectionServer).DeleteReplicator(ctx, req.(*DeleteReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Collection_GetAllReplicators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAllReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).GetAllReplicators(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_GetAllReplicators_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CollectionServer).GetAllReplicators(ctx, req.(*GetAllReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Collection_AddP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).AddP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_AddP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CollectionServer).AddP2PCollections(ctx, req.(*AddP2PCollectionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Collection_RemoveP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).RemoveP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_RemoveP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(CollectionServer).RemoveP2PCollections(ctx, req.(*RemoveP2PCollectionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Collection_GetAllP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAllP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).GetAllP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_GetAllP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CollectionServer).GetAllP2PCollections(ctx, req.(*GetAllP2PCollectionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// Collection_ServiceDesc is the grpc.ServiceDesc for Collection service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Collection_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "net.pb.Collection", - HandlerType: (*CollectionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SetReplicator", - Handler: _Collection_SetReplicator_Handler, - }, - { - MethodName: "DeleteReplicator", - Handler: _Collection_DeleteReplicator_Handler, - }, - { - MethodName: "GetAllReplicators", - Handler: _Collection_GetAllReplicators_Handler, - }, - { - MethodName: "AddP2PCollections", - Handler: _Collection_AddP2PCollections_Handler, - }, - { - MethodName: "RemoveP2PCollections", - Handler: _Collection_RemoveP2PCollections_Handler, - }, - { - MethodName: "GetAllP2PCollections", - Handler: _Collection_GetAllP2PCollections_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "net.proto", -} diff --git a/net/pb/net_vtproto.pb.go b/net/pb/net_vtproto.pb.go index 9ac8b5c379..1f5b734a9d 100644 --- a/net/pb/net_vtproto.pb.go +++ b/net/pb/net_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
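// vtproto supplements the reflection-based protoimpl code above with
// generated MarshalVT/UnmarshalVT/SizeVT fast paths. MarshalToSizedBufferVT
// fills a buffer presized by SizeVT from the end backwards, so every nested
// length prefix is known without a second serialization pass.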
-// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: net.proto package net_pb @@ -516,1939 +516,190 @@ func (m *GetHeadLogReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SetReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil -} - -func (m *SetReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + dAtA[offset] = uint8(v) + return base } - -func (m *SetReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Document_Log) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Addr) > 0 { - i -= len(m.Addr) - copy(dAtA[i:], m.Addr) - i = encodeVarint(dAtA, i, uint64(len(m.Addr))) - i-- - dAtA[i] = 0x12 - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SetReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.Block) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - return dAtA[:n], nil -} - -func (m *SetReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + n += len(m.unknownFields) + return n } -func (m *SetReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Document) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil + l = len(m.DocKey) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.Head) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - return dAtA[:n], nil -} - -func (m *DeleteReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + n += len(m.unknownFields) + return n } -func (m *DeleteReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetDocGraphRequest) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx 
:= len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + n += len(m.unknownFields) + return n } -func (m *DeleteReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetDocGraphReply) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + n += len(m.unknownFields) + return n } -func (m *GetAllReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PushDocGraphRequest) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - return len(dAtA) - i, nil + n += len(m.unknownFields) + return n } -func (m *GetAllReplicatorReply_Replicators_Info) MarshalVT() (dAtA []byte, err error) { +func (m *PushDocGraphReply) SizeVT() (n int) { if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + return 0 } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply_Replicators_Info) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + var l int + _ = l + n += len(m.unknownFields) + return n } -func (m *GetAllReplicatorReply_Replicators_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetLogRequest) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Addrs) > 0 { - i -= len(m.Addrs) - copy(dAtA[i:], m.Addrs) - i = encodeVarint(dAtA, i, uint64(len(m.Addrs))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarint(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + n += len(m.unknownFields) + return n } -func (m *GetAllReplicatorReply_Replicators) MarshalVT() (dAtA []byte, err error) { +func (m *GetLogReply) 
SizeVT() (n int) { if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + return 0 } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply_Replicators) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + var l int + _ = l + n += len(m.unknownFields) + return n } -func (m *GetAllReplicatorReply_Replicators) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PushLogRequest_Body) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + l = len(m.DocKey) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - if len(m.Schemas) > 0 { - for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Schemas[iNdEx]) - copy(dAtA[i:], m.Schemas[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Schemas[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + l = len(m.Cid) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - if m.Info != nil { - size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + l = len(m.SchemaID) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil + l = len(m.Creator) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Replicators) > 0 { - for iNdEx := len(m.Replicators) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Replicators[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AddP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *AddP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AddP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - 
return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *AddP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Err) > 0 { - i -= len(m.Err) - copy(dAtA[i:], m.Err) - i = encodeVarint(dAtA, i, uint64(len(m.Err))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RemoveP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *RemoveP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RemoveP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *RemoveP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Err) > 0 { - i -= len(m.Err) - copy(dAtA[i:], m.Err) - i = encodeVarint(dAtA, i, uint64(len(m.Err))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarint(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Collections[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Document_Log) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Block) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *Document) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DocKey) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Head) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetDocGraphRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetDocGraphReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *PushDocGraphRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *PushDocGraphReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetLogRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetLogReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *PushLogRequest_Body) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DocKey) - if l > 0 { - n += 1 + l + 
sov(uint64(l)) - } - l = len(m.Cid) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.SchemaID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Creator) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Log != nil { - l = m.Log.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *PushLogRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Body != nil { - l = m.Body.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetHeadLogRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *PushLogReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetHeadLogReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *SetReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - l = len(m.Addr) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *SetReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *DeleteReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *DeleteReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply_Replicators_Info) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Addrs) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply_Replicators) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Info != nil { - l = m.Info.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if len(m.Schemas) > 0 { - for _, s := range m.Schemas { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Replicators) > 0 { - for _, e := range m.Replicators { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *AddP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *AddP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Err) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } 
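// The recurring `1 + l + sov(uint64(l))` is the wire size of a
// length-delimited field with a single-byte key: one byte for the tag
// (fieldNumber<<3 | wiretype 2, which fits in one byte while field numbers
// stay at or below 15), a varint carrying the payload length, then the
// payload itself. sov(x) = (bits.Len64(x|1)+6)/7 is simply the byte count
// of x as a base-128 varint; for example sov(300) = 2, matching the
// two-byte encoding 0xAC 0x02.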
- n += len(m.unknownFields) - return n -} - -func (m *RemoveP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *RemoveP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Err) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsReply_Collection) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, e := range m.Collections { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Document_Log) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Document_Log: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Document_Log: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Block = append(m.Block[:0], dAtA[iNdEx:postIndex]...) - if m.Block == nil { - m.Block = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Document) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Document: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Document: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) - if m.DocKey == nil { - m.DocKey = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Head", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Head = append(m.Head[:0], dAtA[iNdEx:postIndex]...) - if m.Head == nil { - m.Head = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetDocGraphRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetDocGraphRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetDocGraphReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetDocGraphReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PushDocGraphRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushDocGraphRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PushDocGraphReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushDocGraphReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetLogRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetLogReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLogReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLogReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PushLogRequest_Body) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushLogRequest_Body: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushLogRequest_Body: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) 
- if m.DocKey == nil { - m.DocKey = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cid = append(m.Cid[:0], dAtA[iNdEx:postIndex]...) - if m.Cid == nil { - m.Cid = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaID = append(m.SchemaID[:0], dAtA[iNdEx:postIndex]...) - if m.SchemaID == nil { - m.SchemaID = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Creator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Log == nil { - m.Log = &Document_Log{} - } - if err := m.Log.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PushLogRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Body == nil { - m.Body = &PushLogRequest_Body{} - } - if err := m.Body.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetHeadLogRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetHeadLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetHeadLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } + if m.Log != nil { + l = m.Log.SizeVT() + n += 1 + l + sov(uint64(l)) } + n += len(m.unknownFields) + return n +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (m *PushLogRequest) SizeVT() (n int) { + if m == nil { + return 0 } - return nil + var l int + _ = l + if m.Body != nil { + l = m.Body.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n } -func (m *PushLogReply) UnmarshalVT(dAtA []byte) error { + +func (m *GetHeadLogRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PushLogReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetHeadLogReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Document_Log) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2471,63 +722,46 @@ func (m *PushLogReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PushLogReply: wiretype end group for non-group") + return fmt.Errorf("proto: Document_Log: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PushLogReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Document_Log: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if byteLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetHeadLogReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Block = append(m.Block[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Block == nil { + m.Block = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetHeadLogReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetHeadLogReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2550,7 +784,7 @@ func (m *GetHeadLogReply) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { +func (m *Document) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2573,17 +807,17 @@ func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetReplicatorRequest: wiretype end group for non-group") + return fmt.Errorf("proto: Document: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Document: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -2593,27 +827,29 @@ func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) + if m.DocKey == nil { + m.DocKey = []byte{} + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Head", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2640,9 +876,9 @@ func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...) - if m.Addr == nil { - m.Addr = []byte{} + m.Head = append(m.Head[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Head == nil { + m.Head = []byte{} } iNdEx = postIndex default: @@ -2667,7 +903,7 @@ func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { +func (m *GetDocGraphRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2690,46 +926,12 @@ func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetReplicatorReply: wiretype end group for non-group") + return fmt.Errorf("proto: GetDocGraphRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) - if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2752,7 +954,7 @@ func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetDocGraphReply) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2772,81 +974,15 @@ func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { break } } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) 
- if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetDocGraphReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2869,7 +1005,7 @@ func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { +func (m *PushDocGraphRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2892,46 +1028,12 @@ func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteReplicatorReply: wiretype end group for non-group") + return fmt.Errorf("proto: PushDocGraphRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PushDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) 
- if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2954,7 +1056,7 @@ func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { +func (m *PushDocGraphReply) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2977,10 +1079,10 @@ func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PushDocGraphReply: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PushDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -3005,7 +1107,7 @@ func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error { +func (m *GetLogRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3028,80 +1130,12 @@ func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: wiretype end group for non-group") + return fmt.Errorf("proto: GetLogRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) - if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs[:0], dAtA[iNdEx:postIndex]...) 
- if m.Addrs == nil { - m.Addrs = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -3124,7 +1158,7 @@ func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error } return nil } -func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { +func (m *GetLogReply) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3147,80 +1181,12 @@ func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: wiretype end group for non-group") + return fmt.Errorf("proto: GetLogReply: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetLogReply: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Info == nil { - m.Info = &GetAllReplicatorReply_Replicators_Info{} - } - if err := m.Info.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schemas = append(m.Schemas, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -3243,7 +1209,7 @@ func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { +func (m *PushLogRequest_Body) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3266,17 +1232,17 @@ func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply: wiretype end group for non-group") + return fmt.Errorf("proto: PushLogRequest_Body: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PushLogRequest_Body: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) } - var msglen int + var 
byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3286,82 +1252,65 @@ func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Replicators = append(m.Replicators, &GetAllReplicatorReply_Replicators{}) - if err := m.Replicators[len(m.Replicators)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) + if m.DocKey == nil { + m.DocKey = []byte{} } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if byteLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Cid = append(m.Cid[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Cid == nil { + m.Cid = []byte{} + } + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SchemaID", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3371,78 +1320,29 @@ func (m *AddP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.SchemaID = append(m.SchemaID[:0], dAtA[iNdEx:postIndex]...) + if m.SchemaID == nil { + m.SchemaID = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3470,64 +1370,13 @@ func (m *AddP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Err = string(dAtA[iNdEx:postIndex]) + m.Creator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3537,23 +1386,27 @@ func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + if m.Log == nil { + m.Log = &Document_Log{} + } + if err := m.Log.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3577,7 +1430,7 @@ func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { +func (m *PushLogRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3600,17 +1453,17 @@ func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: wiretype end group for non-group") + return fmt.Errorf("proto: PushLogRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PushLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3620,23 +1473,27 @@ func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Err = string(dAtA[iNdEx:postIndex]) + if m.Body == nil { + m.Body = 
&PushLogRequest_Body{} + } + if err := m.Body.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3660,7 +1517,7 @@ func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetHeadLogRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3683,10 +1540,10 @@ func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetHeadLogRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetHeadLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -3711,7 +1568,7 @@ func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { +func (m *PushLogReply) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3734,76 +1591,12 @@ func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: wiretype end group for non-group") + return fmt.Errorf("proto: PushLogReply: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PushLogReply: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -3826,7 +1619,7 @@ func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { +func (m *GetHeadLogReply) UnmarshalVT(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3849,46 +1642,12 @@ func (m *GetAllP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: wiretype end group for non-group") + return fmt.Errorf("proto: GetHeadLogReply: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetHeadLogReply: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, &GetAllP2PCollectionsReply_Collection{}) - if err := m.Collections[len(m.Collections)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/net/peer.go b/net/peer.go index 26a24a38ae..e24d124210 100644 --- a/net/peer.go +++ b/net/peer.go @@ -32,10 +32,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" peerstore "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/routing" - ma "github.com/multiformats/go-multiaddr" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" @@ -86,8 +83,6 @@ type Peer struct { ctx context.Context cancel context.CancelFunc - - pb.UnimplementedCollectionServer } // NewPeer creates a new instance of the DefraDB server as a peer-to-peer node. @@ -97,7 +92,6 @@ func NewPeer( h host.Host, dht routing.Routing, ps *pubsub.PubSub, - tcpAddr ma.Multiaddr, serverOptions []grpc.ServerOption, dialOptions []grpc.DialOption, ) (*Peer, error) { @@ -313,47 +307,33 @@ func (p *Peer) RegisterNewDocument( return p.server.publishLog(p.ctx, schemaID, req) } -func marshalPeerID(id peer.ID) []byte { - b, _ := id.Marshal() // This will never return an error - return b -} - // SetReplicator adds a target peer node as a replication destination for documents in our DB. func (p *Peer) SetReplicator( ctx context.Context, - req *pb.SetReplicatorRequest, -) (*pb.SetReplicatorReply, error) { - addr, err := ma.NewMultiaddrBytes(req.Addr) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - + rep client.Replicator, +) error { txn, err := p.db.NewTxn(ctx, true) if err != nil { - return nil, err + return err } store := p.db.WithTxn(txn) - pid, err := p.setReplicator(ctx, store, addr, req.Collections...) + err = p.setReplicator(ctx, store, rep.Info, rep.Schemas...) if err != nil { txn.Discard(ctx) - return nil, err + return err } - return &pb.SetReplicatorReply{ - PeerID: marshalPeerID(pid), - }, txn.Commit(ctx) + return txn.Commit(ctx) } // setReplicator adds a target peer node as a replication destination for documents in our DB. 
func (p *Peer) setReplicator( ctx context.Context, store client.Store, - paddr ma.Multiaddr, + info peer.AddrInfo, collectionNames ...string, -) (peer.ID, error) { - var pid peer.ID - +) error { // verify collections collections := []client.Collection{} schemas := []string{} @@ -361,7 +341,7 @@ func (p *Peer) setReplicator( var err error collections, err = store.GetAllCollections(ctx) if err != nil { - return pid, errors.Wrap("failed to get all collections for replicator", err) + return errors.Wrap("failed to get all collections for replicator", err) } for _, col := range collections { schemas = append(schemas, col.SchemaID()) @@ -370,34 +350,19 @@ func (p *Peer) setReplicator( for _, cName := range collectionNames { col, err := store.GetCollectionByName(ctx, cName) if err != nil { - return pid, errors.Wrap("failed to get collection for replicator", err) + return errors.Wrap("failed to get collection for replicator", err) } collections = append(collections, col) schemas = append(schemas, col.SchemaID()) } } - // extra peerID - // Extract peer portion - p2p, err := paddr.ValueForProtocol(ma.P_P2P) - if err != nil { - return pid, err - } - pid, err = peer.Decode(p2p) - if err != nil { - return pid, err - } - // make sure it's not ourselves - if pid == p.host.ID() { - return pid, errors.New("can't target ourselves as a replicator") + if info.ID == p.host.ID() { + return errors.New("can't target ourselves as a replicator") } - - // add peer to peerstore - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(paddr) - if err != nil { - return pid, errors.Wrap(fmt.Sprintf("Failed to address info from %s", paddr), err) + if err := info.ID.Validate(); err != nil { + return err } // Add the destination's peer multiaddress in the peerstore. 
@@ -408,36 +373,36 @@ func (p *Peer) setReplicator( p.mu.Lock() for _, col := range collections { if reps, exists := p.replicators[col.SchemaID()]; exists { - if _, exists := reps[pid]; exists { + if _, exists := reps[info.ID]; exists { p.mu.Unlock() - return pid, errors.New(fmt.Sprintf( + return errors.New(fmt.Sprintf( "Replicator already exists for %s with PeerID %s", col.Name(), - pid, + info.ID, )) } } else { p.replicators[col.SchemaID()] = make(map[peer.ID]struct{}) } // add to replicators list for the collection - p.replicators[col.SchemaID()][pid] = struct{}{} + p.replicators[col.SchemaID()][info.ID] = struct{}{} } p.mu.Unlock() // Persist peer in datastore - err = p.db.SetReplicator(ctx, client.Replicator{ - Info: *info, + err := p.db.SetReplicator(ctx, client.Replicator{ + Info: info, Schemas: schemas, }) if err != nil { - return pid, errors.Wrap("failed to persist replicator", err) + return errors.Wrap("failed to persist replicator", err) } for _, col := range collections { // create read only txn and assign to col txn, err := p.db.NewTxn(ctx, true) if err != nil { - return pid, errors.Wrap("failed to get txn", err) + return errors.Wrap("failed to get txn", err) } col = col.WithTxn(txn) @@ -445,19 +410,19 @@ func (p *Peer) setReplicator( keysCh, err := col.GetAllDocKeys(ctx) if err != nil { txn.Discard(ctx) - return pid, errors.Wrap( + return errors.Wrap( fmt.Sprintf( "Failed to get dockey for replicator %s on %s", - pid, + info.ID, col.Name(), ), err, ) } - p.pushToReplicator(ctx, txn, col, keysCh, pid) + p.pushToReplicator(ctx, txn, col, keysCh, info.ID) } - return pid, nil + return nil } func (p *Peer) pushToReplicator( @@ -529,37 +494,38 @@ func (p *Peer) pushToReplicator( // DeleteReplicator removes a peer node from the replicators. func (p *Peer) DeleteReplicator( ctx context.Context, - req *pb.DeleteReplicatorRequest, -) (*pb.DeleteReplicatorReply, error) { + rep client.Replicator, +) error { log.Debug(ctx, "Received DeleteReplicator request") txn, err := p.db.NewTxn(ctx, true) if err != nil { - return nil, err + return err } store := p.db.WithTxn(txn) - err = p.deleteReplicator(ctx, store, peer.ID(req.PeerID), req.Collections...) + err = p.deleteReplicator(ctx, store, rep.Info, rep.Schemas...) if err != nil { txn.Discard(ctx) - return nil, err + return err } - return &pb.DeleteReplicatorReply{ - PeerID: req.PeerID, - }, txn.Commit(ctx) + return txn.Commit(ctx) } func (p *Peer) deleteReplicator( ctx context.Context, store client.Store, - pid peer.ID, + info peer.AddrInfo, collectionNames ...string, ) error { // make sure it's not ourselves - if pid == p.host.ID() { + if info.ID == p.host.ID() { return ErrSelfTargetForReplicator } + if err := info.ID.Validate(); err != nil { + return err + } // verify collections schemas := []string{} @@ -591,9 +557,9 @@ func (p *Peer) deleteReplicator( totalSchemas := 0 // Lets keep track of how many schemas are left for the replicator. for schema, rep := range p.replicators { - if _, exists := rep[pid]; exists { + if _, exists := rep[info.ID]; exists { if _, toDelete := schemaMap[schema]; toDelete { - delete(p.replicators[schema], pid) + delete(p.replicators[schema], info.ID) } else { totalSchemas++ } @@ -602,42 +568,21 @@ func (p *Peer) deleteReplicator( if totalSchemas == 0 { // Remove the destination's peer multiaddress in the peerstore. 
- p.host.Peerstore().ClearAddrs(pid) + p.host.Peerstore().ClearAddrs(info.ID) } // Delete peer in datastore return p.db.DeleteReplicator(ctx, client.Replicator{ - Info: peer.AddrInfo{ID: pid}, + Info: peer.AddrInfo{ID: info.ID}, Schemas: schemas, }) } // GetAllReplicators returns all replicators and the schemas that are replicated to them. -func (p *Peer) GetAllReplicators( - ctx context.Context, - req *pb.GetAllReplicatorRequest, -) (*pb.GetAllReplicatorReply, error) { +func (p *Peer) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { log.Debug(ctx, "Received GetAllReplicators request") - reps, err := p.db.GetAllReplicators(ctx) - if err != nil { - return nil, err - } - - pbReps := []*pb.GetAllReplicatorReply_Replicators{} - for _, rep := range reps { - pbReps = append(pbReps, &pb.GetAllReplicatorReply_Replicators{ - Info: &pb.GetAllReplicatorReply_Replicators_Info{ - Id: []byte(rep.Info.ID), - Addrs: rep.Info.Addrs[0].Bytes(), - }, - Schemas: rep.Schemas, - }) - } - - return &pb.GetAllReplicatorReply{ - Replicators: pbReps, - }, nil + return p.db.GetAllReplicators(ctx) } func (p *Peer) loadReplicators(ctx context.Context) error { @@ -850,50 +795,48 @@ func (p *Peer) rollbackRemovePubSubTopics(topics []string, cause error) error { return cause } -// AddP2PCollections adds the given collectionIDs to the pubsup topics. +// AddP2PCollections adds the given collectionIDs to the pubsub topics. // -// It will error if any of the given collectionIDs are invalid, in such a case some of the +// It will error if any of the given collectionIDs are invalid; in such a case some of the // changes to the server may still be applied. // // WARNING: Calling this on collections with a large number of documents may take a long time to process. func (p *Peer) AddP2PCollections( ctx context.Context, - req *pb.AddP2PCollectionsRequest, -) (*pb.AddP2PCollectionsReply, error) { + collectionIDs []string, +) error { log.Debug(ctx, "Received AddP2PCollections request") txn, err := p.db.NewTxn(p.ctx, false) if err != nil { - return nil, err + return err } defer txn.Discard(p.ctx) store := p.db.WithTxn(txn) // first let's make sure the collections actually exists storeCollections := []client.Collection{} - for _, col := range req.Collections { + for _, col := range collectionIDs { storeCol, err := store.GetCollectionBySchemaID(p.ctx, col) if err != nil { - return nil, err + return err } storeCollections = append(storeCollections, storeCol) } // Ensure we can add all the collections to the store on the transaction // before adding to topics. - for _, col := range req.Collections { - err := store.AddP2PCollection(p.ctx, col) - if err != nil { - return nil, err - } + err = store.AddP2PCollections(p.ctx, collectionIDs) + if err != nil { + return err } // Add pubsub topics and remove them if we get an error.
addedTopics := []string{} - for _, col := range req.Collections { + for _, col := range collectionIDs { err = p.server.addPubSubTopic(col, true) if err != nil { - return nil, p.rollbackAddPubSubTopics(addedTopics, err) + return p.rollbackAddPubSubTopics(addedTopics, err) } addedTopics = append(addedTopics, col) } @@ -904,12 +847,12 @@ func (p *Peer) AddP2PCollections( for _, col := range storeCollections { keyChan, err := col.GetAllDocKeys(p.ctx) if err != nil { - return nil, err + return err } for key := range keyChan { err := p.server.removePubSubTopic(key.Key.String()) if err != nil { - return nil, p.rollbackRemovePubSubTopics(removedTopics, err) + return p.rollbackRemovePubSubTopics(removedTopics, err) } removedTopics = append(removedTopics, key.Key.String()) } @@ -917,56 +860,54 @@ func (p *Peer) AddP2PCollections( if err = txn.Commit(p.ctx); err != nil { err = p.rollbackRemovePubSubTopics(removedTopics, err) - return nil, p.rollbackAddPubSubTopics(addedTopics, err) + return p.rollbackAddPubSubTopics(addedTopics, err) } - return &pb.AddP2PCollectionsReply{}, nil + return nil } -// RemoveP2PCollections removes the given collectionIDs from the pubsup topics. +// RemoveP2PCollections removes the given collectionIDs from the pubsub topics. // -// It will error if any of the given collectionIDs are invalid, in such a case some of the +// It will error if any of the given collectionIDs are invalid; in such a case some of the // changes to the server may still be applied. // // WARNING: Calling this on collections with a large number of documents may take a long time to process. func (p *Peer) RemoveP2PCollections( ctx context.Context, - req *pb.RemoveP2PCollectionsRequest, -) (*pb.RemoveP2PCollectionsReply, error) { + collectionIDs []string, +) error { log.Debug(ctx, "Received RemoveP2PCollections request") txn, err := p.db.NewTxn(p.ctx, false) if err != nil { - return nil, err + return err } defer txn.Discard(p.ctx) store := p.db.WithTxn(txn) // first let's make sure the collections actually exists storeCollections := []client.Collection{} - for _, col := range req.Collections { + for _, col := range collectionIDs { storeCol, err := store.GetCollectionBySchemaID(p.ctx, col) if err != nil { - return nil, err + return err } storeCollections = append(storeCollections, storeCol) } // Ensure we can remove all the collections to the store on the transaction // before adding to topics. - for _, col := range req.Collections { - err := store.RemoveP2PCollection(p.ctx, col) - if err != nil { - return nil, err - } + err = store.RemoveP2PCollections(p.ctx, collectionIDs) + if err != nil { + return err } // Remove pubsub topics and add them back if we get an error.
removedTopics := []string{} - for _, col := range req.Collections { + for _, col := range collectionIDs { err = p.server.removePubSubTopic(col) if err != nil { - return nil, p.rollbackRemovePubSubTopics(removedTopics, err) + return p.rollbackRemovePubSubTopics(removedTopics, err) } removedTopics = append(removedTopics, col) } @@ -977,12 +918,12 @@ func (p *Peer) RemoveP2PCollections( for _, col := range storeCollections { keyChan, err := col.GetAllDocKeys(p.ctx) if err != nil { - return nil, err + return err } for key := range keyChan { err := p.server.addPubSubTopic(key.Key.String(), true) if err != nil { - return nil, p.rollbackAddPubSubTopics(addedTopics, err) + return p.rollbackAddPubSubTopics(addedTopics, err) } addedTopics = append(addedTopics, key.Key.String()) } @@ -990,17 +931,14 @@ func (p *Peer) RemoveP2PCollections( if err = txn.Commit(p.ctx); err != nil { err = p.rollbackAddPubSubTopics(addedTopics, err) - return nil, p.rollbackRemovePubSubTopics(removedTopics, err) + return p.rollbackRemovePubSubTopics(removedTopics, err) } - return &pb.RemoveP2PCollectionsReply{}, nil + return nil } // GetAllP2PCollections gets all the collectionIDs from the pubsup topics -func (p *Peer) GetAllP2PCollections( - ctx context.Context, - req *pb.GetAllP2PCollectionsRequest, -) (*pb.GetAllP2PCollectionsReply, error) { +func (p *Peer) GetAllP2PCollections(ctx context.Context) ([]string, error) { log.Debug(ctx, "Received GetAllP2PCollections request") txn, err := p.db.NewTxn(p.ctx, false) @@ -1009,26 +947,11 @@ func (p *Peer) GetAllP2PCollections( } store := p.db.WithTxn(txn) - collections, err := p.db.GetAllP2PCollections(p.ctx) + collections, err := store.GetAllP2PCollections(p.ctx) if err != nil { txn.Discard(p.ctx) return nil, err } - pbCols := []*pb.GetAllP2PCollectionsReply_Collection{} - for _, colID := range collections { - col, err := store.GetCollectionBySchemaID(p.ctx, colID) - if err != nil { - txn.Discard(p.ctx) - return nil, err - } - pbCols = append(pbCols, &pb.GetAllP2PCollectionsReply_Collection{ - Id: colID, - Name: col.Name(), - }) - } - - return &pb.GetAllP2PCollectionsReply{ - Collections: pbCols, - }, txn.Commit(p.ctx) + return collections, txn.Commit(p.ctx) } diff --git a/net/peer_test.go b/net/peer_test.go index dc6fbb4793..15a4a2e55a 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -11,7 +11,6 @@ package net import ( - "bytes" "context" "testing" "time" @@ -22,9 +21,7 @@ import ( libp2p "github.com/libp2p/go-libp2p" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" mh "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" rpc "github.com/textileio/go-libp2p-pubsub-rpc" @@ -35,8 +32,6 @@ import ( "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/logging" - pb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" ) @@ -126,8 +121,6 @@ func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { cfg := config.DefaultConfig() cfg.Net.P2PAddress = randomMultiaddr - cfg.Net.RPCAddress = "0.0.0.0:0" - cfg.Net.TCPAddress = randomMultiaddr n, err := NewNode( ctx, @@ -148,7 +141,7 @@ func TestNewPeer_NoError(t *testing.T) { h, err := libp2p.New() require.NoError(t, err) - _, err = NewPeer(ctx, db, h, nil, nil, nil, nil, nil) + _, err = NewPeer(ctx, db, h, nil, nil, nil, nil) 
require.NoError(t, err) } @@ -158,7 +151,7 @@ func TestNewPeer_NoDB_NilDBError(t *testing.T) { h, err := libp2p.New() require.NoError(t, err) - _, err = NewPeer(ctx, nil, h, nil, nil, nil, nil, nil) + _, err = NewPeer(ctx, nil, h, nil, nil, nil, nil) require.ErrorIs(t, err, ErrNilDB) } @@ -197,7 +190,7 @@ func TestNewPeer_WithExistingTopic_TopicAlreadyExistsError(t *testing.T) { _, err = rpc.NewTopic(ctx, ps, h.ID(), doc.Key().String(), true) require.NoError(t, err) - _, err = NewPeer(ctx, db, h, nil, ps, nil, nil, nil) + _, err = NewPeer(ctx, db, h, nil, ps, nil, nil) require.ErrorContains(t, err, "topic already exists") } @@ -238,7 +231,7 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) + n2.Bootstrap(addrs) err = n2.Start() require.NoError(t, err) @@ -274,13 +267,7 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) - - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) + n2.Bootstrap(addrs) err = n1.Close() require.NoError(t, err) @@ -291,19 +278,6 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { err = n2.Start() require.NoError(t, err) - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - assert.Equal(t, "Failure while reconnecting to a known peer", logLines[0]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") - db1.Close(ctx) db2.Close(ctx) } @@ -407,20 +381,17 @@ func TestSetReplicator_NoError(t *testing.T) { }`) require.NoError(t, err) - addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - Collections: []string{"User"}, - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: *info, + Schemas: []string{"User"}, + }) require.NoError(t, err) } -func TestSetReplicator_WithInvalidAddress_InvalidArgumentError(t *testing.T) { +func TestSetReplicator_WithInvalidAddress_EmptyPeerIDError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) @@ -430,14 +401,11 @@ func TestSetReplicator_WithInvalidAddress_InvalidArgumentError(t *testing.T) { }`) require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: []byte("/some/invalid/address"), - Collections: []string{"User"}, - }, - ) - require.ErrorContains(t, err, "InvalidArgument") + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: peer.AddrInfo{}, + Schemas: []string{"User"}, + }) + require.ErrorContains(t, err, "empty peer ID") } func TestSetReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { @@ -446,16 +414,13 @@ func TestSetReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { db.Close(ctx) - addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - Collections: []string{"User"}, - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: *info, + Schemas: []string{"User"}, + }) 
require.ErrorContains(t, err, "datastore closed") } @@ -463,16 +428,13 @@ func TestSetReplicator_WithUndefinedCollection_KeyNotFoundError(t *testing.T) { ctx := context.Background() _, n := newTestNode(ctx, t) - addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - Collections: []string{"User"}, - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: *info, + Schemas: []string{"User"}, + }) require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found") } @@ -486,15 +448,12 @@ func TestSetReplicator_ForAllCollections_NoError(t *testing.T) { }`) require.NoError(t, err) - addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: *info, + }) require.NoError(t, err) } @@ -522,26 +481,7 @@ func TestPushToReplicator_SingleDocumentNoPeer_FailedToReplicateLogError(t *test txn, err := db.NewTxn(ctx, true) require.NoError(t, err) - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - n.pushToReplicator(ctx, txn, col, keysCh, n.PeerID()) - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - assert.Equal(t, "Failed to replicate log", logLines[0]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") } func TestDeleteReplicator_WithDBClosed_DataStoreClosedError(t *testing.T) { @@ -550,13 +490,10 @@ func TestDeleteReplicator_WithDBClosed_DataStoreClosedError(t *testing.T) { db.Close(ctx) - _, err := n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n.PeerID()), - Collections: []string{"User"}, - }, - ) + err := n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n.PeerInfo(), + Schemas: []string{"User"}, + }) require.ErrorContains(t, err, "datastore closed") } @@ -564,13 +501,10 @@ func TestDeleteReplicator_WithTargetSelf_SelfTargetForReplicatorError(t *testing ctx := context.Background() _, n := newTestNode(ctx, t) - _, err := n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n.PeerID()), - Collections: []string{"User"}, - }, - ) + err := n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n.PeerInfo(), + Schemas: []string{"User"}, + }) require.ErrorIs(t, err, ErrSelfTargetForReplicator) } @@ -580,13 +514,10 @@ func TestDeleteReplicator_WithInvalidCollection_KeyNotFoundError(t *testing.T) { _, n2 := newTestNode(ctx, t) - _, err := n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n2.PeerID()), - Collections: []string{"User"}, - }, - ) + err := n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + Schemas: []string{"User"}, + }) require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found") } @@ -602,23 +533,14 @@ func TestDeleteReplicator_WithCollectionAndPreviouslySetReplicator_NoError(t *te _, n2 := newTestNode(ctx, t) - addr, err := 
ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) - require.NoError(t, err) - - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) - _, err = n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n2.PeerID()), - }, - ) + err = n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) } @@ -628,12 +550,9 @@ func TestDeleteReplicator_WithNoCollection_NoError(t *testing.T) { _, n2 := newTestNode(ctx, t) - _, err := n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n2.PeerID()), - }, - ) + err := n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) } @@ -649,13 +568,10 @@ func TestDeleteReplicator_WithNotSetReplicator_KeyNotFoundError(t *testing.T) { _, n2 := newTestNode(ctx, t) - _, err = n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n2.PeerID()), - Collections: []string{"User"}, - }, - ) + err = n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + Schemas: []string{"User"}, + }) require.ErrorContains(t, err, "datastore: key not found") } @@ -671,30 +587,16 @@ func TestGetAllReplicator_WithReplicator_NoError(t *testing.T) { _, n2 := newTestNode(ctx, t) - addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) - require.NoError(t, err) - - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) - require.NoError(t, err) - - reps, err := n.Peer.GetAllReplicators( - ctx, - &pb.GetAllReplicatorRequest{}, - ) - require.NoError(t, err) - - info, err := peer.AddrInfoFromP2pAddr(addr) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) - id, err := info.ID.MarshalBinary() + reps, err := n.Peer.GetAllReplicators(ctx) require.NoError(t, err) - require.Equal(t, id, reps.Replicators[0].Info.Id) + require.Len(t, reps, 1) + require.Equal(t, n2.PeerInfo().ID, reps[0].Info.ID) } func TestGetAllReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { @@ -703,10 +605,7 @@ func TestGetAllReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { db.Close(ctx) - _, err := n.Peer.GetAllReplicators( - ctx, - &pb.GetAllReplicatorRequest{}, - ) + _, err := n.Peer.GetAllReplicators(ctx) require.ErrorContains(t, err, "datastore closed") } @@ -732,15 +631,9 @@ func TestLoadReplicator_WithReplicator_NoError(t *testing.T) { _, n2 := newTestNode(ctx, t) - addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) - require.NoError(t, err) - - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) err = n.Peer.loadReplicators(ctx) @@ -759,15 +652,9 @@ func TestLoadReplicator_WithReplicatorAndEmptyReplicatorMap_NoError(t *testing.T _, n2 := newTestNode(ctx, t) - addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) - require.NoError(t, err) - - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) n.replicators = make(map[string]map[peer.ID]struct{}) @@ -780,12 
+667,7 @@ func TestAddP2PCollections_WithInvalidCollectionID_NotFoundError(t *testing.T) { ctx := context.Background() _, n := newTestNode(ctx, t) - _, err := n.Peer.AddP2PCollections( - ctx, - &pb.AddP2PCollectionsRequest{ - Collections: []string{"invalid_collection"}, - }, - ) + err := n.Peer.AddP2PCollections(ctx, []string{"invalid_collection"}) require.Error(t, err, ds.ErrNotFound) } @@ -802,12 +684,7 @@ func TestAddP2PCollections_NoError(t *testing.T) { col, err := db.GetCollectionByName(ctx, "User") require.NoError(t, err) - _, err = n.Peer.AddP2PCollections( - ctx, - &pb.AddP2PCollectionsRequest{ - Collections: []string{col.SchemaID()}, - }, - ) + err = n.Peer.AddP2PCollections(ctx, []string{col.SchemaID()}) require.NoError(t, err) } @@ -815,12 +692,7 @@ func TestRemoveP2PCollectionsWithInvalidCollectionID(t *testing.T) { ctx := context.Background() _, n := newTestNode(ctx, t) - _, err := n.Peer.RemoveP2PCollections( - ctx, - &pb.RemoveP2PCollectionsRequest{ - Collections: []string{"invalid_collection"}, - }, - ) + err := n.Peer.RemoveP2PCollections(ctx, []string{"invalid_collection"}) require.Error(t, err, ds.ErrNotFound) } @@ -837,12 +709,7 @@ func TestRemoveP2PCollections(t *testing.T) { col, err := db.GetCollectionByName(ctx, "User") require.NoError(t, err) - _, err = n.Peer.RemoveP2PCollections( - ctx, - &pb.RemoveP2PCollectionsRequest{ - Collections: []string{col.SchemaID()}, - }, - ) + err = n.Peer.RemoveP2PCollections(ctx, []string{col.SchemaID()}) require.NoError(t, err) } @@ -850,12 +717,9 @@ func TestGetAllP2PCollectionsWithNoCollections(t *testing.T) { ctx := context.Background() _, n := newTestNode(ctx, t) - cols, err := n.Peer.GetAllP2PCollections( - ctx, - &pb.GetAllP2PCollectionsRequest{}, - ) + cols, err := n.Peer.GetAllP2PCollections(ctx) require.NoError(t, err) - require.Len(t, cols.Collections, 0) + require.Len(t, cols, 0) } func TestGetAllP2PCollections(t *testing.T) { @@ -871,25 +735,12 @@ func TestGetAllP2PCollections(t *testing.T) { col, err := db.GetCollectionByName(ctx, "User") require.NoError(t, err) - _, err = n.Peer.AddP2PCollections( - ctx, - &pb.AddP2PCollectionsRequest{ - Collections: []string{col.SchemaID()}, - }, - ) + err = n.Peer.AddP2PCollections(ctx, []string{col.SchemaID()}) require.NoError(t, err) - cols, err := n.Peer.GetAllP2PCollections( - ctx, - &pb.GetAllP2PCollectionsRequest{}, - ) + cols, err := n.Peer.GetAllP2PCollections(ctx) require.NoError(t, err) - require.Equal(t, &pb.GetAllP2PCollectionsReply{ - Collections: []*pb.GetAllP2PCollectionsReply_Collection{{ - Id: col.SchemaID(), - Name: col.Name(), - }}, - }, cols) + require.ElementsMatch(t, []string{col.SchemaID()}, cols) } func TestHandleDocCreateLog_NoError(t *testing.T) { @@ -1121,15 +972,9 @@ func TestPushLogToReplicator_WithReplicator_FailedPushingLogError(t *testing.T) _, n2 := newTestNode(ctx, t) - addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) - require.NoError(t, err) - - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) col, err := db.GetCollectionByName(ctx, "User") diff --git a/net/server_test.go b/net/server_test.go index 993c12d875..937b4c34b4 100644 --- a/net/server_test.go +++ b/net/server_test.go @@ -11,17 +11,12 @@ package net import ( - "bufio" - "bytes" "context" - "encoding/json" - "io" "testing" "time" "github.com/libp2p/go-libp2p/core/event" 
"github.com/libp2p/go-libp2p/core/host" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" rpc "github.com/textileio/go-libp2p-pubsub-rpc" grpcpeer "google.golang.org/grpc/peer" @@ -29,7 +24,6 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" net_pb "github.com/sourcenetwork/defradb/net/pb" ) @@ -79,7 +73,7 @@ func TestNewServerWithCollectionSubscribed(t *testing.T) { col, err := db.GetCollectionByName(ctx, "User") require.NoError(t, err) - err = n.AddP2PCollection(ctx, col.SchemaID()) + err = n.AddP2PCollections(ctx, []string{col.SchemaID()}) require.NoError(t, err) _, err = newServer(n.Peer, db) @@ -190,46 +184,8 @@ func TestNewServerWithEmitterError(t *testing.T) { n.Peer.host = &mockHost{n.Peer.host} - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - _, err = newServer(n.Peer, db) require.NoError(t, err) - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 2 { - t.Fatalf("expecting exactly 2 log line but got %d lines", len(logLines)) - } - assert.Equal(t, "could not create event emitter", logLines[0]["msg"]) - assert.Equal(t, "could not create event emitter", logLines[1]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") -} - -func parseLines(r io.Reader) ([]map[string]any, error) { - fileScanner := bufio.NewScanner(r) - - fileScanner.Split(bufio.ScanLines) - - logLines := []map[string]any{} - for fileScanner.Scan() { - loggedLine := make(map[string]any) - err := json.Unmarshal(fileScanner.Bytes(), &loggedLine) - if err != nil { - return nil, err - } - logLines = append(logLines, loggedLine) - } - - return logLines, nil } func TestGetDocGraph(t *testing.T) { diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go index f167b882d8..261561ca8d 100644 --- a/tests/clients/cli/wrapper.go +++ b/tests/clients/cli/wrapper.go @@ -94,17 +94,17 @@ func (w *Wrapper) GetAllReplicators(ctx context.Context) ([]client.Replicator, e return reps, nil } -func (w *Wrapper) AddP2PCollection(ctx context.Context, collectionID string) error { +func (w *Wrapper) AddP2PCollections(ctx context.Context, collectionIDs []string) error { args := []string{"client", "p2p", "collection", "add"} - args = append(args, collectionID) + args = append(args, strings.Join(collectionIDs, ",")) _, err := w.cmd.execute(ctx, args) return err } -func (w *Wrapper) RemoveP2PCollection(ctx context.Context, collectionID string) error { +func (w *Wrapper) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { args := []string{"client", "p2p", "collection", "remove"} - args = append(args, collectionID) + args = append(args, strings.Join(collectionIDs, ",")) _, err := w.cmd.execute(ctx, args) return err diff --git a/tests/clients/http/wrapper.go b/tests/clients/http/wrapper.go index 10b34129d8..b5ef61c037 100644 --- a/tests/clients/http/wrapper.go +++ b/tests/clients/http/wrapper.go @@ -62,12 +62,12 @@ func (w *Wrapper) GetAllReplicators(ctx context.Context) ([]client.Replicator, e return w.client.GetAllReplicators(ctx) } -func (w *Wrapper) AddP2PCollection(ctx context.Context, collectionID string) error { - return w.client.AddP2PCollection(ctx, collectionID) +func (w *Wrapper) AddP2PCollections(ctx context.Context, collectionIDs []string) error { + return w.client.AddP2PCollections(ctx, collectionIDs) } -func (w *Wrapper) RemoveP2PCollection(ctx 
context.Context, collectionID string) error { - return w.client.RemoveP2PCollection(ctx, collectionID) +func (w *Wrapper) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { + return w.client.RemoveP2PCollections(ctx, collectionIDs) } func (w *Wrapper) GetAllP2PCollections(ctx context.Context) ([]string, error) { diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go index 83d01743b9..5470d8aee7 100644 --- a/tests/integration/net/order/utils.go +++ b/tests/integration/net/order/utils.go @@ -16,7 +16,6 @@ import ( "strings" "testing" - ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -26,7 +25,6 @@ import ( "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" - netpb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" testutils "github.com/sourcenetwork/defradb/tests/integration" ) @@ -112,7 +110,7 @@ func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*net.Node return nil, nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", cfg.Net.Peers), err) } log.Info(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) - n.Boostrap(addrs) + n.Bootstrap(addrs) } if err := n.Start(); err != nil { @@ -301,16 +299,9 @@ func executeTestCase(t *testing.T, test P2PTestCase) { for i, n := range nodes { if reps, ok := test.NodeReplicators[i]; ok { for _, r := range reps { - addr, err := ma.NewMultiaddr( - fmt.Sprintf("%s/p2p/%s", test.NodeConfig[r].Net.P2PAddress, nodes[r].PeerID()), - ) - require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &netpb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err := n.Peer.SetReplicator(ctx, client.Replicator{ + Info: nodes[r].PeerInfo(), + }) require.NoError(t, err) } } @@ -356,12 +347,8 @@ func executeTestCase(t *testing.T, test P2PTestCase) { } } -const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" - func randomNetworkingConfig() *config.Config { cfg := config.DefaultConfig() - cfg.Net.P2PAddress = randomMultiaddr - cfg.Net.RPCAddress = "0.0.0.0:0" - cfg.Net.TCPAddress = randomMultiaddr + cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/0" return cfg } diff --git a/tests/integration/net/state/simple/replicator/with_create_update_test.go b/tests/integration/net/state/simple/replicator/with_create_update_test.go index dd3612055d..c62d63b17c 100644 --- a/tests/integration/net/state/simple/replicator/with_create_update_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_update_test.go @@ -141,10 +141,6 @@ func TestP2POneToOneReplicatorDoesNotUpdateDocExistingOnlyOnTarget(t *testing.T) } `, }, - testUtils.ConfigureReplicator{ - SourceNodeID: 0, - TargetNodeID: 1, - }, testUtils.CreateDoc{ // This document is created in all nodes Doc: `{ @@ -152,6 +148,13 @@ func TestP2POneToOneReplicatorDoesNotUpdateDocExistingOnlyOnTarget(t *testing.T) "Age": 21 }`, }, + testUtils.ConfigureReplicator{ + // Replication must happen after creating documents + // on both nodes, or a race condition can occur + // on the second node when creating the document + SourceNodeID: 0, + TargetNodeID: 1, + }, testUtils.CreateDoc{ // This document is created in the second node (target) only NodeID: immutable.Some(1), diff --git a/tests/integration/p2p.go b/tests/integration/p2p.go index 311a088c86..e04e16bb0f 100644 --- a/tests/integration/p2p.go +++ b/tests/integration/p2p.go @@ -14,13 +14,12 @@ 
import ( "fmt" "time" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" - pb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" - ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -59,9 +58,12 @@ type ConfigureReplicator struct { TargetNodeID int } -// NonExistentCollectionID can be used to represent a non-existent collection ID, it will be substituted -// for a non-existent collection ID when used in actions that support this. -const NonExistentCollectionID int = -1 +const ( + // NonExistentCollectionID can be used to represent a non-existent collection ID, it will be substituted + // for a non-existent collection ID when used in actions that support this. + NonExistentCollectionID int = -1 + NonExistentCollectionSchemaID = "NonExistentCollectionID" +) // SubscribeToCollection sets up a subscription on the given node to the given collection. // @@ -142,7 +144,7 @@ func connectPeers( s.t.Fatal(fmt.Sprintf("failed to parse bootstrap peers %v", targetAddress), err) } log.Info(s.ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) - sourceNode.Boostrap(addrs) + sourceNode.Bootstrap(addrs) // Bootstrap triggers a bunch of async stuff for which we have no good way of waiting on. It must be // allowed to complete before documentation begins or it will not even try and sync it. So for now, we @@ -291,17 +293,10 @@ func configureReplicator( time.Sleep(100 * time.Millisecond) sourceNode := s.nodes[cfg.SourceNodeID] targetNode := s.nodes[cfg.TargetNodeID] - targetAddress := s.nodeAddresses[cfg.TargetNodeID] - addr, err := ma.NewMultiaddr(targetAddress) - require.NoError(s.t, err) - - _, err = sourceNode.Peer.SetReplicator( - s.ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err := sourceNode.Peer.SetReplicator(s.ctx, client.Replicator{ + Info: targetNode.PeerInfo(), + }) require.NoError(s.t, err) setupReplicatorWaitSync(s, 0, cfg, sourceNode, targetNode) } @@ -394,7 +389,7 @@ func subscribeToCollection( schemaIDs := []string{} for _, collectionIndex := range action.CollectionIDs { if collectionIndex == NonExistentCollectionID { - schemaIDs = append(schemaIDs, "NonExistentCollectionID") + schemaIDs = append(schemaIDs, NonExistentCollectionSchemaID) continue } @@ -402,12 +397,7 @@ func subscribeToCollection( schemaIDs = append(schemaIDs, col.SchemaID()) } - _, err := n.Peer.AddP2PCollections( - s.ctx, - &pb.AddP2PCollectionsRequest{ - Collections: schemaIDs, - }, - ) + err := n.AddP2PCollections(s.ctx, schemaIDs) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -429,7 +419,7 @@ func unsubscribeToCollection( schemaIDs := []string{} for _, collectionIndex := range action.CollectionIDs { if collectionIndex == NonExistentCollectionID { - schemaIDs = append(schemaIDs, "NonExistentCollectionID") + schemaIDs = append(schemaIDs, NonExistentCollectionSchemaID) continue } @@ -437,12 +427,7 @@ func unsubscribeToCollection( schemaIDs = append(schemaIDs, col.SchemaID()) } - _, err := n.Peer.RemoveP2PCollections( - s.ctx, - &pb.RemoveP2PCollectionsRequest{ - Collections: schemaIDs, - }, - ) + err := n.RemoveP2PCollections(s.ctx, schemaIDs) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, 
action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -460,26 +445,17 @@ func getAllP2PCollections( s *state, action GetAllP2PCollections, ) { - expectedCollections := []*pb.GetAllP2PCollectionsReply_Collection{} + expectedCollections := []string{} for _, collectionIndex := range action.ExpectedCollectionIDs { col := s.collections[action.NodeID][collectionIndex] - expectedCollections = append( - expectedCollections, - &pb.GetAllP2PCollectionsReply_Collection{ - Id: col.SchemaID(), - Name: col.Name(), - }, - ) + expectedCollections = append(expectedCollections, col.SchemaID()) } n := s.nodes[action.NodeID] - cols, err := n.Peer.GetAllP2PCollections( - s.ctx, - &pb.GetAllP2PCollectionsRequest{}, - ) + cols, err := n.GetAllP2PCollections(s.ctx) require.NoError(s.t, err) - assert.Equal(s.t, expectedCollections, cols.Collections) + assert.Equal(s.t, expectedCollections, cols) } // waitForSync waits for all given wait channels to receive an item signaling completion. @@ -502,14 +478,10 @@ func waitForSync( } } -const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" - func RandomNetworkingConfig() ConfigureNode { return func() config.Config { cfg := config.DefaultConfig() - cfg.Net.P2PAddress = randomMultiaddr - cfg.Net.RPCAddress = "0.0.0.0:0" - cfg.Net.TCPAddress = randomMultiaddr + cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/0" return *cfg } } diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 1108608ef8..3354c43561 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -1460,7 +1460,7 @@ func withRetry( nodeID int, action func() error, ) error { - for i := 0; i < nodes[nodeID].MaxTxnRetries(); i++ { + for i := 0; i < nodes[nodeID].DB.MaxTxnRetries(); i++ { err := action() if err != nil && errors.Is(err, badgerds.ErrTxnConflict) { time.Sleep(100 * time.Millisecond) From c8bde6468412b6c9ab812c940b2e672766af92c1 Mon Sep 17 00:00:00 2001 From: Islam Aliev Date: Sat, 14 Oct 2023 00:26:22 +0200 Subject: [PATCH 24/55] feat: Make queries utilise secondary indexes (#1925) ## Relevant issue(s) Resolves #1555 ## Description With this change, secondary indexes are utilised when querying data. A dedicated `Indexer` fetcher is implemented to fetch the values of indexed fields. There is now a separate `filter` package that houses methods for working with filters. A new metric, `indexesFetched`, is introduced into `@explain` to report how many indexes have been fetched. This PR also includes an update to the testing framework to allow adding custom asserters; the new `ExplainResultsAsserter` is used with this new feature.
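For illustration, here is a minimal sketch of how the new generic `datastore.DeserializePrefix` helper can be called. The `listIndexes` wrapper and the printing are assumptions made for this example; `getAllIndexes` in `db/collection_index.go` is the real call site:

```go
package example

import (
	"context"
	"fmt"

	"github.com/sourcenetwork/defradb/client"
	"github.com/sourcenetwork/defradb/core"
	"github.com/sourcenetwork/defradb/datastore"
)

// listIndexes decodes every IndexDescription stored under the
// collection-index key prefix and prints its indexed fields.
func listIndexes(ctx context.Context, txn datastore.Txn) error {
	// An empty collection and index name yields the prefix shared
	// by all collection index keys.
	prefix := core.NewCollectionIndexKey("", "")
	keys, descriptions, err := datastore.DeserializePrefix[client.IndexDescription](
		ctx, prefix.ToString(), txn.Systemstore(),
	)
	if err != nil {
		return err
	}
	for i := range keys {
		fmt.Printf("%s: %v\n", keys[i], descriptions[i].Fields)
	}
	return nil
}
```

On an unmarshalling failure the helper closes the query iterator and returns `NewErrInvalidStoredValue`, giving callers a single error path for corrupt stored entries.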
--- client/index.go | 19 + datastore/errors.go | 10 + datastore/prefix_query.go | 81 +++ db/collection_index.go | 89 +-- db/fetcher/encoded_doc.go | 18 + db/fetcher/fetcher.go | 6 + db/fetcher/indexer.go | 165 ++++++ db/fetcher/indexer_iterators.go | 464 +++++++++++++++ db/index.go | 33 +- db/index_test.go | 22 +- db/indexed_docs_test.go | 2 +- errors/defraError.go | 11 +- planner/datasource.go | 35 +- planner/explain.go | 20 +- planner/filter/complex.go | 2 +- planner/filter/copy_field.go | 39 +- planner/filter/copy_field_test.go | 57 +- planner/filter/copy_test.go | 14 +- planner/filter/extract_properties.go | 78 +++ planner/filter/extract_properties_test.go | 115 ++++ planner/filter/remove_field.go | 14 +- planner/filter/remove_field_test.go | 81 ++- planner/filter/split.go | 6 +- planner/filter/split_test.go | 30 +- planner/filter/unwrap_relation.go | 86 +++ planner/filter/unwrap_relation_test.go | 99 ++++ planner/filter/util_test.go | 52 +- planner/mapper/targetable.go | 11 + planner/planner.go | 76 ++- planner/scan.go | 61 +- planner/select.go | 62 +- planner/sum.go | 8 +- planner/type_join.go | 464 ++++++++------- tests/bench/bench_util.go | 2 +- tests/bench/fixtures/fixtures.go | 41 +- tests/bench/query/index/simple_test.go | 97 ++++ tests/bench/query/simple/simple_test.go | 8 +- tests/bench/query/simple/utils.go | 2 +- tests/bench/query/simple/with_filter_test.go | 8 +- .../query/simple/with_limit_offset_test.go | 8 +- .../query/simple/with_multi_lookup_test.go | 6 +- tests/bench/query/simple/with_order_test.go | 8 +- .../query/simple/with_single_lookup_test.go | 8 +- .../explain/execute/create_test.go | 1 + .../explain/execute/delete_test.go | 2 + .../integration/explain/execute/group_test.go | 1 + .../execute/query_deleted_docs_test.go | 1 + .../integration/explain/execute/scan_test.go | 4 + .../explain/execute/top_level_test.go | 3 + .../explain/execute/type_join_test.go | 28 + .../explain/execute/update_test.go | 2 + .../explain/execute/with_average_test.go | 8 + .../explain/execute/with_count_test.go | 7 + .../explain/execute/with_limit_test.go | 8 + .../explain/execute/with_order_test.go | 16 + .../explain/execute/with_sum_test.go | 8 + tests/integration/explain_result_asserter.go | 162 ++++++ tests/integration/index/docs.go | 456 +++++++++++++++ .../index/query_performance_test.go | 86 +++ .../query_with_index_combined_filter_test.go | 87 +++ .../query_with_index_only_filter_test.go | 534 ++++++++++++++++++ .../index/query_with_relation_filter_test.go | 310 ++++++++++ tests/integration/index/utils.go | 290 ++++++++++ tests/integration/results.go | 4 +- tests/integration/state.go | 3 + tests/integration/test_case.go | 35 ++ tests/integration/utils2.go | 243 +++++--- 67 files changed, 4219 insertions(+), 598 deletions(-) create mode 100644 datastore/prefix_query.go create mode 100644 db/fetcher/indexer.go create mode 100644 db/fetcher/indexer_iterators.go create mode 100644 planner/filter/extract_properties.go create mode 100644 planner/filter/extract_properties_test.go create mode 100644 planner/filter/unwrap_relation.go create mode 100644 planner/filter/unwrap_relation_test.go create mode 100644 tests/bench/query/index/simple_test.go create mode 100644 tests/integration/explain_result_asserter.go create mode 100644 tests/integration/index/docs.go create mode 100644 tests/integration/index/query_performance_test.go create mode 100644 tests/integration/index/query_with_index_combined_filter_test.go create mode 100644 
tests/integration/index/query_with_index_only_filter_test.go create mode 100644 tests/integration/index/query_with_relation_filter_test.go create mode 100644 tests/integration/index/utils.go diff --git a/client/index.go b/client/index.go index 47b52f00c5..69f0362017 100644 --- a/client/index.go +++ b/client/index.go @@ -37,3 +37,22 @@ type IndexDescription struct { // Fields contains the fields that are being indexed. Fields []IndexedFieldDescription } + +// CollectIndexedFields returns all fields that are indexed by all collection indexes. +func (d CollectionDescription) CollectIndexedFields(schema *SchemaDescription) []FieldDescription { + fieldsMap := make(map[string]bool) + fields := make([]FieldDescription, 0, len(d.Indexes)) + for _, index := range d.Indexes { + for _, field := range index.Fields { + for i := range schema.Fields { + colField := schema.Fields[i] + if field.Name == colField.Name && !fieldsMap[field.Name] { + fieldsMap[field.Name] = true + fields = append(fields, colField) + break + } + } + } + } + return fields +} diff --git a/datastore/errors.go b/datastore/errors.go index b08e6d5e70..b248ce6db8 100644 --- a/datastore/errors.go +++ b/datastore/errors.go @@ -14,6 +14,10 @@ import ( "github.com/sourcenetwork/defradb/errors" ) +const ( + errInvalidStoredValue string = "invalid stored value" +) + // Errors returnable from this package. // // This list is incomplete and undefined errors may also be returned. @@ -26,3 +30,9 @@ var ( // ErrNotFound is an error returned when a block is not found. ErrNotFound = errors.New("blockstore: block not found") ) + +// NewErrInvalidStoredValue returns a new error indicating that the stored +// value in the database is invalid. +func NewErrInvalidStoredValue(inner error) error { + return errors.Wrap(errInvalidStoredValue, inner) +} diff --git a/datastore/prefix_query.go b/datastore/prefix_query.go new file mode 100644 index 0000000000..7150aebe48 --- /dev/null +++ b/datastore/prefix_query.go @@ -0,0 +1,81 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package datastore + +import ( + "context" + "encoding/json" + + ds "github.com/ipfs/go-datastore" + + "github.com/ipfs/go-datastore/query" +) + +// DeserializePrefix deserializes all elements with the given prefix from the given storage. +// It returns the keys and their corresponding elements. +func DeserializePrefix[T any]( + ctx context.Context, + prefix string, + store ds.Read, +) ([]string, []T, error) { + q, err := store.Query(ctx, query.Query{Prefix: prefix}) + if err != nil { + return nil, nil, err + } + + keys := make([]string, 0) + elements := make([]T, 0) + for res := range q.Next() { + if res.Error != nil { + _ = q.Close() + return nil, nil, res.Error + } + + var element T + err = json.Unmarshal(res.Value, &element) + if err != nil { + _ = q.Close() + return nil, nil, NewErrInvalidStoredValue(err) + } + keys = append(keys, res.Key) + elements = append(elements, element) + } + if err := q.Close(); err != nil { + return nil, nil, err + } + return keys, elements, nil +} + +// FetchKeysForPrefix fetches all keys with the given prefix from the given storage. 
+func FetchKeysForPrefix( + ctx context.Context, + prefix string, + store ds.Read, +) ([]ds.Key, error) { + q, err := store.Query(ctx, query.Query{Prefix: prefix}) + if err != nil { + return nil, err + } + + keys := make([]ds.Key, 0) + for res := range q.Next() { + if res.Error != nil { + _ = q.Close() + return nil, res.Error + } + keys = append(keys, ds.NewKey(res.Key)) + } + if err = q.Close(); err != nil { + return nil, err + } + + return keys, nil +} diff --git a/db/collection_index.go b/db/collection_index.go index 791817a0a3..f3c1ba2e98 100644 --- a/db/collection_index.go +++ b/db/collection_index.go @@ -17,9 +17,6 @@ import ( "strconv" "strings" - ds "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/query" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" @@ -63,7 +60,7 @@ func (db *db) getAllIndexes( ) (map[client.CollectionName][]client.IndexDescription, error) { prefix := core.NewCollectionIndexKey("", "") - deserializedIndexes, err := deserializePrefix[client.IndexDescription](ctx, + keys, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription](ctx, prefix.ToString(), txn.Systemstore()) if err != nil { @@ -72,12 +69,15 @@ func (db *db) getAllIndexes( indexes := make(map[client.CollectionName][]client.IndexDescription) - for _, indexRec := range deserializedIndexes { - indexKey, err := core.NewCollectionIndexKeyFromString(indexRec.key) + for i := range keys { + indexKey, err := core.NewCollectionIndexKeyFromString(keys[i]) if err != nil { return nil, NewErrInvalidStoredIndexKey(indexKey.ToString()) } - indexes[indexKey.CollectionName] = append(indexes[indexKey.CollectionName], indexRec.element) + indexes[indexKey.CollectionName] = append( + indexes[indexKey.CollectionName], + indexDescriptions[i], + ) } return indexes, nil @@ -89,16 +89,12 @@ func (db *db) fetchCollectionIndexDescriptions( colName string, ) ([]client.IndexDescription, error) { prefix := core.NewCollectionIndexKey(colName, "") - deserializedIndexes, err := deserializePrefix[client.IndexDescription](ctx, + _, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription](ctx, prefix.ToString(), txn.Systemstore()) if err != nil { return nil, err } - indexes := make([]client.IndexDescription, 0, len(deserializedIndexes)) - for _, indexRec := range deserializedIndexes { - indexes = append(indexes, indexRec.element) - } - return indexes, nil + return indexDescriptions, nil } func (c *collection) indexNewDoc(ctx context.Context, txn datastore.Txn, doc *client.Document) error { @@ -115,27 +111,6 @@ func (c *collection) indexNewDoc(ctx context.Context, txn datastore.Txn, doc *cl return nil } -// collectIndexedFields returns all fields that are indexed by all collection indexes. 
-func (c *collection) collectIndexedFields() []client.FieldDescription { - fieldsMap := make(map[string]client.FieldDescription) - for _, index := range c.indexes { - for _, field := range index.Description().Fields { - for i := range c.desc.Schema.Fields { - colField := c.desc.Schema.Fields[i] - if field.Name == colField.Name { - fieldsMap[field.Name] = colField - break - } - } - } - } - fields := make([]client.FieldDescription, 0, len(fieldsMap)) - for _, field := range fieldsMap { - fields = append(fields, field) - } - return fields -} - func (c *collection) updateIndexedDoc( ctx context.Context, txn datastore.Txn, @@ -145,7 +120,13 @@ func (c *collection) updateIndexedDoc( if err != nil { return err } - oldDoc, err := c.get(ctx, txn, c.getPrimaryKeyFromDocKey(doc.Key()), c.collectIndexedFields(), false) + desc := c.Description() + oldDoc, err := c.get( + ctx, + txn, + c.getPrimaryKeyFromDocKey(doc.Key()), desc.CollectIndexedFields(&desc.Schema), + false, + ) if err != nil { return err } @@ -370,7 +351,7 @@ func (c *collection) dropIndex(ctx context.Context, txn datastore.Txn, indexName func (c *collection) dropAllIndexes(ctx context.Context, txn datastore.Txn) error { prefix := core.NewCollectionIndexKey(c.Name(), "") - keys, err := fetchKeysForPrefix(ctx, prefix.ToString(), txn.Systemstore()) + keys, err := datastore.FetchKeysForPrefix(ctx, prefix.ToString(), txn.Systemstore()) if err != nil { return err } @@ -510,39 +491,3 @@ func generateIndexName(col client.Collection, fields []client.IndexedFieldDescri } return sb.String() } - -type deserializedElement[T any] struct { - key string - element T -} - -func deserializePrefix[T any]( - ctx context.Context, - prefix string, - storage ds.Read, -) ([]deserializedElement[T], error) { - q, err := storage.Query(ctx, query.Query{Prefix: prefix}) - if err != nil { - return nil, NewErrFailedToCreateCollectionQuery(err) - } - - elements := make([]deserializedElement[T], 0) - for res := range q.Next() { - if res.Error != nil { - _ = q.Close() - return nil, res.Error - } - - var element T - err = json.Unmarshal(res.Value, &element) - if err != nil { - _ = q.Close() - return nil, NewErrInvalidStoredIndex(err) - } - elements = append(elements, deserializedElement[T]{key: res.Key, element: element}) - } - if err := q.Close(); err != nil { - return nil, err - } - return elements, nil -} diff --git a/db/fetcher/encoded_doc.go b/db/fetcher/encoded_doc.go index 3e19eb2218..bc22471465 100644 --- a/db/fetcher/encoded_doc.go +++ b/db/fetcher/encoded_doc.go @@ -131,6 +131,24 @@ func Decode(encdoc EncodedDocument) (*client.Document, error) { return doc, nil } +// MergeProperties merges the properties of the given document into this document. +// Existing fields of the current document are overwritten. 
+func (encdoc *encodedDocument) MergeProperties(other EncodedDocument) { + otherEncDoc, ok := other.(*encodedDocument) + if !ok { + return + } + for field, prop := range otherEncDoc.properties { + encdoc.properties[field] = prop + } + if other.Key() != nil { + encdoc.key = other.Key() + } + if other.SchemaVersionID() != "" { + encdoc.schemaVersionID = other.SchemaVersionID() + } +} + // DecodeToDoc returns a decoded document as a // map of field/value pairs func DecodeToDoc(encdoc EncodedDocument, mapping *core.DocumentMapping, filter bool) (core.Doc, error) { diff --git a/db/fetcher/fetcher.go b/db/fetcher/fetcher.go index 34f05d4f1d..8935e617cc 100644 --- a/db/fetcher/fetcher.go +++ b/db/fetcher/fetcher.go @@ -33,18 +33,22 @@ type ExecInfo struct { DocsFetched uint64 // Number of fields fetched. FieldsFetched uint64 + // Number of indexes fetched. + IndexesFetched uint64 } // Add adds the other ExecInfo to the current ExecInfo. func (s *ExecInfo) Add(other ExecInfo) { s.DocsFetched += other.DocsFetched s.FieldsFetched += other.FieldsFetched + s.IndexesFetched += other.IndexesFetched } // Reset resets the ExecInfo. func (s *ExecInfo) Reset() { s.DocsFetched = 0 s.FieldsFetched = 0 + s.IndexesFetched = 0 } // Fetcher is the interface for collecting documents from the underlying data store. @@ -576,6 +580,8 @@ func (df *DocumentFetcher) fetchNext(ctx context.Context) (EncodedDocument, Exec // keyparts := df.kv.Key.List() // key := keyparts[len(keyparts)-2] + prevExecInfo := df.execInfo + defer func() { df.execInfo.Add(prevExecInfo) }() df.execInfo.Reset() // iterate until we have collected all the necessary kv pairs for the doc // we'll know when were done when either diff --git a/db/fetcher/indexer.go b/db/fetcher/indexer.go new file mode 100644 index 0000000000..da4dc6a580 --- /dev/null +++ b/db/fetcher/indexer.go @@ -0,0 +1,165 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package fetcher + +import ( + "context" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/planner/mapper" +) + +// IndexFetcher is a fetcher that fetches documents by index. +// It fetches only the indexed field and the rest of the fields are fetched by the internal fetcher. +type IndexFetcher struct { + docFetcher Fetcher + col *client.CollectionDescription + txn datastore.Txn + indexFilter *mapper.Filter + docFilter *mapper.Filter + doc *encodedDocument + mapping *core.DocumentMapping + indexedField client.FieldDescription + docFields []client.FieldDescription + indexIter indexIterator + indexDataStoreKey core.IndexDataStoreKey + execInfo ExecInfo +} + +var _ Fetcher = (*IndexFetcher)(nil) + +// NewIndexFetcher creates a new IndexFetcher. 
+func NewIndexFetcher( + docFetcher Fetcher, + indexedFieldDesc client.FieldDescription, + indexFilter *mapper.Filter, +) *IndexFetcher { + return &IndexFetcher{ + docFetcher: docFetcher, + indexedField: indexedFieldDesc, + indexFilter: indexFilter, + } +} + +func (f *IndexFetcher) Init( + ctx context.Context, + txn datastore.Txn, + col *client.CollectionDescription, + fields []client.FieldDescription, + filter *mapper.Filter, + docMapper *core.DocumentMapping, + reverse bool, + showDeleted bool, +) error { + f.col = col + f.docFilter = filter + f.doc = &encodedDocument{} + f.mapping = docMapper + f.txn = txn + + for _, index := range col.Indexes { + if index.Fields[0].Name == f.indexedField.Name { + f.indexDataStoreKey.IndexID = index.ID + break + } + } + + f.indexDataStoreKey.CollectionID = f.col.ID + + for i := range fields { + if fields[i].Name == f.indexedField.Name { + f.docFields = append(fields[:i], fields[i+1:]...) + break + } + } + + iter, err := createIndexIterator(f.indexDataStoreKey, f.indexFilter, &f.execInfo) + if err != nil { + return err + } + f.indexIter = iter + + if f.docFetcher != nil && len(f.docFields) > 0 { + err = f.docFetcher.Init(ctx, f.txn, f.col, f.docFields, f.docFilter, f.mapping, false, false) + } + + return err +} + +func (f *IndexFetcher) Start(ctx context.Context, spans core.Spans) error { + err := f.indexIter.Init(ctx, f.txn.Datastore()) + if err != nil { + return err + } + return nil +} + +func (f *IndexFetcher) FetchNext(ctx context.Context) (EncodedDocument, ExecInfo, error) { + totalExecInfo := f.execInfo + defer func() { f.execInfo.Add(totalExecInfo) }() + f.execInfo.Reset() + for { + f.doc.Reset() + + indexKey, hasValue, err := f.indexIter.Next() + if err != nil { + return nil, ExecInfo{}, err + } + + if !hasValue { + return nil, f.execInfo, nil + } + + property := &encProperty{ + Desc: f.indexedField, + Raw: indexKey.FieldValues[0], + } + + f.doc.key = indexKey.FieldValues[1] + f.doc.properties[f.indexedField] = property + f.execInfo.FieldsFetched++ + + if f.docFetcher != nil && len(f.docFields) > 0 { + targetKey := base.MakeDocKey(*f.col, string(f.doc.key)) + spans := core.NewSpans(core.NewSpan(targetKey, targetKey.PrefixEnd())) + err = f.docFetcher.Start(ctx, spans) + if err != nil { + return nil, ExecInfo{}, err + } + encDoc, execInfo, err := f.docFetcher.FetchNext(ctx) + if err != nil { + return nil, ExecInfo{}, err + } + err = f.docFetcher.Close() + if err != nil { + return nil, ExecInfo{}, err + } + f.execInfo.Add(execInfo) + if encDoc == nil { + continue + } + f.doc.MergeProperties(encDoc) + } else { + f.execInfo.DocsFetched++ + } + return f.doc, f.execInfo, nil + } +} + +func (f *IndexFetcher) Close() error { + if f.indexIter != nil { + return f.indexIter.Close() + } + return nil +} diff --git a/db/fetcher/indexer_iterators.go b/db/fetcher/indexer_iterators.go new file mode 100644 index 0000000000..b563c9b3a3 --- /dev/null +++ b/db/fetcher/indexer_iterators.go @@ -0,0 +1,464 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package fetcher + +import ( + "bytes" + "context" + "errors" + "strings" + + "github.com/fxamacker/cbor/v2" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/connor" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/planner/mapper" + + "github.com/ipfs/go-datastore/query" +) + +const ( + opEq = "_eq" + opGt = "_gt" + opGe = "_ge" + opLt = "_lt" + opLe = "_le" + opNe = "_ne" + opIn = "_in" + opNin = "_nin" + opLike = "_like" + opNlike = "_nlike" +) + +// indexIterator is an iterator over index keys. +// It is used to iterate over the index keys that match a specific condition. +// For example, iteration over condition _eq and _gt will have completely different logic. +type indexIterator interface { + Init(context.Context, datastore.DSReaderWriter) error + Next() (core.IndexDataStoreKey, bool, error) + Close() error +} + +type queryResultIterator struct { + resultIter query.Results +} + +func (i queryResultIterator) Next() (core.IndexDataStoreKey, bool, error) { + res, hasVal := i.resultIter.NextSync() + if res.Error != nil { + return core.IndexDataStoreKey{}, false, res.Error + } + if !hasVal { + return core.IndexDataStoreKey{}, false, nil + } + key, err := core.NewIndexDataStoreKey(res.Key) + if err != nil { + return core.IndexDataStoreKey{}, false, err + } + return key, true, nil +} + +func (i queryResultIterator) Close() error { + return i.resultIter.Close() +} + +type eqIndexIterator struct { + queryResultIterator + indexKey core.IndexDataStoreKey + filterVal []byte + execInfo *ExecInfo +} + +func (i *eqIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { + i.indexKey.FieldValues = [][]byte{i.filterVal} + resultIter, err := store.Query(ctx, query.Query{ + Prefix: i.indexKey.ToString(), + KeysOnly: true, + }) + if err != nil { + return err + } + i.resultIter = resultIter + return nil +} + +func (i *eqIndexIterator) Next() (core.IndexDataStoreKey, bool, error) { + key, hasValue, err := i.queryResultIterator.Next() + if hasValue { + i.execInfo.IndexesFetched++ + } + return key, hasValue, err +} + +type inIndexIterator struct { + eqIndexIterator + filterValues [][]byte + nextValIndex int + ctx context.Context + store datastore.DSReaderWriter + hasIterator bool +} + +func newInIndexIterator( + indexKey core.IndexDataStoreKey, + filterValues [][]byte, + execInfo *ExecInfo, +) *inIndexIterator { + return &inIndexIterator{ + eqIndexIterator: eqIndexIterator{ + indexKey: indexKey, + execInfo: execInfo, + }, + filterValues: filterValues, + } +} + +func (i *inIndexIterator) nextIterator() (bool, error) { + if i.nextValIndex > 0 { + err := i.eqIndexIterator.Close() + if err != nil { + return false, err + } + } + + if i.nextValIndex >= len(i.filterValues) { + return false, nil + } + + i.filterVal = i.filterValues[i.nextValIndex] + err := i.eqIndexIterator.Init(i.ctx, i.store) + if err != nil { + return false, err + } + i.nextValIndex++ + return true, nil +} + +func (i *inIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { + i.ctx = ctx + i.store = store + var err error + i.hasIterator, err = i.nextIterator() + return err +} + +func (i *inIndexIterator) Next() (core.IndexDataStoreKey, bool, error) { + for i.hasIterator { + key, hasValue, err := i.eqIndexIterator.Next() + if err != nil { + return core.IndexDataStoreKey{}, false, err + } + if !hasValue { + i.hasIterator, err = i.nextIterator() + if err != nil { + return 
core.IndexDataStoreKey{}, false, err + } + continue + } + return key, true, nil + } + return core.IndexDataStoreKey{}, false, nil +} + +func (i *inIndexIterator) Close() error { + return nil +} + +type errorCheckingFilter struct { + matcher indexMatcher + err error +} + +func (f *errorCheckingFilter) Filter(e query.Entry) bool { + if f.err != nil { + return false + } + indexKey, err := core.NewIndexDataStoreKey(e.Key) + if err != nil { + f.err = err + return false + } + res, err := f.matcher.Match(indexKey) + if err != nil { + f.err = err + return false + } + return res +} + +// execInfoIndexMatcherDecorator is a decorator for indexMatcher that counts the number +// of indexes fetched on every call to Match. +type execInfoIndexMatcherDecorator struct { + matcher indexMatcher + execInfo *ExecInfo +} + +func (d *execInfoIndexMatcherDecorator) Match(key core.IndexDataStoreKey) (bool, error) { + d.execInfo.IndexesFetched++ + return d.matcher.Match(key) +} + +type scanningIndexIterator struct { + queryResultIterator + indexKey core.IndexDataStoreKey + matcher indexMatcher + filter errorCheckingFilter + execInfo *ExecInfo +} + +func (i *scanningIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { + i.filter.matcher = &execInfoIndexMatcherDecorator{matcher: i.matcher, execInfo: i.execInfo} + + iter, err := store.Query(ctx, query.Query{ + Prefix: i.indexKey.ToString(), + KeysOnly: true, + Filters: []query.Filter{&i.filter}, + }) + if err != nil { + return err + } + i.resultIter = iter + + return nil +} + +func (i *scanningIndexIterator) Next() (core.IndexDataStoreKey, bool, error) { + key, hasValue, err := i.queryResultIterator.Next() + if i.filter.err != nil { + return core.IndexDataStoreKey{}, false, i.filter.err + } + return key, hasValue, err +} + +// checks if the stored index value satisfies the condition +type indexMatcher interface { + Match(core.IndexDataStoreKey) (bool, error) +} + +// indexByteValuesMatcher is a filter that compares the index value with a given value. +// It uses bytes.Compare to compare the values and evaluate the result with evalFunc. 
+type indexByteValuesMatcher struct {
+	value []byte
+	// evalFunc receives a result of bytes.Compare
+	evalFunc func(int) bool
+}
+
+func (m *indexByteValuesMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
+	res := bytes.Compare(key.FieldValues[0], m.value)
+	return m.evalFunc(res), nil
+}
+
+// neIndexMatcher matches if the _ne condition is met
+type neIndexMatcher struct {
+	value []byte
+}
+
+func (m *neIndexMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
+	return !bytes.Equal(key.FieldValues[0], m.value), nil
+}
+
+// indexInArrayMatcher checks if the index value is or is not in the given array
+type indexInArrayMatcher struct {
+	values map[string]bool
+	isIn   bool
+}
+
+func newNinIndexCmp(values [][]byte, isIn bool) *indexInArrayMatcher {
+	valuesMap := make(map[string]bool)
+	for _, v := range values {
+		valuesMap[string(v)] = true
+	}
+	return &indexInArrayMatcher{values: valuesMap, isIn: isIn}
+}
+
+func (m *indexInArrayMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
+	_, found := m.values[string(key.FieldValues[0])]
+	return found == m.isIn, nil
+}
+
+// indexLikeMatcher checks if the index value satisfies the LIKE condition
+type indexLikeMatcher struct {
+	hasPrefix   bool
+	hasSuffix   bool
+	startAndEnd []string
+	isLike      bool
+	value       string
+}
+
+func newLikeIndexCmp(filterValue string, isLike bool) *indexLikeMatcher {
+	matcher := &indexLikeMatcher{
+		isLike: isLike,
+	}
+	if len(filterValue) >= 2 {
+		if filterValue[0] == '%' {
+			matcher.hasPrefix = true
+			filterValue = strings.TrimPrefix(filterValue, "%")
+		}
+		if filterValue[len(filterValue)-1] == '%' {
+			matcher.hasSuffix = true
+			filterValue = strings.TrimSuffix(filterValue, "%")
+		}
+		if !matcher.hasPrefix && !matcher.hasSuffix {
+			matcher.startAndEnd = strings.Split(filterValue, "%")
+		}
+	}
+	matcher.value = filterValue
+
+	return matcher
+}
+
+func (m *indexLikeMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
+	var currentVal string
+	err := cbor.Unmarshal(key.FieldValues[0], &currentVal)
+	if err != nil {
+		return false, err
+	}
+
+	return m.doesMatch(currentVal) == m.isLike, nil
+}
+
+func (m *indexLikeMatcher) doesMatch(currentVal string) bool {
+	switch {
+	case m.hasPrefix && m.hasSuffix:
+		return strings.Contains(currentVal, m.value)
+	case m.hasPrefix:
+		return strings.HasSuffix(currentVal, m.value)
+	case m.hasSuffix:
+		return strings.HasPrefix(currentVal, m.value)
+	// startAndEnd has exactly two parts only for a LIKE pattern with a single % in the middle, e.g. "ab%cd"
+	case len(m.startAndEnd) == 2:
+		return strings.HasPrefix(currentVal, m.startAndEnd[0]) &&
+			strings.HasSuffix(currentVal, m.startAndEnd[1])
+	default:
+		return m.value == currentVal
+	}
+}
+
+func createIndexIterator(
+	indexDataStoreKey core.IndexDataStoreKey,
+	indexFilterConditions *mapper.Filter,
+	execInfo *ExecInfo,
+) (indexIterator, error) {
+	var op string
+	var filterVal any
+	for _, indexFilterCond := range indexFilterConditions.Conditions {
+		condMap := indexFilterCond.(map[connor.FilterKey]any)
+		var key connor.FilterKey
+		for key, filterVal = range condMap {
+			break
+		}
+		opKey := key.(*mapper.Operator)
+		op = opKey.Operation
+		break
+	}
+
+	switch op {
+	case opEq, opGt, opGe, opLt, opLe, opNe:
+		writableValue := client.NewCBORValue(client.LWW_REGISTER, filterVal)
+
+		valueBytes, err := writableValue.Bytes()
+		if err != nil {
+			return nil, err
+		}
+
+		switch op {
+		case opEq:
+			return &eqIndexIterator{
+				indexKey:  indexDataStoreKey,
+				filterVal: valueBytes,
+				execInfo:  execInfo,
+			}, nil
+		case opGt:
+			return &scanningIndexIterator{
+				indexKey: indexDataStoreKey,
+				matcher: 
&indexByteValuesMatcher{ + value: valueBytes, + evalFunc: func(res int) bool { return res > 0 }, + }, + execInfo: execInfo, + }, nil + case opGe: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: &indexByteValuesMatcher{ + value: valueBytes, + evalFunc: func(res int) bool { return res > 0 || res == 0 }, + }, + execInfo: execInfo, + }, nil + case opLt: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: &indexByteValuesMatcher{ + value: valueBytes, + evalFunc: func(res int) bool { return res < 0 }, + }, + execInfo: execInfo, + }, nil + case opLe: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: &indexByteValuesMatcher{ + value: valueBytes, + evalFunc: func(res int) bool { return res < 0 || res == 0 }, + }, + execInfo: execInfo, + }, nil + case opNe: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: &neIndexMatcher{ + value: valueBytes, + }, + execInfo: execInfo, + }, nil + } + case opIn, opNin: + inArr, ok := filterVal.([]any) + if !ok { + return nil, errors.New("invalid _in/_nin value") + } + valArr := make([][]byte, 0, len(inArr)) + for _, v := range inArr { + writableValue := client.NewCBORValue(client.LWW_REGISTER, v) + valueBytes, err := writableValue.Bytes() + if err != nil { + return nil, err + } + valArr = append(valArr, valueBytes) + } + if op == opIn { + return newInIndexIterator(indexDataStoreKey, valArr, execInfo), nil + } else { + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: newNinIndexCmp(valArr, false), + execInfo: execInfo, + }, nil + } + case opLike: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: newLikeIndexCmp(filterVal.(string), true), + execInfo: execInfo, + }, nil + case opNlike: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: newLikeIndexCmp(filterVal.(string), false), + execInfo: execInfo, + }, nil + } + + return nil, errors.New("invalid index filter condition") +} diff --git a/db/index.go b/db/index.go index 2c5ea2d6b2..7314bc2a08 100644 --- a/db/index.go +++ b/db/index.go @@ -14,10 +14,6 @@ import ( "context" "time" - ds "github.com/ipfs/go-datastore" - - "github.com/ipfs/go-datastore/query" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" @@ -47,7 +43,7 @@ func canConvertIndexFieldValue[T any](val any) bool { func getValidateIndexFieldFunc(kind client.FieldKind) func(any) bool { switch kind { - case client.FieldKind_STRING: + case client.FieldKind_STRING, client.FieldKind_FOREIGN_OBJECT: return canConvertIndexFieldValue[string] case client.FieldKind_INT: return canConvertIndexFieldValue[int64] @@ -179,31 +175,6 @@ func (i *collectionSimpleIndex) Update( return i.Save(ctx, txn, newDoc) } -func fetchKeysForPrefix( - ctx context.Context, - prefix string, - storage ds.Read, -) ([]ds.Key, error) { - q, err := storage.Query(ctx, query.Query{Prefix: prefix}) - if err != nil { - return nil, err - } - - keys := make([]ds.Key, 0) - for res := range q.Next() { - if res.Error != nil { - _ = q.Close() - return nil, res.Error - } - keys = append(keys, ds.NewKey(res.Key)) - } - if err = q.Close(); err != nil { - return nil, err - } - - return keys, nil -} - // RemoveAll remove all artifacts of the index from the storage, i.e. all index // field values for all documents. 
func (i *collectionSimpleIndex) RemoveAll(ctx context.Context, txn datastore.Txn) error { @@ -211,7 +182,7 @@ func (i *collectionSimpleIndex) RemoveAll(ctx context.Context, txn datastore.Txn prefixKey.CollectionID = i.collection.ID() prefixKey.IndexID = i.desc.ID - keys, err := fetchKeysForPrefix(ctx, prefixKey.ToString(), txn.Datastore()) + keys, err := datastore.FetchKeysForPrefix(ctx, prefixKey.ToString(), txn.Datastore()) if err != nil { return err } diff --git a/db/index_test.go b/db/index_test.go index dce7e65bb4..67c3f232d0 100644 --- a/db/index_test.go +++ b/db/index_test.go @@ -703,7 +703,7 @@ func TestGetIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { assert.NoError(t, err) _, err = f.getAllIndexes() - assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) + assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil)) } func TestGetIndexes_IfInvalidIndexKeyIsStored_ReturnError(t *testing.T) { @@ -728,14 +728,15 @@ func TestGetIndexes_IfInvalidIndexKeyIsStored_ReturnError(t *testing.T) { func TestGetIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { f := newIndexTestFixture(t) + testErr := errors.New("test error") + mockedTxn := f.mockTxn() mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() - mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). - Return(nil, errors.New("test error")) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) _, err := f.getAllIndexes() - assert.ErrorIs(t, err, NewErrFailedToCreateCollectionQuery(nil)) + assert.ErrorIs(t, err, testErr) } func TestGetIndexes_IfSystemStoreFails_ShouldCloseIterator(t *testing.T) { @@ -779,7 +780,7 @@ func TestGetIndexes_IfSystemStoreHasInvalidData_ReturnError(t *testing.T) { mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(q, nil) _, err := f.getAllIndexes() - assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) + assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil)) } func TestGetIndexes_IfFailsToReadSeqNumber_ReturnError(t *testing.T) { @@ -853,15 +854,16 @@ func TestGetCollectionIndexes_ShouldReturnListOfCollectionIndexes(t *testing.T) func TestGetCollectionIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { f := newIndexTestFixture(t) + testErr := errors.New("test error") + mockedTxn := f.mockTxn() mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) - mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). 
- Return(nil, errors.New("test error")) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) mockedTxn.EXPECT().Systemstore().Unset() mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) _, err := f.getCollectionIndexes(usersColName) - assert.ErrorIs(t, err, NewErrFailedToCreateCollectionQuery(nil)) + assert.ErrorIs(t, err, testErr) } func TestGetCollectionIndexes_IfSystemStoreFails_ShouldCloseIterator(t *testing.T) { @@ -902,7 +904,7 @@ func TestGetCollectionIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { assert.NoError(t, err) _, err = f.getCollectionIndexes(usersColName) - assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) + assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil)) } func TestCollectionGetIndexes_ShouldReturnIndexes(t *testing.T) { @@ -967,7 +969,7 @@ func TestCollectionGetIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { }, { Name: "Query iterator returns invalid value", - ExpectedError: NewErrInvalidStoredIndex(nil), + ExpectedError: datastore.NewErrInvalidStoredValue(nil), GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { store := mocks.NewDSReaderWriter(t) store.EXPECT().Query(mock.Anything, mock.Anything). diff --git a/db/indexed_docs_test.go b/db/indexed_docs_test.go index b62cb992d6..5634686778 100644 --- a/db/indexed_docs_test.go +++ b/db/indexed_docs_test.go @@ -325,7 +325,7 @@ func TestNonUnique_IfSystemStorageHasInvalidIndexDescription_Error(t *testing.T) Return(mocks.NewQueryResultsWithValues(t, []byte("invalid")), nil) err := f.users.WithTxn(mockTxn).Create(f.ctx, doc) - require.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) + assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil)) } func TestNonUnique_IfSystemStorageFailsToReadIndexDesc_Error(t *testing.T) { diff --git a/errors/defraError.go b/errors/defraError.go index 2281add30e..2f05f1131d 100644 --- a/errors/defraError.go +++ b/errors/defraError.go @@ -58,13 +58,12 @@ func (e *defraError) Error() string { } func (e *defraError) Is(other error) bool { - switch otherTyped := other.(type) { - case *defraError: - return e.message == otherTyped.message - default: - otherString := other.Error() - return e.message == otherString || e.Error() == otherString || errors.Is(e.inner, other) + var otherDefraError *defraError + if errors.As(other, &otherDefraError) { + return e.message == otherDefraError.message } + otherString := other.Error() + return e.message == otherString || e.Error() == otherString || errors.Is(e.inner, other) } func (e *defraError) Unwrap() error { diff --git a/planner/datasource.go b/planner/datasource.go index afcfbab3ce..862f43bd33 100644 --- a/planner/datasource.go +++ b/planner/datasource.go @@ -11,11 +11,7 @@ package planner import ( - "encoding/json" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/planner/mapper" ) @@ -35,13 +31,13 @@ func (p *Planner) getSource(parsed *mapper.Select) (planSource, error) { return p.getCollectionScanPlan(parsed) } -func (p *Planner) getCollectionScanPlan(parsed *mapper.Select) (planSource, error) { - colDesc, err := p.getCollectionDesc(parsed.CollectionName) +func (p *Planner) getCollectionScanPlan(mapperSelect *mapper.Select) (planSource, error) { + col, err := p.db.GetCollectionByName(p.ctx, mapperSelect.CollectionName) if err != nil { return planSource{}, err } - scan, err := p.Scan(parsed) + scan, err := p.Scan(mapperSelect, 
col.Description()) if err != nil { return planSource{}, err } @@ -49,30 +45,7 @@ func (p *Planner) getCollectionScanPlan(parsed *mapper.Select) (planSource, erro return planSource{ plan: scan, info: sourceInfo{ - collectionDescription: colDesc, + collectionDescription: col.Description(), }, }, nil } - -func (p *Planner) getCollectionDesc(name string) (client.CollectionDescription, error) { - collectionKey := core.NewCollectionKey(name) - var desc client.CollectionDescription - schemaVersionIdBytes, err := p.txn.Systemstore().Get(p.ctx, collectionKey.ToDS()) - if err != nil { - return desc, errors.Wrap("failed to get collection description", err) - } - - schemaVersionId := string(schemaVersionIdBytes) - schemaVersionKey := core.NewCollectionSchemaVersionKey(schemaVersionId) - buf, err := p.txn.Systemstore().Get(p.ctx, schemaVersionKey.ToDS()) - if err != nil { - return desc, err - } - - err = json.Unmarshal(buf, &desc) - if err != nil { - return desc, err - } - - return desc, nil -} diff --git a/planner/explain.go b/planner/explain.go index 560063b4ba..07f96f9b0a 100644 --- a/planner/explain.go +++ b/planner/explain.go @@ -286,10 +286,10 @@ func buildSimpleExplainGraph(source planNode) (map[string]any, error) { // // Note: Can only be called once the entire plan has been executed. func collectExecuteExplainInfo(executedPlan planNode) (map[string]any, error) { - excuteExplainInfo := map[string]any{} + executeExplainInfo := map[string]any{} if executedPlan == nil { - return excuteExplainInfo, nil + return executeExplainInfo, nil } switch executedNode := executedPlan.(type) { @@ -303,16 +303,16 @@ func collectExecuteExplainInfo(executedPlan planNode) (map[string]any, error) { multiChildExplainGraph = append(multiChildExplainGraph, childExplainGraph) } explainNodeLabelTitle := strcase.ToLowerCamel(executedNode.Kind()) - excuteExplainInfo[explainNodeLabelTitle] = multiChildExplainGraph + executeExplainInfo[explainNodeLabelTitle] = multiChildExplainGraph case explainablePlanNode: - excuteExplainBuilder, err := executedNode.Explain(request.ExecuteExplain) + executeExplainBuilder, err := executedNode.Explain(request.ExecuteExplain) if err != nil { return nil, err } - if excuteExplainBuilder == nil { - excuteExplainBuilder = map[string]any{} + if executeExplainBuilder == nil { + executeExplainBuilder = map[string]any{} } if next := executedNode.Source(); next != nil && next.Kind() != topLevelNodeKind { @@ -321,21 +321,21 @@ func collectExecuteExplainInfo(executedPlan planNode) (map[string]any, error) { return nil, err } for key, value := range nextExplainGraph { - excuteExplainBuilder[key] = value + executeExplainBuilder[key] = value } } explainNodeLabelTitle := strcase.ToLowerCamel(executedNode.Kind()) - excuteExplainInfo[explainNodeLabelTitle] = excuteExplainBuilder + executeExplainInfo[explainNodeLabelTitle] = executeExplainBuilder default: var err error - excuteExplainInfo, err = collectExecuteExplainInfo(executedPlan.Source()) + executeExplainInfo, err = collectExecuteExplainInfo(executedPlan.Source()) if err != nil { return nil, err } } - return excuteExplainInfo, nil + return executeExplainInfo, nil } // executeAndExplainRequest executes the plan graph gathering the information/datapoints diff --git a/planner/filter/complex.go b/planner/filter/complex.go index 098caefc9c..acc2de4883 100644 --- a/planner/filter/complex.go +++ b/planner/filter/complex.go @@ -17,7 +17,7 @@ import ( // IsComplex returns true if the provided filter is complex. 
 // A filter is considered complex if it contains a relation
-// object withing an _or operator not necessarily being
+// object within an _or or _not operator not necessarily being
 // its direct child.
 func IsComplex(filter *mapper.Filter) bool {
 	if filter == nil {
diff --git a/planner/filter/copy_field.go b/planner/filter/copy_field.go
index 59f7db3471..70b5dc2956 100644
--- a/planner/filter/copy_field.go
+++ b/planner/filter/copy_field.go
@@ -14,18 +14,22 @@ import (
 	"github.com/sourcenetwork/defradb/planner/mapper"
 )
 
-// copyField copies the given field from the provided filter.
+// CopyField copies the given field from the provided filter.
+// Multiple fields can be passed to copy a condition on a field nested within related objects.
 // The result filter preserves the structure of the original filter.
-func copyField(filter *mapper.Filter, field mapper.Field) *mapper.Filter {
-	if filter == nil {
+func CopyField(filter *mapper.Filter, fields ...mapper.Field) *mapper.Filter {
+	if filter == nil || len(fields) == 0 {
 		return nil
 	}
-	conditionKey := &mapper.PropertyIndex{
-		Index: field.Index,
+	var conditionKeys []*mapper.PropertyIndex
+	for _, field := range fields {
+		conditionKeys = append(conditionKeys, &mapper.PropertyIndex{
+			Index: field.Index,
+		})
 	}
 	resultFilter := &mapper.Filter{}
-	conditionMap := traverseFilterByProperty(conditionKey, filter.Conditions, false)
+	conditionMap := traverseFilterByProperty(conditionKeys, filter.Conditions, false)
 	if len(conditionMap) > 0 {
 		resultFilter.Conditions = conditionMap
 		return resultFilter
@@ -34,7 +38,7 @@ func copyField(filter *mapper.Filter, field mapper.Field) *mapper.Filter {
 }
 
 func traverseFilterByProperty(
-	key *mapper.PropertyIndex,
+	keys []*mapper.PropertyIndex,
 	conditions map[connor.FilterKey]any,
 	shouldDelete bool,
 ) map[connor.FilterKey]any {
@@ -43,11 +47,20 @@ func traverseFilterByProperty(
 		result = make(map[connor.FilterKey]any)
 	}
 	for targetKey, clause := range conditions {
-		if targetKey.Equal(key) {
-			if shouldDelete {
-				delete(result, targetKey)
+		if targetKey.Equal(keys[0]) {
+			if len(keys) > 1 {
+				related := traverseFilterByProperty(keys[1:], clause.(map[connor.FilterKey]any), shouldDelete)
+				if shouldDelete && len(related) == 0 {
+					delete(result, targetKey)
+				} else if len(related) > 0 && !shouldDelete {
+					result[keys[0]] = clause
+				}
 			} else {
-				result[key] = clause
+				if shouldDelete {
+					delete(result, targetKey)
+				} else {
+					result[keys[0]] = clause
+				}
 			}
 		} else if opKey, isOpKey := targetKey.(*mapper.Operator); isOpKey {
 			clauseArr, isArr := clause.([]any)
@@ -58,13 +71,15 @@ func traverseFilterByProperty(
 				if !ok {
 					continue
 				}
-				compoundCond := traverseFilterByProperty(key, elementMap, shouldDelete)
+				compoundCond := traverseFilterByProperty(keys, elementMap, shouldDelete)
 				if len(compoundCond) > 0 {
 					resultArr = append(resultArr, compoundCond)
 				}
 			}
 			if len(resultArr) > 0 {
 				result[opKey] = resultArr
+			} else if shouldDelete {
+				delete(result, opKey)
 			}
 		}
 	}
diff --git a/planner/filter/copy_field_test.go b/planner/filter/copy_field_test.go
index d3ec10cf62..1714db55b6 100644
--- a/planner/filter/copy_field_test.go
+++ b/planner/filter/copy_field_test.go
@@ -13,6 +13,7 @@ import (
 	"testing"
 
 	"github.com/sourcenetwork/defradb/client/request"
+	"github.com/sourcenetwork/defradb/connor"
 	"github.com/sourcenetwork/defradb/planner/mapper"
 
 	"github.com/stretchr/testify/assert"
@@ -21,7 +22,7 @@ import (
 func TestCopyField(t *testing.T) {
 	tests := []struct {
 		name           string
-		inputField     mapper.Field
+		inputField     []mapper.Field
 		inputFilter    map[string]any
expectedFilter map[string]any }{ @@ -31,7 +32,7 @@ func TestCopyField(t *testing.T) { "name": m("_eq", "John"), "age": m("_gt", 55), }, - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: m("age", m("_gt", 55)), }, { @@ -40,7 +41,7 @@ func TestCopyField(t *testing.T) { m("name", m("_eq", "John")), m("age", m("_gt", 55)), ), - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: r("_and", m("age", m("_gt", 55)), ), @@ -59,7 +60,7 @@ func TestCopyField(t *testing.T) { m("age", m("_lt", 55)), ), ), - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: r("_and", r("_or", r("_and", @@ -71,13 +72,48 @@ func TestCopyField(t *testing.T) { ), ), }, + { + name: "field of related object", + inputFilter: r("_and", + r("_or", + r("_and", + m("published", m("rating", m("_gt", 4.0))), + m("age", m("_gt", 30)), + ), + ), + m("published", m("genre", m("_eq", "Comedy"))), + m("name", m("_eq", "John")), + ), + inputField: []mapper.Field{{Index: authorPublishedInd}, {Index: bookRatingInd}}, + expectedFilter: r("_and", + r("_or", + r("_and", + m("published", m("rating", m("_gt", 4.0))), + ), + ), + ), + }, + { + name: "field of related object (deeper)", + inputFilter: r("_and", + m("published", m("rating", m("_gt", 4.0))), + m("age", m("_gt", 30)), + m("published", m("stores", m("address", m("_eq", "123 Main St")))), + m("published", m("genre", m("_eq", "Comedy"))), + m("name", m("_eq", "John")), + ), + inputField: []mapper.Field{{Index: authorPublishedInd}, {Index: bookStoresInd}, {Index: storeAddressInd}}, + expectedFilter: r("_and", + m("published", m("stores", m("address", m("_eq", "123 Main St")))), + ), + }, } mapping := getDocMapping() for _, test := range tests { t.Run(test.name, func(t *testing.T) { inputFilter := mapper.ToFilter(request.Filter{Conditions: test.inputFilter}, mapping) - actualFilter := copyField(inputFilter, test.inputField) + actualFilter := CopyField(inputFilter, test.inputField...) 
 			expectedFilter := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter}, mapping)
 			AssertEqualFilterMap(t, expectedFilter.Conditions, actualFilter.Conditions)
 		})
@@ -85,6 +121,15 @@ func TestCopyField(t *testing.T) {
 }
 
 func TestCopyFieldOfNullFilter(t *testing.T) {
-	actualFilter := copyField(nil, mapper.Field{Index: 1})
+	actualFilter := CopyField(nil, mapper.Field{Index: 1})
+	assert.Nil(t, actualFilter)
+}
+
+func TestCopyFieldWithNoFieldGiven(t *testing.T) {
+	filter := mapper.NewFilter()
+	filter.Conditions = map[connor.FilterKey]any{
+		&mapper.PropertyIndex{Index: 0}: &mapper.Operator{Operation: "_eq"},
+	}
+	actualFilter := CopyField(filter)
 	assert.Nil(t, actualFilter)
 }
diff --git a/planner/filter/copy_test.go b/planner/filter/copy_test.go
index ccb471c2b6..a45d368964 100644
--- a/planner/filter/copy_test.go
+++ b/planner/filter/copy_test.go
@@ -23,20 +23,20 @@ func TestCopyFilter(t *testing.T) {
 		return map[connor.FilterKey]any{
 			&mapper.Operator{Operation: "_or"}: []any{
 				map[connor.FilterKey]any{
-					&mapper.PropertyIndex{Index: 0}: map[connor.FilterKey]any{
+					&mapper.PropertyIndex{Index: authorNameInd}: map[connor.FilterKey]any{
 						&mapper.Operator{Operation: "_eq"}: "Some name",
 					},
 				},
 				map[connor.FilterKey]any{
 					&mapper.Operator{Operation: "_and"}: []any{
 						map[connor.FilterKey]any{
-							&mapper.PropertyIndex{Index: 1}: map[connor.FilterKey]any{
+							&mapper.PropertyIndex{Index: authorAgeInd}: map[connor.FilterKey]any{
 								&mapper.Operator{Operation: "_gt"}: 64,
 							},
 						},
 						map[connor.FilterKey]any{
-							&mapper.PropertyIndex{Index: 2}: map[connor.FilterKey]any{
-								&mapper.PropertyIndex{Index: 1}: map[connor.FilterKey]any{
+							&mapper.PropertyIndex{Index: authorPublishedInd}: map[connor.FilterKey]any{
+								&mapper.PropertyIndex{Index: bookRatingInd}: map[connor.FilterKey]any{
 									&mapper.Operator{Operation: "_gt"}: 4.8,
 								},
 							},
@@ -46,13 +46,13 @@ func TestCopyFilter(t *testing.T) {
 				map[connor.FilterKey]any{
 					&mapper.Operator{Operation: "_and"}: []any{
 						map[connor.FilterKey]any{
-							&mapper.PropertyIndex{Index: 1}: map[connor.FilterKey]any{
+							&mapper.PropertyIndex{Index: authorAgeInd}: map[connor.FilterKey]any{
 								&mapper.Operator{Operation: "_lt"}: 64,
 							},
 						},
 						map[connor.FilterKey]any{
-							&mapper.PropertyIndex{Index: 2}: map[connor.FilterKey]any{
-								&mapper.PropertyIndex{Index: 1}: map[connor.FilterKey]any{
+							&mapper.PropertyIndex{Index: authorPublishedInd}: map[connor.FilterKey]any{
+								&mapper.PropertyIndex{Index: bookRatingInd}: map[connor.FilterKey]any{
 									&mapper.Operator{Operation: "_lt"}: 4.8,
 								},
 							},
diff --git a/planner/filter/extract_properties.go b/planner/filter/extract_properties.go
new file mode 100644
index 0000000000..4c3e6bb0be
--- /dev/null
+++ b/planner/filter/extract_properties.go
@@ -0,0 +1,79 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+package filter
+
+import (
+	"github.com/sourcenetwork/defradb/client/request"
+	"github.com/sourcenetwork/defradb/connor"
+	"github.com/sourcenetwork/defradb/planner/mapper"
+)
+
+// Property represents a single field that is being filtered on.
+// It contains the index of the field in the core.DocumentMapping,
+// as well as an index -> Property map of its sub-fields if the field is a related object.
+type Property struct {
+	Index  int
+	Fields map[int]Property
+}
+
+func (p Property) IsRelation() bool {
+	return len(p.Fields) > 0
+}
+
+// mergeProps merges two Property values, recursively combining their nested Fields maps.
+func mergeProps(p1, p2 Property) Property {
+	if p1.Index == 0 {
+		p1.Index = p2.Index
+	}
+	if p1.Fields == nil {
+		p1.Fields = p2.Fields
+	} else {
+		for k, v := range p2.Fields {
+			p1.Fields[k] = mergeProps(p1.Fields[k], v)
+		}
+	}
+	return p1
+}
+
+// ExtractProperties runs through the filter and returns an index -> Property map of the fields
+// being filtered on.
+func ExtractProperties(conditions map[connor.FilterKey]any) map[int]Property {
+	properties := map[int]Property{}
+	for k, v := range conditions {
+		switch typedKey := k.(type) {
+		case *mapper.PropertyIndex:
+			prop := properties[typedKey.Index]
+			prop.Index = typedKey.Index
+			relatedProps := ExtractProperties(v.(map[connor.FilterKey]any))
+			properties[typedKey.Index] = mergeProps(prop, Property{Fields: relatedProps})
+		case *mapper.Operator:
+			if typedKey.Operation == request.FilterOpAnd || typedKey.Operation == request.FilterOpOr {
+				compoundContent := v.([]any)
+				for _, compoundFilter := range compoundContent {
+					props := ExtractProperties(compoundFilter.(map[connor.FilterKey]any))
+					for _, prop := range props {
+						existingProp := properties[prop.Index]
+						properties[prop.Index] = mergeProps(existingProp, prop)
+					}
+				}
+			} else if typedKey.Operation == request.FilterOpNot {
+				props := ExtractProperties(v.(map[connor.FilterKey]any))
+				for _, prop := range props {
+					existingProp := properties[prop.Index]
+					properties[prop.Index] = mergeProps(existingProp, prop)
+				}
+			}
+		}
+	}
+	if len(properties) == 0 {
+		return nil
+	}
+	return properties
+}
diff --git a/planner/filter/extract_properties_test.go b/planner/filter/extract_properties_test.go
new file mode 100644
index 0000000000..c90dbe85b0
--- /dev/null
+++ b/planner/filter/extract_properties_test.go
@@ -0,0 +1,113 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
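(Editorial aside, not part of the patch: a hedged, worked example of what ExtractProperties returns. The literal indices mirror the test mapping defined later in util_test.go — name=0 and published=2 on Author, genre=1 on Book — and are assumptions for illustration only.)

package filter

import (
	"github.com/sourcenetwork/defradb/connor"
	"github.com/sourcenetwork/defradb/planner/mapper"
)

// exampleExtractProperties is an illustrative sketch only. It builds the
// mapped form of {name: {_eq: "John"}, published: {genre: {_eq: "Comedy"}}}
// and flattens it with ExtractProperties.
func exampleExtractProperties() map[int]Property {
	conditions := map[connor.FilterKey]any{
		// name: {_eq: "John"} -- a plain root field
		&mapper.PropertyIndex{Index: 0}: map[connor.FilterKey]any{
			&mapper.Operator{Operation: "_eq"}: "John",
		},
		// published: {genre: {_eq: "Comedy"}} -- a related object
		&mapper.PropertyIndex{Index: 2}: map[connor.FilterKey]any{
			&mapper.PropertyIndex{Index: 1}: map[connor.FilterKey]any{
				&mapper.Operator{Operation: "_eq"}: "Comedy",
			},
		},
	}
	props := ExtractProperties(conditions)
	// props[0] == Property{Index: 0}                          -- IsRelation() == false
	// props[2] == Property{Index: 2, Fields: {1: {Index: 1}}} -- IsRelation() == true
	return props
}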
+package filter
+
+import (
+	"testing"
+
+	"github.com/sourcenetwork/defradb/client/request"
+	"github.com/sourcenetwork/defradb/planner/mapper"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestExtractProperties(t *testing.T) {
+	tests := []struct {
+		name           string
+		inputFilter    map[string]any
+		expectedFilter map[int]Property
+	}{
+		{
+			name: "no nesting",
+			inputFilter: map[string]any{
+				"name": m("_eq", "John"),
+				"age":  m("_gt", 55),
+			},
+			expectedFilter: map[int]Property{
+				authorNameInd: {Index: authorNameInd},
+				authorAgeInd:  {Index: authorAgeInd},
+			},
+		},
+		{
+			name: "within _and, _or and _not",
+			inputFilter: r("_or",
+				m("name", m("_eq", "John")),
+				r("_and",
+					m("age", m("_gt", 55)),
+					m("_not",
+						r("_or",
+							m("verified", m("_eq", true)),
+						),
+					),
+				),
+			),
+			expectedFilter: map[int]Property{
+				authorNameInd:     {Index: authorNameInd},
+				authorAgeInd:      {Index: authorAgeInd},
+				authorVerifiedInd: {Index: authorVerifiedInd},
+			},
+		},
+		{
+			name: "related field",
+			inputFilter: r("_or",
+				m("name", m("_eq", "John")),
+				m("published", m("genre", m("_eq", "Comedy"))),
+			),
+			expectedFilter: map[int]Property{
+				authorNameInd: {Index: authorNameInd},
+				authorPublishedInd: {
+					Index:  authorPublishedInd,
+					Fields: map[int]Property{bookGenreInd: {Index: bookGenreInd}},
+				},
+			},
+		},
+		{
+			name: "several related fields with deeper nesting",
+			inputFilter: r("_or",
+				m("name", m("_eq", "John")),
+				m("published", m("genre", m("_eq", "Comedy"))),
+				m("published", m("rating", m("_gt", 55))),
+				m("published", m("stores", m("name", m("_eq", "Amazon")))),
+				m("published", m("stores", m("address", m("_gt", "5th Avenue")))),
+			),
+			expectedFilter: map[int]Property{
+				authorNameInd: {Index: authorNameInd},
+				authorPublishedInd: {
+					Index: authorPublishedInd,
+					Fields: map[int]Property{
+						bookGenreInd:  {Index: bookGenreInd},
+						bookRatingInd: {Index: bookRatingInd},
+						bookStoresInd: {
+							Index: bookStoresInd,
+							Fields: map[int]Property{
+								storeNameInd:    {Index: storeNameInd},
+								storeAddressInd: {Index: storeAddressInd},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	mapping := getDocMapping()
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			inputFilter := mapper.ToFilter(request.Filter{Conditions: test.inputFilter}, mapping)
+			actualFilter := ExtractProperties(inputFilter.Conditions)
+			assert.Equal(t, test.expectedFilter, actualFilter)
+		})
+	}
+}
+
+func TestExtractPropertiesOfNullFilter(t *testing.T) {
+	actualProps := ExtractProperties(nil)
+	assert.Nil(t, actualProps)
+}
diff --git a/planner/filter/remove_field.go b/planner/filter/remove_field.go
index 5c80ffc96c..5e9f2f532e 100644
--- a/planner/filter/remove_field.go
+++ b/planner/filter/remove_field.go
@@ -14,13 +14,17 @@ import (
 )
 
 // RemoveField removes the given field from the provided filter.
-func RemoveField(filter *mapper.Filter, field mapper.Field) {
-	if filter == nil {
+// Multiple fields can be passed to remove a condition on a field nested within related objects.
+func RemoveField(filter *mapper.Filter, fields ...mapper.Field) { + if filter == nil || len(fields) == 0 { return } - conditionKey := &mapper.PropertyIndex{ - Index: field.Index, + var conditionKeys []*mapper.PropertyIndex + for _, field := range fields { + conditionKeys = append(conditionKeys, &mapper.PropertyIndex{ + Index: field.Index, + }) } - traverseFilterByProperty(conditionKey, filter.Conditions, true) + traverseFilterByProperty(conditionKeys, filter.Conditions, true) } diff --git a/planner/filter/remove_field_test.go b/planner/filter/remove_field_test.go index 2b6e8cdd3a..8a34999e60 100644 --- a/planner/filter/remove_field_test.go +++ b/planner/filter/remove_field_test.go @@ -13,13 +13,14 @@ import ( "testing" "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/connor" "github.com/sourcenetwork/defradb/planner/mapper" ) func TestRemoveFieldFromFilter(t *testing.T) { tests := []struct { name string - inputField mapper.Field + inputField []mapper.Field inputFilter map[string]any expectedFilter map[string]any }{ @@ -29,7 +30,7 @@ func TestRemoveFieldFromFilter(t *testing.T) { "name": m("_eq", "John"), "age": m("_gt", 55), }, - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: m("name", m("_eq", "John")), }, { @@ -38,7 +39,7 @@ func TestRemoveFieldFromFilter(t *testing.T) { m("name", m("_eq", "John")), m("age", m("_gt", 55)), ), - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: r("_and", m("name", m("_eq", "John")), ), @@ -57,7 +58,7 @@ func TestRemoveFieldFromFilter(t *testing.T) { m("age", m("_lt", 55)), ), ), - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: r("_and", r("_or", r("_and", @@ -69,13 +70,69 @@ func TestRemoveFieldFromFilter(t *testing.T) { ), ), }, + { + name: "remove _or/_and if only element", + inputFilter: r("_and", + r("_or", + r("_and", + m("age", m("_gt", 30)), + ), + ), + r("_or", + m("age", m("_lt", 55)), + ), + m("name", m("_eq", "Islam")), + ), + inputField: []mapper.Field{{Index: authorAgeInd}}, + expectedFilter: r("_and", + m("name", m("_eq", "Islam")), + ), + }, + { + name: "field of related object", + inputFilter: r("_and", + r("_or", + r("_and", + m("published", m("rating", m("_gt", 4.0))), + m("age", m("_gt", 30)), + ), + ), + m("published", m("genre", m("_eq", "Comedy"))), + m("name", m("_eq", "John")), + ), + inputField: []mapper.Field{{Index: authorPublishedInd}, {Index: bookRatingInd}}, + expectedFilter: r("_and", + r("_or", + r("_and", + m("age", m("_gt", 30)), + ), + ), + m("published", m("genre", m("_eq", "Comedy"))), + m("name", m("_eq", "John")), + ), + }, + { + name: "field of related object (deeper)", + inputFilter: r("_and", + m("age", m("_gt", 30)), + m("published", m("stores", m("address", m("_eq", "123 Main St")))), + m("published", m("stores", m("name", m("_eq", "Barnes & Noble")))), + m("published", m("genre", m("_eq", "Comedy"))), + ), + inputField: []mapper.Field{{Index: authorPublishedInd}, {Index: bookStoresInd}, {Index: storeAddressInd}}, + expectedFilter: r("_and", + m("age", m("_gt", 30)), + m("published", m("stores", m("name", m("_eq", "Barnes & Noble")))), + m("published", m("genre", m("_eq", "Comedy"))), + ), + }, } mapping := getDocMapping() for _, test := range tests { t.Run(test.name, func(t *testing.T) { inputFilter := mapper.ToFilter(request.Filter{Conditions: test.inputFilter}, mapping) - 
RemoveField(inputFilter, test.inputField) + RemoveField(inputFilter, test.inputField...) expectedFilter := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter}, mapping) AssertEqualFilterMap(t, expectedFilter.Conditions, inputFilter.Conditions) }) @@ -85,3 +142,17 @@ func TestRemoveFieldFromFilter(t *testing.T) { func TestRemoveFieldFromNullFilter(t *testing.T) { RemoveField(nil, mapper.Field{Index: 1}) } + +func TestRemoveFieldWithNoFieldGiven(t *testing.T) { + getFilter := func() *mapper.Filter { + f := mapper.NewFilter() + f.Conditions = map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: 0}: &mapper.Operator{Operation: "_eq"}, + } + return f + } + f := getFilter() + RemoveField(f) + + AssertEqualFilter(t, getFilter(), f) +} diff --git a/planner/filter/split.go b/planner/filter/split.go index bba822145a..1ef153746b 100644 --- a/planner/filter/split.go +++ b/planner/filter/split.go @@ -27,8 +27,12 @@ func SplitByField(filter *mapper.Filter, field mapper.Field) (*mapper.Filter, *m return nil, nil } - splitF := copyField(filter, field) + splitF := CopyField(filter, field) RemoveField(filter, field) + if len(filter.Conditions) == 0 { + filter = nil + } + return filter, splitF } diff --git a/planner/filter/split_test.go b/planner/filter/split_test.go index 1bcbecffb7..86fbb0b44a 100644 --- a/planner/filter/split_test.go +++ b/planner/filter/split_test.go @@ -32,10 +32,28 @@ func TestSplitFilter(t *testing.T) { "name": m("_eq", "John"), "age": m("_gt", 55), }, - inputField: mapper.Field{Index: 1}, // age + inputField: mapper.Field{Index: authorAgeInd}, expectedFilter1: m("name", m("_eq", "John")), expectedFilter2: m("age", m("_gt", 55)), }, + { + name: "the only field", + inputFilter: map[string]any{ + "age": m("_gt", 55), + }, + inputField: mapper.Field{Index: authorAgeInd}, + expectedFilter1: nil, + expectedFilter2: m("age", m("_gt", 55)), + }, + { + name: "no field to delete", + inputFilter: map[string]any{ + "name": m("_eq", "John"), + }, + inputField: mapper.Field{Index: authorAgeInd}, + expectedFilter1: m("name", m("_eq", "John")), + expectedFilter2: nil, + }, } mapping := getDocMapping() @@ -45,14 +63,18 @@ func TestSplitFilter(t *testing.T) { actualFilter1, actualFilter2 := SplitByField(inputFilter, test.inputField) expectedFilter1 := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter1}, mapping) expectedFilter2 := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter2}, mapping) - AssertEqualFilterMap(t, expectedFilter1.Conditions, actualFilter1.Conditions) - AssertEqualFilterMap(t, expectedFilter2.Conditions, actualFilter2.Conditions) + if expectedFilter1 != nil || actualFilter1 != nil { + AssertEqualFilterMap(t, expectedFilter1.Conditions, actualFilter1.Conditions) + } + if expectedFilter2 != nil || actualFilter2 != nil { + AssertEqualFilterMap(t, expectedFilter2.Conditions, actualFilter2.Conditions) + } }) } } func TestSplitNullFilter(t *testing.T) { - actualFilter1, actualFilter2 := SplitByField(nil, mapper.Field{Index: 1}) + actualFilter1, actualFilter2 := SplitByField(nil, mapper.Field{Index: authorAgeInd}) assert.Nil(t, actualFilter1) assert.Nil(t, actualFilter2) } diff --git a/planner/filter/unwrap_relation.go b/planner/filter/unwrap_relation.go new file mode 100644 index 0000000000..aa1be2e25d --- /dev/null +++ b/planner/filter/unwrap_relation.go @@ -0,0 +1,86 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. +package filter + +import ( + "github.com/sourcenetwork/defradb/connor" + "github.com/sourcenetwork/defradb/planner/mapper" +) + +// UnwrapRelation runs through the filter and returns a new filter with only the +// fields of a given relation object +// Example: +// +// { +// "published": { +// "rating": { +// "_gt": 4.0 +// } +// } +// } +// +// with given "published" field will return +// +// { +// "rating": { +// "_gt": 4.0 +// } +// } +func UnwrapRelation(filter *mapper.Filter, field mapper.Field) *mapper.Filter { + if filter == nil { + return nil + } + conditionKey := &mapper.PropertyIndex{ + Index: field.Index, + } + + resultFilter := &mapper.Filter{} + conditionMap := traverseFilterAndExtract(conditionKey, filter.Conditions, false) + if len(conditionMap) > 0 { + resultFilter.Conditions = conditionMap + return resultFilter + } + return nil +} + +func traverseFilterAndExtract( + key *mapper.PropertyIndex, + conditions map[connor.FilterKey]any, + shouldDelete bool, +) map[connor.FilterKey]any { + result := make(map[connor.FilterKey]any) + for targetKey, clause := range conditions { + if targetKey.Equal(key) { + clauseMap := clause.(map[connor.FilterKey]any) + for k, v := range clauseMap { + result[k] = v + } + } else if opKey, isOpKey := targetKey.(*mapper.Operator); isOpKey { + clauseArr, isArr := clause.([]any) + if isArr { + resultArr := make([]any, 0) + for _, elementClause := range clauseArr { + elementMap, ok := elementClause.(map[connor.FilterKey]any) + if !ok { + continue + } + compoundCond := traverseFilterAndExtract(key, elementMap, shouldDelete) + if len(compoundCond) > 0 { + resultArr = append(resultArr, compoundCond) + } + } + if len(resultArr) > 0 { + result[opKey] = resultArr + } + } + } + } + return result +} diff --git a/planner/filter/unwrap_relation_test.go b/planner/filter/unwrap_relation_test.go new file mode 100644 index 0000000000..a7446f9d30 --- /dev/null +++ b/planner/filter/unwrap_relation_test.go @@ -0,0 +1,99 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
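(Editorial aside, not part of the patch: UnwrapRelation composes with CopyField from copy_field.go; the sketch below mirrors the combination that tryOptimizeJoinDirection uses later in planner/planner.go. The authorPublishedInd and bookRatingInd constants are the test-mapping indices added in util_test.go below; treat the function as illustrative only.)

package filter

import "github.com/sourcenetwork/defradb/planner/mapper"

// pushableSubFilter is an illustrative sketch, not patch code. Starting
// from an Author filter that mixes root conditions with conditions on the
// related "published" Book, it keeps only published.rating conditions and
// re-expresses them in the Book document mapping.
func pushableSubFilter(authorFilter *mapper.Filter) *mapper.Filter {
	published := mapper.Field{Index: authorPublishedInd, Name: "published"}
	rating := mapper.Field{Index: bookRatingInd, Name: "rating"}

	// First keep only the published.rating branch, preserving any
	// surrounding _and/_or structure...
	onlyRating := CopyField(authorFilter, published, rating)

	// ...then strip the outer "published" wrapper so the conditions are
	// indexed by Book fields rather than Author fields.
	return UnwrapRelation(onlyRating, published)
}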
+package filter
+
+import (
+	"testing"
+
+	"github.com/sourcenetwork/defradb/client/request"
+	"github.com/sourcenetwork/defradb/connor"
+	"github.com/sourcenetwork/defradb/planner/mapper"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestUnwrapRelation(t *testing.T) {
+	tests := []struct {
+		name           string
+		inputFilter    map[string]any
+		expectedFilter map[string]any
+	}{
+		{
+			name:           "simple",
+			inputFilter:    m("published", m("rating", m("_gt", 4.0))),
+			expectedFilter: m("rating", m("_gt", 4.0)),
+		},
+		{
+			name: "no relation object",
+			inputFilter: map[string]any{
+				"name": m("_eq", "John"),
+				"age":  m("_gt", 55),
+			},
+			expectedFilter: nil,
+		},
+		{
+			name: "within _or and _and",
+			inputFilter: r("_and",
+				r("_or",
+					r("_and",
+						m("name", m("_eq", "John")),
+						m("age", m("_gt", 30)),
+						m("published", m("stores", m("address", m("_eq", "123 Main St")))),
+						m("published", m("rating", m("_gt", 4.0))),
+					),
+				),
+				r("_or",
+					m("published", m("stores", m("address", m("_eq", "2 Ave")))),
+				),
+				m("published", m("genre", m("_eq", "Comedy"))),
+			),
+			expectedFilter: r("_and",
+				r("_or",
+					r("_and",
+						m("stores", m("address", m("_eq", "123 Main St"))),
+						m("rating", m("_gt", 4.0)),
+					),
+				),
+				r("_or",
+					m("stores", m("address", m("_eq", "2 Ave"))),
+				),
+				m("genre", m("_eq", "Comedy")),
+			),
+		},
+	}
+
+	mapping := getDocMapping()
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			inputFilter := mapper.ToFilter(request.Filter{Conditions: test.inputFilter}, mapping)
+			actualFilter := UnwrapRelation(inputFilter, mapper.Field{Index: authorPublishedInd})
+			childMapping := mapping.ChildMappings[authorPublishedInd]
+			expectedFilter := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter}, childMapping)
+			if expectedFilter == nil && actualFilter == nil {
+				return
+			}
+			AssertEqualFilterMap(t, expectedFilter.Conditions, actualFilter.Conditions)
+		})
+	}
+}
+
+func TestUnwrapRelationOfNullFilter(t *testing.T) {
+	actualFilter := UnwrapRelation(nil, mapper.Field{Index: 1})
+	assert.Nil(t, actualFilter)
+}
+
+func TestUnwrapRelationWithNoRelationObjectInFilter(t *testing.T) {
+	filter := mapper.NewFilter()
+	filter.Conditions = map[connor.FilterKey]any{
+		&mapper.PropertyIndex{Index: 0}: &mapper.Operator{Operation: "_eq"},
+	}
+	actualFilter := UnwrapRelation(filter, mapper.Field{Index: authorPublishedInd})
+	assert.Nil(t, actualFilter)
+}
diff --git a/planner/filter/util_test.go b/planner/filter/util_test.go
index e8860081c8..19b367172c 100644
--- a/planner/filter/util_test.go
+++ b/planner/filter/util_test.go
@@ -130,11 +130,55 @@ func r(op string, vals ...any) map[string]any {
 	return m(op, vals)
 }
 
+const (
+	authorNameInd = iota
+	authorAgeInd
+	authorPublishedInd
+	authorVerifiedInd
+	authorNumFields
+)
+
+const (
+	bookRatingInd = iota
+	bookGenreInd
+	bookNameInd
+	bookStoresInd
+	bookNumFields
+)
+
+const (
+	storeAddressInd = iota
+	storeNameInd
+	storeNumFields
+)
+
 func getDocMapping() *core.DocumentMapping {
+	bookChildMappings := make([]*core.DocumentMapping, bookNumFields)
+	bookChildMappings[bookStoresInd] = &core.DocumentMapping{
+		IndexesByName: map[string][]int{
+			"address": {storeAddressInd},
+			"name":    {storeNameInd},
+		},
+	}
+
+	authorChildMappings := make([]*core.DocumentMapping, authorNumFields)
+	authorChildMappings[authorPublishedInd] = &core.DocumentMapping{
+		IndexesByName: map[string][]int{
+			"rating": {bookRatingInd},
+			"genre":  {bookGenreInd},
+			"name":   {bookNameInd},
+			"stores": {bookStoresInd},
+		},
+		ChildMappings: bookChildMappings,
+	}
+
 	return &core.DocumentMapping{
-		IndexesByName: 
map[string][]int{"name": {0}, "age": {1}, "published": {2}, "verified": {3}}, - ChildMappings: []*core.DocumentMapping{nil, nil, { - IndexesByName: map[string][]int{"rating": {11}, "genre": {12}}, - }}, + IndexesByName: map[string][]int{ + "name": {authorNameInd}, + "age": {authorAgeInd}, + "published": {authorPublishedInd}, + "verified": {authorVerifiedInd}, + }, + ChildMappings: authorChildMappings, } } diff --git a/planner/mapper/targetable.go b/planner/mapper/targetable.go index bcfdb02ef8..0b571e6830 100644 --- a/planner/mapper/targetable.go +++ b/planner/mapper/targetable.go @@ -91,6 +91,17 @@ func (f *Filter) ToMap(mapping *core.DocumentMapping) map[string]any { return filterObjectToMap(mapping, f.Conditions) } +// HasIndex returns true if the filter has a condition that targets the +// a property with the given index. +func (f *Filter) HasIndex(index int) bool { + for k := range f.Conditions { + if propIndex, isOk := k.(*PropertyIndex); isOk && propIndex.Index == index { + return true + } + } + return false +} + func filterObjectToMap(mapping *core.DocumentMapping, obj map[connor.FilterKey]any) map[string]any { outmap := make(map[string]any) if obj == nil { diff --git a/planner/planner.go b/planner/planner.go index bcb0653633..7821b5aaaf 100644 --- a/planner/planner.go +++ b/planner/planner.go @@ -15,8 +15,10 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/connor" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/planner/filter" "github.com/sourcenetwork/defradb/planner/mapper" ) @@ -296,16 +298,82 @@ func (p *Planner) expandMultiNode(multiNode MultiNode, parentPlan *selectTopNode return nil } +// expandTypeIndexJoinPlan does a plan graph expansion and other optimizations on typeIndexJoin. 
func (p *Planner) expandTypeIndexJoinPlan(plan *typeIndexJoin, parentPlan *selectTopNode) error { switch node := plan.joinPlan.(type) { case *typeJoinOne: - return p.expandPlan(node.subType, parentPlan) + return p.expandTypeJoin(&node.invertibleTypeJoin, parentPlan) case *typeJoinMany: - return p.expandPlan(node.subType, parentPlan) + return p.expandTypeJoin(&node.invertibleTypeJoin, parentPlan) } return client.NewErrUnhandledType("join plan", plan.joinPlan) } +func findFilteredByRelationFields( + conditions map[connor.FilterKey]any, + mapping *core.DocumentMapping, +) map[string]int { + filterProperties := filter.ExtractProperties(conditions) + filteredSubFields := make(map[string]int) + for _, prop := range filterProperties { + if childMapping := mapping.ChildMappings[prop.Index]; childMapping != nil { + if !prop.IsRelation() { + continue + } + for _, subProp := range prop.Fields { + for fieldName, indices := range childMapping.IndexesByName { + if indices[0] == subProp.Index { + filteredSubFields[fieldName] = subProp.Index + } + } + } + } + } + return filteredSubFields +} + +func (p *Planner) tryOptimizeJoinDirection(node *invertibleTypeJoin, parentPlan *selectTopNode) error { + filteredSubFields := findFilteredByRelationFields( + parentPlan.selectNode.filter.Conditions, + node.documentMapping, + ) + slct := node.subType.(*selectTopNode).selectNode + desc := slct.sourceInfo.collectionDescription + indexedFields := desc.CollectIndexedFields(&desc.Schema) + for _, indField := range indexedFields { + if ind, ok := filteredSubFields[indField.Name]; ok { + subInd := node.documentMapping.FirstIndexOfName(node.subTypeName) + relatedField := mapper.Field{Name: node.subTypeName, Index: subInd} + fieldFilter := filter.UnwrapRelation(filter.CopyField( + parentPlan.selectNode.filter, + relatedField, + mapper.Field{Name: indField.Name, Index: ind}, + ), relatedField) + err := node.invertJoinDirectionWithIndex(fieldFilter, indField) + if err != nil { + return err + } + break + } + } + + return nil +} + +// expandTypeJoin does a plan graph expansion and other optimizations on invertibleTypeJoin. 
+func (p *Planner) expandTypeJoin(node *invertibleTypeJoin, parentPlan *selectTopNode) error { + if parentPlan.selectNode.filter == nil { + return p.expandPlan(node.subType, parentPlan) + } + + err := p.tryOptimizeJoinDirection(node, parentPlan) + if err != nil { + return err + } + + return p.expandPlan(node.subType, parentPlan) +} + func (p *Planner) expandGroupNodePlan(topNodeSelect *selectTopNode) error { var sourceNode planNode var hasJoinNode bool @@ -406,9 +474,9 @@ func (p *Planner) walkAndReplacePlan(planNode, target, replace planNode) error { case *selectNode: node.source = replace case *typeJoinOne: - node.root = replace + node.replaceRoot(replace) case *typeJoinMany: - node.root = replace + node.replaceRoot(replace) case *pipeNode: /* Do nothing - pipe nodes should not be replaced */ // @todo: add more nodes that apply here diff --git a/planner/scan.go b/planner/scan.go index 256711b34e..f9a80705cb 100644 --- a/planner/scan.go +++ b/planner/scan.go @@ -11,12 +11,15 @@ package planner import ( + "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/db/fetcher" "github.com/sourcenetwork/defradb/lens" + "github.com/sourcenetwork/defradb/planner/filter" "github.com/sourcenetwork/defradb/planner/mapper" "github.com/sourcenetwork/defradb/request/graphql/parser" ) @@ -90,7 +93,7 @@ func (n *scanNode) initFields(fields []mapper.Requestable) error { n.tryAddField(requestable.GetName()) // select might have its own select fields and filters fields case *mapper.Select: - n.tryAddField(requestable.Field.Name + "_id") // foreign key for type joins + n.tryAddField(requestable.Field.Name + request.RelatedObjectID) // foreign key for type joins err := n.initFields(requestable.Fields) if err != nil { return err @@ -133,6 +136,32 @@ func (n *scanNode) tryAddField(fieldName string) bool { return true } +func (scan *scanNode) initFetcher( + cid immutable.Option[string], + indexedField immutable.Option[client.FieldDescription], +) { + var f fetcher.Fetcher + if cid.HasValue() { + f = new(fetcher.VersionedFetcher) + } else { + f = new(fetcher.DocumentFetcher) + + if indexedField.HasValue() { + typeIndex := scan.documentMapping.FirstIndexOfName(indexedField.Value().Name) + field := mapper.Field{Index: typeIndex, Name: indexedField.Value().Name} + var indexFilter *mapper.Filter + scan.filter, indexFilter = filter.SplitByField(scan.filter, field) + if indexFilter != nil { + fieldDesc, _ := scan.desc.Schema.GetField(indexedField.Value().Name) + f = fetcher.NewIndexFetcher(f, fieldDesc, indexFilter) + } + } + + f = lens.NewFetcher(f, scan.p.db.LensRegistry()) + } + scan.fetcher = f +} + // Start starts the internal logic of the scanner // like the DocumentFetcher, and more. 
func (n *scanNode) Start() error { @@ -237,6 +266,7 @@ func (n *scanNode) executeExplain() map[string]any { "iterations": n.execInfo.iterations, "docFetches": n.execInfo.fetches.DocsFetched, "fieldFetches": n.execInfo.fetches.FieldsFetched, + "indexFetches": n.execInfo.fetches.IndexesFetched, } } @@ -258,26 +288,17 @@ func (n *scanNode) Explain(explainType request.ExplainType) (map[string]any, err // Merge implements mergeNode func (n *scanNode) Merge() bool { return true } -func (p *Planner) Scan(parsed *mapper.Select) (*scanNode, error) { - var f fetcher.Fetcher - if parsed.Cid.HasValue() { - f = new(fetcher.VersionedFetcher) - } else { - f = new(fetcher.DocumentFetcher) - f = lens.NewFetcher(f, p.db.LensRegistry()) - } +func (p *Planner) Scan( + mapperSelect *mapper.Select, + colDesc client.CollectionDescription, +) (*scanNode, error) { scan := &scanNode{ p: p, - fetcher: f, - slct: parsed, - docMapper: docMapper{parsed.DocumentMapping}, + slct: mapperSelect, + docMapper: docMapper{mapperSelect.DocumentMapping}, } - colDesc, err := p.getCollectionDesc(parsed.CollectionName) - if err != nil { - return nil, err - } - err = scan.initCollection(colDesc) + err := scan.initCollection(colDesc) if err != nil { return nil, err } @@ -294,8 +315,6 @@ func (p *Planner) Scan(parsed *mapper.Select) (*scanNode, error) { // we call Next() on the underlying scanNode only // once every 2 Next() calls on the multiScan type multiScanNode struct { - docMapper - scanNode *scanNode numReaders int numCalls int @@ -349,6 +368,10 @@ func (n *multiScanNode) Close() error { return n.scanNode.Close() } +func (n *multiScanNode) DocumentMap() *core.DocumentMapping { + return n.scanNode.DocumentMap() +} + func (n *multiScanNode) addReader() { n.numReaders++ } diff --git a/planner/select.go b/planner/select.go index 4fb9b143f2..21524ed31f 100644 --- a/planner/select.go +++ b/planner/select.go @@ -14,6 +14,7 @@ import ( cid "github.com/ipfs/go-cid" "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/db/base" @@ -250,10 +251,10 @@ func (n *selectNode) initSource() ([]aggregateNode, error) { // apply the root filter to the source // and rootSubType filters to the selectNode // @todo: simulate splitting for now - origScan, ok := n.source.(*scanNode) - if ok { - origScan.filter = n.filter + origScan, isScanNode := n.source.(*scanNode) + if isScanNode { origScan.showDeleted = n.selectReq.ShowDeleted + origScan.filter = n.filter n.filter = nil // If we have both a DocKey and a CID, then we need to run @@ -285,7 +286,31 @@ func (n *selectNode) initSource() ([]aggregateNode, error) { } } - return n.initFields(n.selectReq) + aggregates, err := n.initFields(n.selectReq) + if err != nil { + return nil, err + } + + if isScanNode { + origScan.initFetcher(n.selectReq.Cid, findFilteredByIndexedField(origScan)) + } + + return aggregates, nil +} + +func findFilteredByIndexedField(scanNode *scanNode) immutable.Option[client.FieldDescription] { + if scanNode.filter != nil { + indexedFields := scanNode.desc.CollectIndexedFields(&scanNode.desc.Schema) + for i := range indexedFields { + typeIndex := scanNode.documentMapping.FirstIndexOfName(indexedFields[i].Name) + if scanNode.filter.HasIndex(typeIndex) { + // we return the first found indexed field to keep it simple for now + // more sophisticated optimization logic can be added later + return immutable.Some(indexedFields[i]) + } + } + } + 
return immutable.None[client.FieldDescription]() } func (n *selectNode) initFields(selectReq *mapper.Select) ([]aggregateNode, error) { @@ -375,31 +400,6 @@ func (n *selectNode) addTypeIndexJoin(subSelect *mapper.Select) error { func (n *selectNode) Source() planNode { return n.source } -// func appendSource() {} - -// func (n *selectNode) initRender( -// fields []*client.FieldDescription, -// aliases []string, -//) error { -// return n.planner.render(fields, aliases) -// } - -// SubSelect is used for creating Select nodes used on sub selections, -// not to be used on the top level selection node. -// This allows us to disable rendering on all sub Select nodes -// and only run it at the end on the top level select node. -func (p *Planner) SubSelect(selectReq *mapper.Select) (planNode, error) { - plan, err := p.Select(selectReq) - if err != nil { - return nil, err - } - - // if this is a sub select plan, we need to remove the render node - // as the final top level selectTopNode will handle all sub renders - top := plan.(*selectTopNode) - return top, nil -} - func (p *Planner) SelectFromSource( selectReq *mapper.Select, source planNode, @@ -424,12 +424,12 @@ func (p *Planner) SelectFromSource( } if fromCollection { - desc, err := p.getCollectionDesc(selectReq.Name) + col, err := p.db.GetCollectionByName(p.ctx, selectReq.Name) if err != nil { return nil, err } - s.sourceInfo = sourceInfo{desc} + s.sourceInfo = sourceInfo{col.Description()} } aggregates, err := s.initFields(selectReq) diff --git a/planner/sum.go b/planner/sum.go index 0e1690898e..c5ef06a03a 100644 --- a/planner/sum.go +++ b/planner/sum.go @@ -77,12 +77,12 @@ func (p *Planner) isValueFloat( } if !source.ChildTarget.HasValue { - parentDescription, err := p.getCollectionDesc(parent.CollectionName) + parentCol, err := p.db.GetCollectionByName(p.ctx, parent.CollectionName) if err != nil { return false, err } - fieldDescription, fieldDescriptionFound := parentDescription.Schema.GetField(source.Name) + fieldDescription, fieldDescriptionFound := parentCol.Description().Schema.GetField(source.Name) if !fieldDescriptionFound { return false, client.NewErrFieldNotExist(source.Name) } @@ -125,12 +125,12 @@ func (p *Planner) isValueFloat( return false, nil } - childCollectionDescription, err := p.getCollectionDesc(child.CollectionName) + childCol, err := p.db.GetCollectionByName(p.ctx, child.CollectionName) if err != nil { return false, err } - fieldDescription, fieldDescriptionFound := childCollectionDescription.Schema.GetField(source.ChildTarget.Name) + fieldDescription, fieldDescriptionFound := childCol.Description().Schema.GetField(source.ChildTarget.Name) if !fieldDescriptionFound { return false, client.NewErrFieldNotExist(source.ChildTarget.Name) } diff --git a/planner/type_join.go b/planner/type_join.go index ee771b01fc..6e5d9a0d49 100644 --- a/planner/type_join.go +++ b/planner/type_join.go @@ -153,14 +153,14 @@ func (n *typeIndexJoin) simpleExplain() (map[string]any, error) { switch joinType := n.joinPlan.(type) { case *typeJoinOne: // Add the direction attribute. - if joinType.primary { - simpleExplainMap[joinDirectionLabel] = joinDirectionPrimaryLabel - } else { + if joinType.isSecondary { simpleExplainMap[joinDirectionLabel] = joinDirectionSecondaryLabel + } else { + simpleExplainMap[joinDirectionLabel] = joinDirectionPrimaryLabel } // Add the attribute(s). 
- simpleExplainMap[joinRootLabel] = joinType.subTypeFieldName + simpleExplainMap[joinRootLabel] = joinType.rootName simpleExplainMap[joinSubTypeNameLabel] = joinType.subTypeName subTypeExplainGraph, err := buildSimpleExplainGraph(joinType.subType) @@ -199,9 +199,24 @@ func (n *typeIndexJoin) Explain(explainType request.ExplainType) (map[string]any return n.simpleExplain() case request.ExecuteExplain: - return map[string]any{ + result := map[string]any{ "iterations": n.execInfo.iterations, - }, nil + } + var subScan *scanNode + if joinMany, isJoinMany := n.joinPlan.(*typeJoinMany); isJoinMany { + subScan = getScanNode(joinMany.subType) + } + if joinOne, isJoinOne := n.joinPlan.(*typeJoinOne); isJoinOne { + subScan = getScanNode(joinOne.subType) + } + if subScan != nil { + subScanExplain, err := subScan.Explain(explainType) + if err != nil { + return nil, err + } + result["subTypeScanNode"] = subScanExplain + } + return result, nil default: return nil, ErrUnknownExplainRequestType @@ -214,22 +229,7 @@ func (n *typeIndexJoin) Merge() bool { return true } // typeJoinOne is the plan node for a type index join // where the root type is the primary in a one-to-one relation request. type typeJoinOne struct { - documentIterator - docMapper - - p *Planner - - root planNode - subType planNode - - subTypeName string - subTypeFieldName string - - primary bool - secondaryFieldIndex immutable.Option[int] - - spans core.Spans - subSelect *mapper.Select + invertibleTypeJoin } func (p *Planner) makeTypeJoinOne( @@ -239,7 +239,7 @@ func (p *Planner) makeTypeJoinOne( ) (*typeJoinOne, error) { prepareScanNodeFilterForTypeJoin(parent, source, subType) - selectPlan, err := p.SubSelect(subType) + selectPlan, err := p.Select(subType) if err != nil { return nil, err } @@ -254,12 +254,12 @@ func (p *Planner) makeTypeJoinOne( // check if the field we're querying is the primary side of the relation isPrimary := subTypeFieldDesc.RelationType.IsSet(client.Relation_Type_Primary) - subTypeCollectionDesc, err := p.getCollectionDesc(subType.CollectionName) + subTypeCol, err := p.db.GetCollectionByName(p.ctx, subType.CollectionName) if err != nil { return nil, err } - subTypeField, subTypeFieldNameFound := subTypeCollectionDesc.GetFieldByRelation( + subTypeField, subTypeFieldNameFound := subTypeCol.Description().GetFieldByRelation( subTypeFieldDesc.RelationName, parent.sourceInfo.collectionDescription.Name, subTypeFieldDesc.Name, @@ -276,16 +276,26 @@ func (p *Planner) makeTypeJoinOne( ) } + dir := joinDirection{ + firstNode: source, + secondNode: selectPlan, + secondaryField: subTypeField.Name + request.RelatedObjectID, + primaryField: subTypeFieldDesc.Name + request.RelatedObjectID, + } + return &typeJoinOne{ - p: p, - root: source, - subSelect: subType, - subTypeName: subType.Name, - subTypeFieldName: subTypeField.Name, - subType: selectPlan, - primary: isPrimary, - secondaryFieldIndex: secondaryFieldIndex, - docMapper: docMapper{parent.documentMapping}, + invertibleTypeJoin: invertibleTypeJoin{ + docMapper: docMapper{parent.documentMapping}, + root: source, + subType: selectPlan, + subSelect: subType, + rootName: subTypeField.Name, + subTypeName: subType.Name, + isSecondary: !isPrimary, + secondaryFieldIndex: secondaryFieldIndex, + secondaryFetchLimit: 1, + dir: dir, + }, }, nil } @@ -293,139 +303,36 @@ func (n *typeJoinOne) Kind() string { return "typeJoinOne" } -func (n *typeJoinOne) Init() error { - if err := n.subType.Init(); err != nil { - return err - } - return n.root.Init() -} - -func (n *typeJoinOne) Start() 
error { - if err := n.subType.Start(); err != nil { - return err - } - return n.root.Start() -} - -func (n *typeJoinOne) Spans(spans core.Spans) { - n.root.Spans(spans) -} - -func (n *typeJoinOne) Next() (bool, error) { - hasNext, err := n.root.Next() - if err != nil || !hasNext { - return hasNext, err - } - - doc := n.root.Value() - if n.primary { - n.currentValue, err = n.valuesPrimary(doc) - } else { - n.currentValue, err = n.valuesSecondary(doc) - } +func fetchDocsWithFieldValue(plan planNode, fieldName string, val any, limit uint) ([]core.Doc, error) { + propIndex := plan.DocumentMap().FirstIndexOfName(fieldName) + setSubTypeFilterToScanNode(plan, propIndex, val) - if err != nil { - return false, err + if err := plan.Init(); err != nil { + return nil, NewErrSubTypeInit(err) } - return true, nil -} - -func (n *typeJoinOne) valuesSecondary(doc core.Doc) (core.Doc, error) { - propIndex := n.subType.DocumentMap().FirstIndexOfName(n.subTypeFieldName + request.RelatedObjectID) - // using the doc._key as a filter - setSubTypeFilterToScanNode(n.subType, propIndex, doc.GetKey()) - - // We have to reset the scan node after appending the new key-filter - if err := n.subType.Init(); err != nil { - return doc, NewErrSubTypeInit(err) - } - - next, err := n.subType.Next() - if !next || err != nil { - return doc, err - } - - subDoc := n.subType.Value() - doc.Fields[n.subSelect.Index] = subDoc - - if n.secondaryFieldIndex.HasValue() { - doc.Fields[n.secondaryFieldIndex.Value()] = subDoc.GetKey() - } - - return doc, nil -} - -func (n *typeJoinOne) valuesPrimary(doc core.Doc) (core.Doc, error) { - // get the subtype doc key - subDocKey := n.docMapper.documentMapping.FirstOfName(doc, n.subTypeName+request.RelatedObjectID) - - subDocKeyStr, ok := subDocKey.(string) - if !ok { - return doc, nil - } - - // create the collection key for the sub doc - slct := n.subType.(*selectTopNode).selectNode - desc := slct.sourceInfo.collectionDescription - subKeyIndexKey := base.MakeDocKey(desc, subDocKeyStr) - - // reset span - n.spans = core.NewSpans(core.NewSpan(subKeyIndexKey, subKeyIndexKey.PrefixEnd())) - - // do a point lookup with the new span (index key) - n.subType.Spans(n.spans) - - // re-initialize the sub type plan - if err := n.subType.Init(); err != nil { - return doc, NewErrSubTypeInit(err) - } - - // if we don't find any docs from our point span lookup - // or if we encounter an error just return the base doc, - // with an empty map for the subDoc - next, err := n.subType.Next() + docs := make([]core.Doc, 0, limit) + for { + next, err := plan.Next() + if err != nil { + return nil, err + } + if !next { + break + } - if err != nil { - return doc, err - } + docs = append(docs, plan.Value()) - if !next { - return doc, nil + if limit > 0 && len(docs) >= int(limit) { + break + } } - subDoc := n.subType.Value() - doc.Fields[n.subSelect.Index] = subDoc - - return doc, nil -} - -func (n *typeJoinOne) Close() error { - err := n.root.Close() - if err != nil { - return err - } - return n.subType.Close() + return docs, nil } -func (n *typeJoinOne) Source() planNode { return n.root } - type typeJoinMany struct { - documentIterator - docMapper - - p *Planner - - // the main type that is at the parent level of the request. 
- root planNode - rootName string - // the index to use to gather the subtype IDs - index *scanNode - // the subtype plan to get the subtype docs - subType planNode - subTypeName string - - subSelect *mapper.Select + invertibleTypeJoin } func prepareScanNodeFilterForTypeJoin( @@ -470,7 +377,7 @@ func (p *Planner) makeTypeJoinMany( ) (*typeJoinMany, error) { prepareScanNodeFilterForTypeJoin(parent, source, subType) - selectPlan, err := p.SubSelect(subType) + selectPlan, err := p.Select(subType) if err != nil { return nil, err } @@ -480,12 +387,12 @@ func (p *Planner) makeTypeJoinMany( return nil, client.NewErrFieldNotExist(subType.Name) } - subTypeCollectionDesc, err := p.getCollectionDesc(subType.CollectionName) + subTypeCol, err := p.db.GetCollectionByName(p.ctx, subType.CollectionName) if err != nil { return nil, err } - rootField, rootNameFound := subTypeCollectionDesc.GetFieldByRelation( + rootField, rootNameFound := subTypeCol.Description().GetFieldByRelation( subTypeFieldDesc.RelationName, parent.sourceInfo.collectionDescription.Name, subTypeFieldDesc.Name, @@ -495,14 +402,25 @@ func (p *Planner) makeTypeJoinMany( return nil, client.NewErrFieldNotExist(subTypeFieldDesc.RelationName) } + dir := joinDirection{ + firstNode: source, + secondNode: selectPlan, + secondaryField: rootField.Name + request.RelatedObjectID, + primaryField: subTypeFieldDesc.Name + request.RelatedObjectID, + } + return &typeJoinMany{ - p: p, - root: source, - subSelect: subType, - subTypeName: subType.Name, - rootName: rootField.Name, - subType: selectPlan, - docMapper: docMapper{parent.documentMapping}, + invertibleTypeJoin: invertibleTypeJoin{ + docMapper: docMapper{parent.documentMapping}, + root: source, + subType: selectPlan, + subSelect: subType, + rootName: rootField.Name, + isSecondary: true, + subTypeName: subType.Name, + secondaryFetchLimit: 0, + dir: dir, + }, }, nil } @@ -510,77 +428,194 @@ func (n *typeJoinMany) Kind() string { return "typeJoinMany" } -func (n *typeJoinMany) Init() error { - if err := n.subType.Init(); err != nil { - return err +func fetchPrimaryDoc(node, subNode planNode, parentProp string) (bool, error) { + subDoc := subNode.Value() + ind := subNode.DocumentMap().FirstIndexOfName(parentProp) + + docKeyStr, isStr := subDoc.Fields[ind].(string) + if !isStr { + return false, nil + } + + scan := getScanNode(node) + if scan == nil { + return false, nil + } + rootDocKey := base.MakeDocKey(scan.desc, docKeyStr) + + spans := core.NewSpans(core.NewSpan(rootDocKey, rootDocKey.PrefixEnd())) + + node.Spans(spans) + + if err := node.Init(); err != nil { + return false, NewErrSubTypeInit(err) + } + + hasValue, err := node.Next() + + if err != nil || !hasValue { + return false, err + } + + return true, nil +} + +type joinDirection struct { + firstNode planNode + secondNode planNode + secondaryField string + primaryField string + isInverted bool +} + +func (dir *joinDirection) invert() { + dir.isInverted = !dir.isInverted + dir.firstNode, dir.secondNode = dir.secondNode, dir.firstNode + dir.secondaryField, dir.primaryField = dir.primaryField, dir.secondaryField +} + +type invertibleTypeJoin struct { + documentIterator + docMapper + + root planNode + subType planNode + rootName string + subTypeName string + + subSelect *mapper.Select + + isSecondary bool + secondaryFieldIndex immutable.Option[int] + secondaryFetchLimit uint + + dir joinDirection +} + +func (join *invertibleTypeJoin) replaceRoot(node planNode) { + join.root = node + if join.dir.isInverted { + join.dir.secondNode = node + } else { + 
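+		// in the default (non-inverted) direction the root is iterated first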
join.dir.firstNode = node } - return n.root.Init() } -func (n *typeJoinMany) Start() error { - if err := n.subType.Start(); err != nil { +func (join *invertibleTypeJoin) Init() error { + if err := join.subType.Init(); err != nil { return err } - return n.root.Start() + return join.root.Init() } -func (n *typeJoinMany) Spans(spans core.Spans) { - n.root.Spans(spans) +func (join *invertibleTypeJoin) Start() error { + if err := join.subType.Start(); err != nil { + return err + } + return join.root.Start() } -func (n *typeJoinMany) Next() (bool, error) { - hasNext, err := n.root.Next() - if err != nil || !hasNext { - return hasNext, err +func (join *invertibleTypeJoin) Close() error { + if err := join.root.Close(); err != nil { + return err } - n.currentValue = n.root.Value() + return join.subType.Close() +} + +func (join *invertibleTypeJoin) Spans(spans core.Spans) { + join.root.Spans(spans) +} + +func (join *invertibleTypeJoin) Source() planNode { return join.root } + +func (tj *invertibleTypeJoin) invert() { + tj.dir.invert() + tj.isSecondary = !tj.isSecondary +} - // check if theres an index - // if there is, scan and aggregate results - // if not, then manually scan the subtype table - subDocs := make([]core.Doc, 0) - if n.index != nil { - // @todo: handle index for one-to-many setup +func (join *invertibleTypeJoin) processSecondResult(secondDocs []core.Doc) (any, any) { + var secondResult any + var secondIDResult any + if join.secondaryFetchLimit == 1 { + if len(secondDocs) != 0 { + secondResult = secondDocs[0] + secondIDResult = secondDocs[0].GetKey() + } } else { - propIndex := n.subSelect.FirstIndexOfName(n.rootName + request.RelatedObjectID) - // using the doc._key as a filter - setSubTypeFilterToScanNode(n.subType, propIndex, n.currentValue.GetKey()) + secondResult = secondDocs + secondDocKeys := make([]string, len(secondDocs)) + for i, doc := range secondDocs { + secondDocKeys[i] = doc.GetKey() + } + secondIDResult = secondDocKeys + } + join.root.Value().Fields[join.subSelect.Index] = secondResult + if join.secondaryFieldIndex.HasValue() { + join.root.Value().Fields[join.secondaryFieldIndex.Value()] = secondIDResult + } + return secondResult, secondIDResult +} + +func (join *invertibleTypeJoin) Next() (bool, error) { + hasFirstValue, err := join.dir.firstNode.Next() + + if err != nil || !hasFirstValue { + return false, err + } - // reset scan node - if err := n.subType.Init(); err != nil { + firstDoc := join.dir.firstNode.Value() + + if join.isSecondary { + secondDocs, err := fetchDocsWithFieldValue( + join.dir.secondNode, + join.dir.secondaryField, + firstDoc.GetKey(), + join.secondaryFetchLimit, + ) + if err != nil { return false, err } - - for { - next, err := n.subType.Next() - if err != nil { - return false, err - } - if !next { - break + if join.dir.secondNode == join.root { + join.root.Value().Fields[join.subSelect.Index] = join.subType.Value() + } else { + secondResult, secondIDResult := join.processSecondResult(secondDocs) + join.dir.firstNode.Value().Fields[join.subSelect.Index] = secondResult + if join.secondaryFieldIndex.HasValue() { + join.dir.firstNode.Value().Fields[join.secondaryFieldIndex.Value()] = secondIDResult } + } + } else { + hasDoc, err := fetchPrimaryDoc(join.dir.secondNode, join.dir.firstNode, join.dir.primaryField) + if err != nil { + return false, err + } - subDoc := n.subType.Value() - subDocs = append(subDocs, subDoc) + if hasDoc { + join.root.Value().Fields[join.subSelect.Index] = join.subType.Value() } } - n.currentValue.Fields[n.subSelect.Index] = 
subDocs + join.currentValue = join.root.Value() + return true, nil } -func (n *typeJoinMany) Close() error { - if err := n.root.Close(); err != nil { - return err - } +func (join *invertibleTypeJoin) invertJoinDirectionWithIndex( + fieldFilter *mapper.Filter, + field client.FieldDescription, +) error { + subScan := getScanNode(join.subType) + subScan.tryAddField(join.rootName + request.RelatedObjectID) + subScan.filter = fieldFilter + subScan.initFetcher(immutable.Option[string]{}, immutable.Some(field)) - return n.subType.Close() -} + join.invert() -func (n *typeJoinMany) Source() planNode { return n.root } + return nil +} -func setSubTypeFilterToScanNode(plan planNode, propIndex int, key string) { +func setSubTypeFilterToScanNode(plan planNode, propIndex int, val any) { scan := getScanNode(plan) if scan == nil { return @@ -593,7 +628,7 @@ func setSubTypeFilterToScanNode(plan planNode, propIndex int, key string) { propertyIndex := &mapper.PropertyIndex{Index: propIndex} filterConditions := map[connor.FilterKey]any{ propertyIndex: map[connor.FilterKey]any{ - mapper.FilterEqOp: key, + mapper.FilterEqOp: val, }, } @@ -609,6 +644,11 @@ func getScanNode(plan planNode) *scanNode { return scanNode } node = node.Source() + if node == nil { + if topSelect, ok := plan.(*selectTopNode); ok { + node = topSelect.selectNode + } + } } return nil } diff --git a/tests/bench/bench_util.go b/tests/bench/bench_util.go index 4ffe998d88..184ca8c2ec 100644 --- a/tests/bench/bench_util.go +++ b/tests/bench/bench_util.go @@ -85,7 +85,7 @@ func ConstructSchema(fixture fixtures.Generator) (string, error) { // loop to get the schemas for i := 0; i < numTypes; i++ { - gql, err := fixtures.ExtractGQLFromType(fixture.Types()[i]) + gql, err := fixture.ExtractGQLFromType(fixture.Types()[i]) if err != nil { return "", errors.Wrap("failed generating GQL", err) } diff --git a/tests/bench/fixtures/fixtures.go b/tests/bench/fixtures/fixtures.go index 7b19b58f68..65ecf94e22 100644 --- a/tests/bench/fixtures/fixtures.go +++ b/tests/bench/fixtures/fixtures.go @@ -16,6 +16,7 @@ import ( "encoding/json" "fmt" "reflect" + "strings" "github.com/bxcodec/faker" @@ -28,19 +29,41 @@ var ( } ) +type Option func(*Generator) + +func OptionFieldDirective(typeName, field, directive string) Option { + return func(g *Generator) { + if g.directives == nil { + g.directives = make(map[string]map[string][]string) + } + if g.directives[typeName] == nil { + g.directives[typeName] = make(map[string][]string) + } + g.directives[typeName][field] = append(g.directives[typeName][field], directive) + } +} + type Generator struct { ctx context.Context schema string types []any + // map of type name to field name to list of directives + directives map[string]map[string][]string } -func ForSchema(ctx context.Context, schemaName string) Generator { - return Generator{ +func ForSchema(ctx context.Context, schemaName string, options ...Option) Generator { + g := Generator{ ctx: ctx, schema: schemaName, types: registeredFixtures[schemaName], } + + for _, o := range options { + o(&g) + } + + return g } // Types returns the defined types for this fixture set @@ -85,7 +108,7 @@ func (g Generator) GenerateDocs() ([]string, error) { // extractGQLFromType extracts a GraphQL SDL definition as a string // from a given type struct -func ExtractGQLFromType(t any) (string, error) { +func (g Generator) ExtractGQLFromType(t any) (string, error) { var buf bytes.Buffer if reflect.TypeOf(t).Kind() != reflect.Struct { @@ -104,7 +127,17 @@ func ExtractGQLFromType(t any) (string, 
error) { fname := f.Name ftype := f.Type.Name() gqlType := gTypeToGQLType[ftype] - fmt.Fprintf(&buf, "\t%s: %s\n", fname, gqlType) + + directives := "" + if g.directives != nil { + if dirsMap, ok := g.directives[name]; ok { + if dirs, ok := dirsMap[fname]; ok { + directives = " " + strings.Join(dirs, " ") + } + } + } + // write field's name, type and directives + fmt.Fprintf(&buf, "\t%s: %s%s\n", fname, gqlType, directives) } fmt.Fprint(&buf, "}") diff --git a/tests/bench/query/index/simple_test.go b/tests/bench/query/index/simple_test.go new file mode 100644 index 0000000000..e675086a2a --- /dev/null +++ b/tests/bench/query/index/simple_test.go @@ -0,0 +1,97 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package query + +import ( + "context" + "testing" + + "github.com/sourcenetwork/defradb/tests/bench/fixtures" + query "github.com/sourcenetwork/defradb/tests/bench/query/simple" +) + +var ( + userSimpleWithFilterQuery = ` + query { + User(filter: { Age: { _eq: 30 } }) { + _key + Name + Age + Points + Verified + } + } + ` +) + +func makeUserAgeIndexOption() fixtures.Option { + return fixtures.OptionFieldDirective("User", "Age", "@index") +} + +func Benchmark_Index_UserSimple_QueryWithFilterOnIndex_Sync_1(b *testing.B) { + ctx := context.Background() + err := query.RunQueryBenchGet( + b, + ctx, + fixtures.ForSchema(ctx, "user_simple", makeUserAgeIndexOption()), + 1, + userSimpleWithFilterQuery, + false, + ) + if err != nil { + b.Fatal(err) + } +} + +func Benchmark_Index_UserSimple_QueryWithFilterOnIndex_Sync_10(b *testing.B) { + ctx := context.Background() + err := query.RunQueryBenchGet( + b, + ctx, + fixtures.ForSchema(ctx, "user_simple", makeUserAgeIndexOption()), + 10, + userSimpleWithFilterQuery, + false, + ) + if err != nil { + b.Fatal(err) + } +} + +func Benchmark_Index_UserSimple_QueryWithFilterOnIndex_Sync_1000(b *testing.B) { + ctx := context.Background() + err := query.RunQueryBenchGet( + b, + ctx, + fixtures.ForSchema(ctx, "user_simple", makeUserAgeIndexOption()), + 1000, + userSimpleWithFilterQuery, + false, + ) + if err != nil { + b.Fatal(err) + } +} + +func Benchmark_Index_UserSimple_QueryWithFilterOnIndex_Sync_10000(b *testing.B) { + ctx := context.Background() + err := query.RunQueryBenchGet( + b, + ctx, + fixtures.ForSchema(ctx, "user_simple", makeUserAgeIndexOption()), + 10000, + userSimpleWithFilterQuery, + false, + ) + if err != nil { + b.Fatal(err) + } +} diff --git a/tests/bench/query/simple/simple_test.go b/tests/bench/query/simple/simple_test.go index ca0d627275..a9791bcbc7 100644 --- a/tests/bench/query/simple/simple_test.go +++ b/tests/bench/query/simple/simple_test.go @@ -33,7 +33,7 @@ var ( func Benchmark_Query_UserSimple_Query_Sync_1(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -48,7 +48,7 @@ func Benchmark_Query_UserSimple_Query_Sync_1(b *testing.B) { func Benchmark_Query_UserSimple_Query_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -63,7 +63,7 @@ func Benchmark_Query_UserSimple_Query_Sync_10(b *testing.B) 
{ func Benchmark_Query_UserSimple_Query_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -78,7 +78,7 @@ func Benchmark_Query_UserSimple_Query_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/bench/query/simple/utils.go b/tests/bench/query/simple/utils.go index 32e6525dc7..e7f374dc40 100644 --- a/tests/bench/query/simple/utils.go +++ b/tests/bench/query/simple/utils.go @@ -27,7 +27,7 @@ var ( // log = logging.MustNewLogger("bench") ) -func runQueryBenchGet( +func RunQueryBenchGet( b *testing.B, ctx context.Context, fixture fixtures.Generator, diff --git a/tests/bench/query/simple/with_filter_test.go b/tests/bench/query/simple/with_filter_test.go index 86323e2beb..60081167a3 100644 --- a/tests/bench/query/simple/with_filter_test.go +++ b/tests/bench/query/simple/with_filter_test.go @@ -33,7 +33,7 @@ var ( func Benchmark_Query_UserSimple_Query_WithFilter_Sync_1(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -48,7 +48,7 @@ func Benchmark_Query_UserSimple_Query_WithFilter_Sync_1(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithFilter_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -63,7 +63,7 @@ func Benchmark_Query_UserSimple_Query_WithFilter_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithFilter_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -78,7 +78,7 @@ func Benchmark_Query_UserSimple_Query_WithFilter_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithFilter_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/bench/query/simple/with_limit_offset_test.go b/tests/bench/query/simple/with_limit_offset_test.go index 97dc523455..e47d8f347e 100644 --- a/tests/bench/query/simple/with_limit_offset_test.go +++ b/tests/bench/query/simple/with_limit_offset_test.go @@ -33,7 +33,7 @@ var ( func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_1(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -48,7 +48,7 @@ func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_1(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -63,7 +63,7 @@ func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -78,7 +78,7 @@ func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := 
RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/bench/query/simple/with_multi_lookup_test.go b/tests/bench/query/simple/with_multi_lookup_test.go index 6af7b6e20a..2c744319a3 100644 --- a/tests/bench/query/simple/with_multi_lookup_test.go +++ b/tests/bench/query/simple/with_multi_lookup_test.go @@ -34,7 +34,7 @@ var ( func Benchmark_Query_UserSimple_Query_WithMultiLookup_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -49,7 +49,7 @@ func Benchmark_Query_UserSimple_Query_WithMultiLookup_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithMultiLookup_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -64,7 +64,7 @@ func Benchmark_Query_UserSimple_Query_WithMultiLookup_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithMultiLookup_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/bench/query/simple/with_order_test.go b/tests/bench/query/simple/with_order_test.go index ec1a757f2b..2b12817713 100644 --- a/tests/bench/query/simple/with_order_test.go +++ b/tests/bench/query/simple/with_order_test.go @@ -33,7 +33,7 @@ var ( func Benchmark_Query_UserSimple_Query_WithSort_Sync_1(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -48,7 +48,7 @@ func Benchmark_Query_UserSimple_Query_WithSort_Sync_1(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSort_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -63,7 +63,7 @@ func Benchmark_Query_UserSimple_Query_WithSort_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSort_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -78,7 +78,7 @@ func Benchmark_Query_UserSimple_Query_WithSort_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSort_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/bench/query/simple/with_single_lookup_test.go b/tests/bench/query/simple/with_single_lookup_test.go index a2fb7e3b59..d432f730be 100644 --- a/tests/bench/query/simple/with_single_lookup_test.go +++ b/tests/bench/query/simple/with_single_lookup_test.go @@ -34,7 +34,7 @@ var ( func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_1(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -49,7 +49,7 @@ func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_1(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -64,7 +64,7 @@ func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + 
err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -79,7 +79,7 @@ func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/integration/explain/execute/create_test.go b/tests/integration/explain/execute/create_test.go index e8ab75d48a..bd99ab39a4 100644 --- a/tests/integration/explain/execute/create_test.go +++ b/tests/integration/explain/execute/create_test.go @@ -48,6 +48,7 @@ func TestExecuteExplainMutationRequestWithCreate(t *testing.T) { "iterations": uint64(1), "docFetches": uint64(1), "fieldFetches": uint64(1), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/delete_test.go b/tests/integration/explain/execute/delete_test.go index 13411b5f5e..e924ce334c 100644 --- a/tests/integration/explain/execute/delete_test.go +++ b/tests/integration/explain/execute/delete_test.go @@ -51,6 +51,7 @@ func TestExecuteExplainMutationRequestWithDeleteUsingID(t *testing.T) { "iterations": uint64(2), "docFetches": uint64(1), "fieldFetches": uint64(1), + "indexFetches": uint64(0), }, }, }, @@ -99,6 +100,7 @@ func TestExecuteExplainMutationRequestWithDeleteUsingFilter(t *testing.T) { "iterations": uint64(2), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/group_test.go b/tests/integration/explain/execute/group_test.go index 3b7e42c845..9d4dc096f9 100644 --- a/tests/integration/explain/execute/group_test.go +++ b/tests/integration/explain/execute/group_test.go @@ -59,6 +59,7 @@ func TestExecuteExplainRequestWithGroup(t *testing.T) { "iterations": uint64(4), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/query_deleted_docs_test.go b/tests/integration/explain/execute/query_deleted_docs_test.go index 7642873b7f..cb1ebbcaa7 100644 --- a/tests/integration/explain/execute/query_deleted_docs_test.go +++ b/tests/integration/explain/execute/query_deleted_docs_test.go @@ -56,6 +56,7 @@ func TestExecuteExplainQueryDeletedDocs(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/scan_test.go b/tests/integration/explain/execute/scan_test.go index 85bd64229c..a68f175015 100644 --- a/tests/integration/explain/execute/scan_test.go +++ b/tests/integration/explain/execute/scan_test.go @@ -67,6 +67,7 @@ func TestExecuteExplainRequestWithAllDocumentsMatching(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, @@ -109,6 +110,7 @@ func TestExecuteExplainRequestWithNoDocuments(t *testing.T) { "iterations": uint64(1), "docFetches": uint64(0), "fieldFetches": uint64(0), + "indexFetches": uint64(0), }, }, }, @@ -172,6 +174,7 @@ func TestExecuteExplainRequestWithSomeDocumentsMatching(t *testing.T) { "iterations": uint64(2), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, @@ -235,6 +238,7 @@ func TestExecuteExplainRequestWithDocumentsButNoMatches(t *testing.T) { "iterations": uint64(1), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, diff --git 
a/tests/integration/explain/execute/top_level_test.go b/tests/integration/explain/execute/top_level_test.go index 6afa9cbfb2..360c9a3d2c 100644 --- a/tests/integration/explain/execute/top_level_test.go +++ b/tests/integration/explain/execute/top_level_test.go @@ -70,6 +70,7 @@ func TestExecuteExplainTopLevelAverageRequest(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, @@ -153,6 +154,7 @@ func TestExecuteExplainTopLevelCountRequest(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, @@ -227,6 +229,7 @@ func TestExecuteExplainTopLevelSumRequest(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/type_join_test.go b/tests/integration/explain/execute/type_join_test.go index 8e26f423bb..eb1e187485 100644 --- a/tests/integration/explain/execute/type_join_test.go +++ b/tests/integration/explain/execute/type_join_test.go @@ -56,6 +56,13 @@ func TestExecuteExplainRequestWithAOneToOneJoin(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, @@ -115,6 +122,13 @@ func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, @@ -125,6 +139,13 @@ func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, @@ -187,6 +208,13 @@ func TestExecuteExplainWithTwoLevelDeepNestedJoins(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/update_test.go b/tests/integration/explain/execute/update_test.go index d9469e4b4e..a1fa92b091 100644 --- a/tests/integration/explain/execute/update_test.go +++ b/tests/integration/explain/execute/update_test.go @@ -59,6 +59,7 @@ func TestExecuteExplainMutationRequestWithUpdateUsingIDs(t *testing.T) { "iterations": uint64(6), "docFetches": uint64(4), "fieldFetches": uint64(8), + "indexFetches": uint64(0), }, }, }, @@ -116,6 +117,7 @@ func TestExecuteExplainMutationRequestWithUpdateUsingFilter(t *testing.T) { "iterations": uint64(4), "docFetches": uint64(4), "fieldFetches": uint64(6), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/with_average_test.go b/tests/integration/explain/execute/with_average_test.go index a3070e8c42..9e906c475d 100644 --- a/tests/integration/explain/execute/with_average_test.go +++ b/tests/integration/explain/execute/with_average_test.go @@ -56,6 +56,7 @@ func TestExecuteExplainAverageRequestOnArrayField(t *testing.T) { 
"iterations": uint64(4), "docFetches": uint64(3), "fieldFetches": uint64(5), + "indexFetches": uint64(0), }, }, }, @@ -116,6 +117,13 @@ func TestExplainExplainAverageRequestOnJoinedField(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(5), + "docFetches": uint64(6), + "fieldFetches": uint64(12), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/with_count_test.go b/tests/integration/explain/execute/with_count_test.go index 236d0bf8af..4a30b9f52a 100644 --- a/tests/integration/explain/execute/with_count_test.go +++ b/tests/integration/explain/execute/with_count_test.go @@ -57,6 +57,13 @@ func TestExecuteExplainRequestWithCountOnOneToManyRelation(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(5), + "docFetches": uint64(6), + "fieldFetches": uint64(14), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/with_limit_test.go b/tests/integration/explain/execute/with_limit_test.go index 9a65ec1ec3..88a1666ca3 100644 --- a/tests/integration/explain/execute/with_limit_test.go +++ b/tests/integration/explain/execute/with_limit_test.go @@ -51,6 +51,7 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParent(t *testing.T) { "iterations": uint64(2), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, @@ -107,6 +108,13 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParentAndLimitOnChild(t *t "iterations": uint64(2), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(2), + "docFetches": uint64(4), + "fieldFetches": uint64(6), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/with_order_test.go b/tests/integration/explain/execute/with_order_test.go index d5b7ccfaed..9155523b20 100644 --- a/tests/integration/explain/execute/with_order_test.go +++ b/tests/integration/explain/execute/with_order_test.go @@ -52,6 +52,7 @@ func TestExecuteExplainRequestWithOrderFieldOnParent(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, @@ -135,6 +136,7 @@ func TestExecuteExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { "iterations": uint64(5), "docFetches": uint64(4), "fieldFetches": uint64(8), + "indexFetches": uint64(0), }, }, }, @@ -189,6 +191,13 @@ func TestExecuteExplainRequestWithOrderFieldOnChild(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(5), + "docFetches": uint64(6), + "fieldFetches": uint64(9), + "indexFetches": uint64(0), }, }, }, @@ -246,6 +255,13 @@ func TestExecuteExplainRequestWithOrderFieldOnBothParentAndChild(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(5), + "docFetches": uint64(6), + "fieldFetches": uint64(9), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/with_sum_test.go b/tests/integration/explain/execute/with_sum_test.go index c6df56c2e0..c37e3d0309 100644 --- 
a/tests/integration/explain/execute/with_sum_test.go +++ b/tests/integration/explain/execute/with_sum_test.go @@ -52,6 +52,7 @@ func TestExecuteExplainRequestWithSumOfInlineArrayField(t *testing.T) { "iterations": uint64(4), "docFetches": uint64(3), "fieldFetches": uint64(5), + "indexFetches": uint64(0), }, }, }, @@ -110,6 +111,13 @@ func TestExecuteExplainRequestSumOfRelatedOneToManyField(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(5), + "docFetches": uint64(6), + "fieldFetches": uint64(9), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain_result_asserter.go b/tests/integration/explain_result_asserter.go new file mode 100644 index 0000000000..30126d4fe4 --- /dev/null +++ b/tests/integration/explain_result_asserter.go @@ -0,0 +1,162 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tests + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/sourcenetwork/immutable" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + iterationsProp = "iterations" + docFetchesProp = "docFetches" + fieldFetchesProp = "fieldFetches" + indexFetchesProp = "indexFetches" +) + +type dataMap = map[string]any + +// ExplainResultAsserter is a helper for asserting the result of an explain query. +// It allows asserting on a selected set of properties. 
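+//
+// A typical assertion chains the With* builders and finishes with Assert:
+//
+//	NewExplainAsserter().
+//		WithDocFetches(1).
+//		WithFieldFetches(2).
+//		WithIndexFetches(1).
+//		Assert(t, result)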
+type ExplainResultAsserter struct {
+	iterations     immutable.Option[int]
+	docFetches     immutable.Option[int]
+	fieldFetches   immutable.Option[int]
+	indexFetches   immutable.Option[int]
+	filterMatches  immutable.Option[int]
+	sizeOfResults  immutable.Option[int]
+	planExecutions immutable.Option[uint64]
+}
+
+func readNumberProp(t *testing.T, val any, prop string) uint64 {
+	switch v := val.(type) {
+	case uint64:
+		return v
+	case json.Number:
+		n, err := v.Int64()
+		require.NoError(t, err, fmt.Sprintf("Expected %s property to be a uint64", prop))
+		return uint64(n)
+	default:
+		require.Fail(t, fmt.Sprintf("Unexpected type for %s property: %T", prop, val))
+	}
+	return 0
+}
+
+func (a *ExplainResultAsserter) Assert(t *testing.T, result []dataMap) {
+	require.Len(t, result, 1, "Expected len(result) = 1, got %d", len(result))
+	explainNode, ok := result[0]["explain"].(dataMap)
+	require.True(t, ok, "Expected explain node")
+	assert.Equal(t, explainNode["executionSuccess"], true, "Expected executionSuccess property")
+	if a.sizeOfResults.HasValue() {
+		actual := explainNode["sizeOfResult"]
+		assert.Equal(t, actual, a.sizeOfResults.Value(),
+			"Expected %d sizeOfResult, got %d", a.sizeOfResults.Value(), actual)
+	}
+	if a.planExecutions.HasValue() {
+		actual := explainNode["planExecutions"]
+		assert.Equal(t, actual, a.planExecutions.Value(),
+			"Expected %d planExecutions, got %d", a.planExecutions.Value(), actual)
+	}
+	selectTopNode, ok := explainNode["selectTopNode"].(dataMap)
+	require.True(t, ok, "Expected selectTopNode")
+	selectNode, ok := selectTopNode["selectNode"].(dataMap)
+	require.True(t, ok, "Expected selectNode")
+
+	if a.filterMatches.HasValue() {
+		filterMatches, hasFilterMatches := selectNode["filterMatches"]
+		require.True(t, hasFilterMatches, "Expected filterMatches property")
+		assert.Equal(t, filterMatches, uint64(a.filterMatches.Value()),
+			"Expected %d filterMatches, got %d", a.filterMatches.Value(), filterMatches)
+	}
+
+	scanNode, ok := selectNode["scanNode"].(dataMap)
+	subScanNode := map[string]any{}
+	if indexJoin, isJoin := selectNode["typeIndexJoin"].(dataMap); isJoin {
+		scanNode, ok = indexJoin["scanNode"].(dataMap)
+		subScanNode, _ = indexJoin["subTypeScanNode"].(dataMap)
+	}
+	require.True(t, ok, "Expected scanNode")
+
+	getScanNodesProp := func(prop string) uint64 {
+		val, hasProp := scanNode[prop]
+		require.True(t, hasProp, fmt.Sprintf("Expected %s property", prop))
+		actual := readNumberProp(t, val, prop)
+		if subScanNode[prop] != nil {
+			actual += readNumberProp(t, subScanNode[prop], "subTypeScanNode."+prop)
+		}
+		return actual
+	}
+
+	if a.iterations.HasValue() {
+		actual := getScanNodesProp(iterationsProp)
+		assert.Equal(t, actual, uint64(a.iterations.Value()),
+			"Expected %d iterations, got %d", a.iterations.Value(), actual)
+	}
+	if a.docFetches.HasValue() {
+		actual := getScanNodesProp(docFetchesProp)
+		assert.Equal(t, actual, uint64(a.docFetches.Value()),
+			"Expected %d docFetches, got %d", a.docFetches.Value(), actual)
+	}
+	if a.fieldFetches.HasValue() {
+		actual := getScanNodesProp(fieldFetchesProp)
+		assert.Equal(t, actual, uint64(a.fieldFetches.Value()),
+			"Expected %d fieldFetches, got %d", a.fieldFetches.Value(), actual)
+	}
+	if a.indexFetches.HasValue() {
+		actual := getScanNodesProp(indexFetchesProp)
+		assert.Equal(t, actual, uint64(a.indexFetches.Value()),
+			"Expected %d indexFetches, got %d", a.indexFetches.Value(), actual)
+	}
+}
+
+func (a *ExplainResultAsserter) WithIterations(iterations int) *ExplainResultAsserter {
+	a.iterations =
immutable.Some[int](iterations) + return a +} + +func (a *ExplainResultAsserter) WithDocFetches(docFetches int) *ExplainResultAsserter { + a.docFetches = immutable.Some[int](docFetches) + return a +} + +func (a *ExplainResultAsserter) WithFieldFetches(fieldFetches int) *ExplainResultAsserter { + a.fieldFetches = immutable.Some[int](fieldFetches) + return a +} + +func (a *ExplainResultAsserter) WithIndexFetches(indexFetches int) *ExplainResultAsserter { + a.indexFetches = immutable.Some[int](indexFetches) + return a +} + +func (a *ExplainResultAsserter) WithFilterMatches(filterMatches int) *ExplainResultAsserter { + a.filterMatches = immutable.Some[int](filterMatches) + return a +} + +func (a *ExplainResultAsserter) WithSizeOfResults(sizeOfResults int) *ExplainResultAsserter { + a.sizeOfResults = immutable.Some[int](sizeOfResults) + return a +} + +func (a *ExplainResultAsserter) WithPlanExecutions(planExecutions uint64) *ExplainResultAsserter { + a.planExecutions = immutable.Some[uint64](planExecutions) + return a +} + +func NewExplainAsserter() *ExplainResultAsserter { + return &ExplainResultAsserter{} +} diff --git a/tests/integration/index/docs.go b/tests/integration/index/docs.go new file mode 100644 index 0000000000..505eadf98d --- /dev/null +++ b/tests/integration/index/docs.go @@ -0,0 +1,456 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +type docsCollection struct { + colName string + docs []map[string]any +} + +func getUserDocs() docsCollection { + return docsCollection{ + colName: "User", + docs: []map[string]any{ + { + "name": "Shahzad", + "age": 20, + "verified": false, + "email": "shahzad@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "iPhone Xs", + "year": 2022, + "type": "phone", + "specs": map[string]any{ + "CPU": 2.2, + "Chip": "Intel i3", + "RAM": 8, + "Storage": 512, + "OS": "iOS 12", + }, + }, + { + "model": "MacBook Pro", + "year": 2020, + "type": "laptop", + "specs": map[string]any{ + "CPU": 2.4, + "Chip": "Intel i5", + "RAM": 16, + "Storage": 2048, + "OS": "Yosemite", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 4635, + "city": "Montreal", + "country": "Canada", + "street": "Queen Mary Rd", + }, + }, + { + "name": "Bruno", + "age": 23, + "verified": true, + "email": "bruno@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{}, + }, + "address": map[string]any{ + "postalCode": 10001, + "city": "New York", + "country": "USA", + "street": "5th Ave", + }, + }, + { + "name": "Roy", + "age": 44, + "verified": true, + "email": "roy@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{}, + }, + "address": map[string]any{ + "postalCode": 90028, + "city": "Los Angeles", + "country": "USA", + "street": "Hollywood Blvd", + }, + }, + { + "name": "Fred", + "age": 28, + "verified": false, + "email": "fred@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "Samsung Galaxy S20", + "year": 2022, + "type": "phone", + "specs": map[string]any{ + "CPU": 2.0, + "Chip": "AMD Athlon", + "RAM": 8, + "Storage": 256, + "OS": "Android 11", + }, 
+ }, + { + "model": "Lenovo ThinkPad", + "year": 2020, + "type": "laptop", + "specs": map[string]any{ + "CPU": 1.9, + "Chip": "AMD Ryzen", + "RAM": 8, + "Storage": 1024, + "OS": "Windows 10", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 6512, + "city": "Montreal", + "country": "Canada", + "street": "Park Ave", + }, + }, + { + "name": "John", + "age": 30, + "verified": false, + "email": "john@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "Google Pixel 5", + "year": 2022, + "type": "phone", + "specs": map[string]any{ + "CPU": 2.4, + "Chip": "Octa-core", + "RAM": 16, + "Storage": 512, + "OS": "Android 11", + }, + }, + { + "model": "Asus Vivobook", + "year": 2022, + "type": "laptop", + "specs": map[string]any{ + "CPU": 2.9, + "Chip": "Intel i7", + "RAM": 64, + "Storage": 2048, + "OS": "Windows 10", + }, + }, + { + "model": "Commodore 64", + "year": 1982, + "type": "computer", + "specs": map[string]any{ + "CPU": 0.1, + "Chip": "MOS 6510", + "RAM": 1, + "Storage": 1, + "OS": "Commodore BASIC 2.0", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 690, + "city": "Montreal", + "country": "Canada", + "street": "Notre-Dame St W", + }, + }, + { + "name": "Islam", + "age": 32, + "verified": false, + "email": "islam@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "iPhone 12s", + "year": 2018, + "type": "phone", + "specs": map[string]any{ + "CPU": 2.1, + "Chip": "A11 Bionic", + "RAM": 8, + "Storage": 1024, + "OS": "iOS 14", + }, + }, + { + "model": "MacBook Pro", + "year": 2023, + "type": "laptop", + "specs": map[string]any{ + "CPU": 2.6, + "Chip": "Apple M2 Max", + "RAM": 32, + "Storage": 1024, + "OS": "Sonoma 14", + }, + }, + { + "model": "iPad Pro", + "year": 2020, + "type": "tablet", + "specs": map[string]any{ + "CPU": 2.1, + "Chip": "Intel i5", + "RAM": 8, + "Storage": 512, + "OS": "iOS 14", + }, + }, + { + "model": "Playstation 5", + "year": 2022, + "type": "game_console", + "specs": map[string]any{ + "CPU": 3.5, + "Chip": "AMD Zen 2", + "RAM": 16, + "Storage": 825, + "OS": "FreeBSD", + }, + }, + { + "model": "Nokia 7610", + "year": 2003, + "type": "phone", + "specs": map[string]any{ + "CPU": 1.8, + "Chip": "Cortex A710", + "RAM": 12, + "Storage": 2, + "OS": "Symbian 7.0", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 80804, + "city": "Munich", + "country": "Germany", + "street": "Leopold Str", + }, + }, + { + "name": "Andy", + "age": 33, + "verified": true, + "email": "andy@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "Xiaomi Phone", + "year": 2022, + "type": "phone", + "specs": map[string]any{ + "CPU": 1.6, + "Chip": "AMD Octen", + "RAM": 8, + "Storage": 512, + "OS": "Android 11", + }, + }, + { + "model": "Alienware x16", + "year": 2018, + "type": "laptop", + "specs": map[string]any{ + "CPU": 3.2, + "Chip": "Intel i7", + "RAM": 64, + "Storage": 2048, + "OS": "Windows 9", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 101103, + "city": "London", + "country": "UK", + "street": "Baker St", + }, + }, + { + "name": "Addo", + "age": 42, + "verified": true, + "email": "addo@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "iPhone 10", + "year": 2021, + "type": "phone", + "specs": map[string]any{ + "CPU": 1.8, + "Chip": "Intel i3", + "RAM": 8, + "Storage": 256, + "OS": "iOS 12", + }, + }, + { + "model": "Acer 
Aspire 5", + "year": 2020, + "type": "laptop", + "specs": map[string]any{ + "CPU": 2.0, + "Chip": "Intel i5", + "RAM": 16, + "Storage": 512, + "OS": "Windows 10", + }, + }, + { + "model": "HyperX Headset", + "year": 2014, + "type": "headset", + "specs": map[string]any{ + "CPU": nil, + "Chip": nil, + "RAM": nil, + "Storage": nil, + "OS": nil, + }, + }, + { + "model": "Playstation 5", + "year": 2021, + "type": "game_console", + "specs": map[string]any{ + "CPU": 3.5, + "Chip": "AMD Zen 2", + "RAM": 16, + "Storage": 825, + "OS": "FreeBSD", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 403, + "city": "Ottawa", + "country": "Canada", + "street": "Bank St", + }, + }, + { + "name": "Keenan", + "age": 48, + "verified": true, + "email": "keenan@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "iPhone 13", + "year": 2022, + "type": "phone", + "specs": map[string]any{ + "CPU": 2.3, + "Chip": "M1", + "RAM": 8, + "Storage": 1024, + "OS": "iOS 14", + }, + }, + { + "model": "MacBook Pro", + "year": 2017, + "type": "laptop", + "specs": map[string]any{ + "CPU": 2.0, + "Chip": "A11 Bionic", + "RAM": 16, + "Storage": 512, + "OS": "Ventura", + }, + }, + { + "model": "iPad Mini", + "year": 2015, + "type": "tablet", + "specs": map[string]any{ + "CPU": 1.9, + "Chip": "Intel i3", + "RAM": 8, + "Storage": 1024, + "OS": "iOS 12", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 1600, + "city": "San Francisco", + "country": "USA", + "street": "Market St", + }, + }, + { + "name": "Chris", + "age": 55, + "verified": true, + "email": "chris@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "Walkman", + "year": 2000, + "type": "phone", + "specs": map[string]any{ + "CPU": 1.8, + "Chip": "Cortex-A53 ", + "RAM": 8, + "Storage": 256, + "OS": "Android 11", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 11680, + "city": "Toronto", + "country": "Canada", + "street": "Yonge St", + }, + }, + }, + } +} diff --git a/tests/integration/index/query_performance_test.go b/tests/integration/index/query_performance_test.go new file mode 100644 index 0000000000..eec8a13f4b --- /dev/null +++ b/tests/integration/index/query_performance_test.go @@ -0,0 +1,86 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package index + +import ( + "fmt" + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func generateDocsForCollection(colIndex, count int) []any { + result := make([]any, 0, count) + for i := 0; i < count; i++ { + result = append(result, testUtils.CreateDoc{ + CollectionID: colIndex, + Doc: fmt.Sprintf(`{ + "name": "name-%d", + "age": %d, + "email": "email%d@gmail.com" + }`, i, i%100, i), + }) + } + return result +} + +func TestQueryPerformance_Simple(t *testing.T) { + const benchReps = 10 + const numDocs = 500 + + test1 := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{Schema: ` + type User { + name: String + age: Int + email: String + } + `}, + testUtils.SchemaUpdate{ + Schema: ` + type IndexedUser { + name: String + age: Int @index + email: String + } + `, + }, + generateDocsForCollection(0, numDocs), + generateDocsForCollection(1, numDocs), + testUtils.Benchmark{ + Reps: benchReps, + BaseCase: testUtils.Request{Request: ` + query { + User(filter: {age: {_eq: 33}}) { + name + age + email + } + }`, + }, + OptimizedCase: testUtils.Request{Request: ` + query { + IndexedUser(filter: {age: {_eq: 33}}) { + name + age + email + } + }`, + }, + FocusClients: []testUtils.ClientType{testUtils.GoClientType}, + Factor: 5, + }, + }, + } + + testUtils.ExecuteTestCase(t, test1) +} diff --git a/tests/integration/index/query_with_index_combined_filter_test.go b/tests/integration/index/query_with_index_combined_filter_test.go new file mode 100644 index 0000000000..e5673d1ccf --- /dev/null +++ b/tests/integration/index/query_with_index_combined_filter_test.go @@ -0,0 +1,87 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
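+
+// These tests mix filters on indexed and non-indexed fields. The explain
+// assertions (docFetches/fieldFetches/indexFetches) verify that the index
+// supplies the candidate documents and that the remaining regular
+// conditions are evaluated against the fetched documents.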
+
+package index
+
+import (
+	"testing"
+
+	testUtils "github.com/sourcenetwork/defradb/tests/integration"
+)
+
+func TestQueryWithIndex_IfIndexFilterWithRegular_ShouldFilter(t *testing.T) {
+	req := `query {
+		User(filter: {
+			name: {_in: ["Fred", "Islam", "Addo"]},
+			age:  {_gt: 40}
+		}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Combination of a filter on a regular and an indexed field",
+		Actions: []any{
+			createSchemaWithDocs(`
+				type User {
+					name: String @index
+					age: Int
+				}
+			`),
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Addo"},
+				},
+			},
+			testUtils.Request{
+				Request:  makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithDocFetches(3).WithFieldFetches(6).WithIndexFetches(3),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
+
+func TestQueryWithIndex_IfMultipleIndexFiltersWithRegular_ShouldFilter(t *testing.T) {
+	req := `query {
+		User(filter: {
+			name: {_like: "%a%"},
+			age:  {_gt: 30},
+			email: {_like: "%m@gmail.com"}
+		}) {
+			name
+		}
+	}`
+	test := testUtils.TestCase{
+		Description: "Combination of a filter on a regular field and on 2 indexed fields",
+		Actions: []any{
+			createSchemaWithDocs(`
+				type User {
+					name: String @index
+					age: Int @index
+					email: String
+				}
+			`),
+			testUtils.Request{
+				Request: req,
+				Results: []map[string]any{
+					{"name": "Islam"},
+				},
+			},
+			testUtils.Request{
+				Request:  makeExplainQuery(req),
+				Asserter: testUtils.NewExplainAsserter().WithDocFetches(6).WithFieldFetches(18),
+			},
+		},
+	}
+
+	testUtils.ExecuteTestCase(t, test)
+}
diff --git a/tests/integration/index/query_with_index_only_filter_test.go b/tests/integration/index/query_with_index_only_filter_test.go
new file mode 100644
index 0000000000..f8e2bae6cc
--- /dev/null
+++ b/tests/integration/index/query_with_index_only_filter_test.go
@@ -0,0 +1,534 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
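+
+// These tests filter on an indexed field with a single operator each. The
+// explain assertions show that _eq and _in seek only the matching index
+// entries, while _ne and the range operators (_gt, _ge, _lt, _le) scan all
+// index entries (WithIndexFetches(10) for the 10 fixture users).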
+ +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryWithIndex_WithNonIndexedFields_ShouldFetchAllOfThem(t *testing.T) { + req := `query { + User(filter: {name: {_eq: "Islam"}}) { + name + age + } + }` + test := testUtils.TestCase{ + Description: "If there are non-indexed fields in the query, they should be fetched", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{{ + "name": "Islam", + "age": uint64(32), + }}, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithEqualFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_eq: "Islam"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _eq filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Islam"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(1).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_IfSeveralDocsWithEqFilter_ShouldFetchAll(t *testing.T) { + req := `query { + User(filter: {name: {_eq: "Islam"}}) { + age + } + }` + test := testUtils.TestCase{ + Description: "If there are several docs matching _eq filter, they should be fetched", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Islam", + "age": 18 + }`, + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"age": uint64(32)}, + {"age": uint64(18)}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithGreaterThanFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_gt: 48}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _gt filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Chris"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithGreaterOrEqualFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_ge: 48}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _ge filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Keenan"}, + {"name": "Chris"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + }, + } + + 
testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithLessThanFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_lt: 22}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _lt filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithLessOrEqualFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_le: 23}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _le filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Shahzad"}, + {"name": "Bruno"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithNotEqualFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_ne: "Islam"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _ne filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Roy"}, + {"name": "Addo"}, + {"name": "Andy"}, + {"name": "Fred"}, + {"name": "John"}, + {"name": "Bruno"}, + {"name": "Chris"}, + {"name": "Keenan"}, + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(9).WithFieldFetches(9).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithInFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_in: [20, 33]}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _in filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Shahzad"}, + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_IfSeveralDocsWithInFilter_ShouldFetchAll(t *testing.T) { + req := `query { + User(filter: {name: {_in: ["Islam"]}}) { + age + } + }` + test := testUtils.TestCase{ + Description: "If there are several docs matching _in filter, they should be fetched", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Islam", + "age": 18 + }`, + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"age": uint64(32)}, + {"age": uint64(18)}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: 
testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithNotInFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_nin: [20, 23, 28, 33, 42, 55]}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _nin filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "John"}, + {"name": "Islam"}, + {"name": "Roy"}, + {"name": "Keenan"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(4).WithFieldFetches(8).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithLikeFilter_ShouldFetch(t *testing.T) { + req1 := `query { + User(filter: {email: {_like: "a%"}}) { + name + } + }` + req2 := `query { + User(filter: {email: {_like: "%d@gmail.com"}}) { + name + } + }` + req3 := `query { + User(filter: {email: {_like: "%e%"}}) { + name + } + }` + req4 := `query { + User(filter: {email: {_like: "fred@gmail.com"}}) { + name + } + }` + req5 := `query { + User(filter: {email: {_like: "a%@gmail.com"}}) { + name + } + }` + req6 := `query { + User(filter: {email: {_like: "a%com%m"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _like filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + email: String @index + } + `), + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Addo"}, + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "Fred"}, + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req3, + Results: []map[string]any{ + {"name": "Fred"}, + {"name": "Keenan"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req3), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req4, + Results: []map[string]any{ + {"name": "Fred"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req4), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req5, + Results: []map[string]any{ + {"name": "Addo"}, + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req5), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req6, + Results: []map[string]any{}, + }, + testUtils.Request{ + Request: makeExplainQuery(req6), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(0).WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithNotLikeFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_nlike: "%h%"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _nlike filter", + Actions: 
[]any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Roy"}, + {"name": "Addo"}, + {"name": "Andy"}, + {"name": "Fred"}, + {"name": "Bruno"}, + {"name": "Islam"}, + {"name": "Keenan"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(7).WithFieldFetches(7).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/query_with_relation_filter_test.go b/tests/integration/index/query_with_relation_filter_test.go new file mode 100644 index 0000000000..4a217e931c --- /dev/null +++ b/tests/integration/index/query_with_relation_filter_test.go @@ -0,0 +1,310 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilter(t *testing.T) { + req1 := `query { + User(filter: { + devices: {model: {_eq: "MacBook Pro"}} + }) { + name + } + }` + req2 := `query { + User(filter: { + devices: {model: {_eq: "iPhone 10"}} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed relation field in 1-N relation", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int + devices: [Device] + } + + type Device { + model: String @index + owner: User + } + `), + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Islam"}, + {"name": "Shahzad"}, + {"name": "Keenan"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(6).WithFieldFetches(9).WithIndexFetches(3), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "Addo"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnOneToOnesSecondaryRelation_IfFilterOnIndexedRelation_ShouldFilter(t *testing.T) { + req1 := `query { + User(filter: { + address: {city: {_eq: "Munich"}} + }) { + name + } + }` + req2 := `query { + User(filter: { + address: {city: {_eq: "Montreal"}} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed secondary relation field in 1-1 relation", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int + address: Address + } + + type Address { + user: User + city: String @index + } + `), + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Islam"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "Shahzad"}, + {"name": "Fred"}, + {"name": "John"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: 
testUtils.NewExplainAsserter().WithDocFetches(6).WithFieldFetches(9).WithIndexFetches(3), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelation_ShouldFilter(t *testing.T) { + req1 := `query { + User(filter: { + address: {city: {_eq: "London"}} + }) { + name + } + }` + req2 := `query { + User(filter: { + address: {city: {_eq: "Montreal"}} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed field of primary relation in 1-1 relation", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int + address: Address @primary + } + + type Address { + user: User + city: String @index + street: String + } + `), + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(11).WithFieldFetches(12).WithIndexFetches(1), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "John"}, + {"name": "Fred"}, + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(15).WithFieldFetches(18).WithIndexFetches(3), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedRelationWhileIndexedForeignField_ShouldFilter(t *testing.T) { + req := `query { + User(filter: { + address: {city: {_eq: "London"}} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed field of primary relation while having indexed foreign field in 1-1 relation", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int + address: Address @primary @index + } + + type Address { + user: User + city: String @index + street: String + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(11).WithFieldFetches(12).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnOneToTwoRelation_IfFilterOnIndexedRelation_ShouldFilter(t *testing.T) { + req1 := `query { + User(filter: { + address: {city: {_eq: "Munich"}} + }) { + name + address { + city + } + } + }` + req2 := `query { + User(filter: { + devices: {model: {_eq: "Walkman"}} + }) { + name + devices { + model + } + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed relation field in 1-1 and 1-N relations", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int + address: Address + devices: [Device] + } + + type Device { + model: String @index + owner: User + } + + type Address { + user: User + city: String @index + } + `), + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + { + "name": "Islam", + "address": map[string]any{ + "city": "Munich", + }, + }, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + { + "name": "Chris", + "devices": map[string]any{ + "model": "Walkman", + }, + }, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: 
testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/utils.go b/tests/integration/index/utils.go new file mode 100644 index 0000000000..bb6cb89f14 --- /dev/null +++ b/tests/integration/index/utils.go @@ -0,0 +1,290 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "fmt" + "strings" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +// createSchemaWithDocs returns UpdateSchema action and CreateDoc actions +// with the documents that match the schema. +// The schema is parsed to get the list of properties, and the docs +// are created with the same properties. +// This allows us to have only one large list of docs with predefined +// properties, and create schemas with different properties from it. +func createSchemaWithDocs(schema string) []any { + userDocs := getUserDocs() + resultActions := make([]any, 0, len(userDocs.docs)+1) + resultActions = append(resultActions, testUtils.SchemaUpdate{Schema: schema}) + parser := schemaParser{} + typeDefs := parser.Parse(schema) + generator := createDocGenerator{types: typeDefs} + for _, doc := range userDocs.docs { + actions := generator.GenerateDocs(doc, userDocs.colName) + resultActions = append(resultActions, actions...) 
+ } + return resultActions +} + +type createDocGenerator struct { + types map[string]typeDefinition +} + +func createDocJSON(doc map[string]any, typeDef *typeDefinition) string { + sb := strings.Builder{} + for propName := range doc { + format := `"%s": %v` + if _, isStr := doc[propName].(string); isStr { + format = `"%s": "%v"` + } + if sb.Len() == 0 { + sb.WriteString("{\n") + } else { + sb.WriteString(",\n") + } + sb.WriteString(fmt.Sprintf(format, propName, doc[propName])) + } + sb.WriteString("\n}") + return sb.String() +} + +func toRequestedDoc(doc map[string]any, typeDef *typeDefinition) map[string]any { + result := make(map[string]any) + for _, prop := range typeDef.props { + if prop.isRelation { + continue + } + result[prop.name] = doc[prop.name] + } + for name, val := range doc { + if strings.HasSuffix(name, request.RelatedObjectID) { + result[name] = val + } + } + return result +} + +func (this *createDocGenerator) generatePrimary( + doc map[string]any, + typeDef *typeDefinition, +) (map[string]any, []any) { + result := []any{} + requested := toRequestedDoc(doc, typeDef) + for _, prop := range typeDef.props { + if prop.isRelation { + if _, hasProp := doc[prop.name]; hasProp { + if prop.isPrimary.Value() { + subType := this.types[prop.typeStr] + subDoc := toRequestedDoc(doc[prop.name].(map[string]any), &subType) + jsonSubDoc := createDocJSON(subDoc, &subType) + clientSubDoc, err := client.NewDocFromJSON([]byte(jsonSubDoc)) + if err != nil { + panic("Failed to create doc from JSON: " + err.Error()) + } + requested[prop.name+request.RelatedObjectID] = clientSubDoc.Key().String() + result = append(result, testUtils.CreateDoc{CollectionID: subType.index, Doc: jsonSubDoc}) + } + } + } + } + return requested, result +} + +func (this *createDocGenerator) GenerateDocs(doc map[string]any, typeName string) []any { + typeDef := this.types[typeName] + + requested, result := this.generatePrimary(doc, &typeDef) + docStr := createDocJSON(requested, &typeDef) + + result = append(result, testUtils.CreateDoc{CollectionID: typeDef.index, Doc: docStr}) + + var docKey string + for _, prop := range typeDef.props { + if prop.isRelation { + if _, hasProp := doc[prop.name]; hasProp { + if !prop.isPrimary.Value() { + if docKey == "" { + clientDoc, err := client.NewDocFromJSON([]byte(docStr)) + if err != nil { + panic("Failed to create doc from JSON: " + err.Error()) + } + docKey = clientDoc.Key().String() + } + actions := this.generateSecondaryDocs(doc, typeName, &prop, docKey) + result = append(result, actions...) + } + } + } + } + return result +} + +func (this *createDocGenerator) generateSecondaryDocs( + primaryDoc map[string]any, + primaryTypeName string, + relProp *propDefinition, + primaryDocKey string, +) []any { + result := []any{} + relTypeDef := this.types[relProp.typeStr] + primaryPropName := "" + for _, relDocProp := range relTypeDef.props { + if relDocProp.typeStr == primaryTypeName && relDocProp.isPrimary.Value() { + primaryPropName = relDocProp.name + request.RelatedObjectID + switch relVal := primaryDoc[relProp.name].(type) { + case docsCollection: + for _, relDoc := range relVal.docs { + relDoc[primaryPropName] = primaryDocKey + actions := this.GenerateDocs(relDoc, relTypeDef.name) + result = append(result, actions...) + } + case map[string]any: + relVal[primaryPropName] = primaryDocKey + actions := this.GenerateDocs(relVal, relTypeDef.name) + result = append(result, actions...) 
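+			// Relation values of any other type are ignored here: only a
+			// docsCollection or a single map[string]any can describe
+			// related documents, so the switch has no default case.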
+ } + } + } + return result +} + +type propDefinition struct { + name string + typeStr string + isArray bool + isRelation bool + isPrimary immutable.Option[bool] +} + +type typeDefinition struct { + name string + index int + props map[string]propDefinition +} + +type schemaParser struct { + types map[string]typeDefinition + schemaLines []string + firstRelationType string + currentTypeDef typeDefinition + relationTypesMap map[string]map[string]string +} + +func (p *schemaParser) Parse(schema string) map[string]typeDefinition { + p.types = make(map[string]typeDefinition) + p.relationTypesMap = make(map[string]map[string]string) + p.schemaLines = strings.Split(schema, "\n") + p.findTypes() + + for _, line := range p.schemaLines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "type ") { + typeNameEndPos := strings.Index(line[5:], " ") + typeName := strings.TrimSpace(line[5 : 5+typeNameEndPos]) + p.currentTypeDef = p.types[typeName] + continue + } + if strings.HasPrefix(line, "}") { + p.types[p.currentTypeDef.name] = p.currentTypeDef + continue + } + pos := strings.Index(line, ":") + if pos != -1 { + p.defineProp(line, pos) + } + } + p.resolvePrimaryRelations() + return p.types +} + +func (p *schemaParser) findTypes() { + typeIndex := 0 + for _, line := range p.schemaLines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "type ") { + typeNameEndPos := strings.Index(line[5:], " ") + typeName := strings.TrimSpace(line[5 : 5+typeNameEndPos]) + p.types[typeName] = typeDefinition{name: typeName, index: typeIndex, props: make(map[string]propDefinition)} + typeIndex++ + } + } +} + +func (p *schemaParser) defineProp(line string, pos int) { + prop := propDefinition{name: line[:pos]} + prop.typeStr = strings.TrimSpace(line[pos+1:]) + typeEndPos := strings.Index(prop.typeStr, " ") + if typeEndPos != -1 { + prop.typeStr = prop.typeStr[:typeEndPos] + } + if prop.typeStr[0] == '[' { + prop.isArray = true + prop.typeStr = prop.typeStr[1 : len(prop.typeStr)-1] + } + if _, isRelation := p.types[prop.typeStr]; isRelation { + prop.isRelation = true + if prop.isArray { + prop.isPrimary = immutable.Some(false) + } else if strings.Contains(line[pos+len(prop.typeStr)+2:], "@primary") { + prop.isPrimary = immutable.Some(true) + } + relMap := p.relationTypesMap[prop.typeStr] + if relMap == nil { + relMap = make(map[string]string) + } + relMap[prop.name] = p.currentTypeDef.name + p.relationTypesMap[prop.typeStr] = relMap + if p.firstRelationType == "" { + p.firstRelationType = p.currentTypeDef.name + } + } + p.currentTypeDef.props[prop.name] = prop +} + +func (p *schemaParser) resolvePrimaryRelations() { + for typeName, relationProps := range p.relationTypesMap { + typeDef := p.types[typeName] + for _, prop := range typeDef.props { + for relPropName, relPropType := range relationProps { + if prop.typeStr == relPropType { + relatedTypeDef := p.types[relPropType] + relatedProp := relatedTypeDef.props[relPropName] + if !relatedProp.isPrimary.HasValue() { + relatedProp.isPrimary = immutable.Some(typeName == p.firstRelationType) + relatedTypeDef.props[relPropName] = relatedProp + p.types[relPropType] = relatedTypeDef + delete(p.relationTypesMap, relPropType) + } + if !prop.isPrimary.HasValue() { + val := typeName != p.firstRelationType + if relatedProp.isPrimary.HasValue() { + val = !relatedProp.isPrimary.Value() + } + prop.isPrimary = immutable.Some(val) + typeDef.props[prop.name] = prop + } + } + } + } + p.types[typeName] = typeDef + } +} + +func makeExplainQuery(req string) string { + return 
"query @explain(type: execute) " + req[6:] +} diff --git a/tests/integration/results.go b/tests/integration/results.go index 176b2e4cf2..35a2249c0b 100644 --- a/tests/integration/results.go +++ b/tests/integration/results.go @@ -28,7 +28,7 @@ type AnyOf []any // The comparison is relaxed when using client types other than goClientType. func assertResultsAnyOf(t *testing.T, client ClientType, expected AnyOf, actual any, msgAndArgs ...any) { switch client { - case httpClientType, cliClientType: + case HTTPClientType, CLIClientType: if !areResultsAnyOf(expected, actual) { assert.Contains(t, expected, actual, msgAndArgs...) } @@ -42,7 +42,7 @@ func assertResultsAnyOf(t *testing.T, client ClientType, expected AnyOf, actual // The comparison is relaxed when using client types other than goClientType. func assertResultsEqual(t *testing.T, client ClientType, expected any, actual any, msgAndArgs ...any) { switch client { - case httpClientType, cliClientType: + case HTTPClientType, CLIClientType: if !areResultsEqual(expected, actual) { assert.EqualValues(t, expected, actual, msgAndArgs...) } diff --git a/tests/integration/state.go b/tests/integration/state.go index 4b48494c32..5e47e0adfe 100644 --- a/tests/integration/state.go +++ b/tests/integration/state.go @@ -83,6 +83,9 @@ type state struct { // Indexes, by index, by collection index, by node index. indexes [][][]client.IndexDescription + + // isBench indicates wether the test is currently being benchmarked. + isBench bool } // newState returns a new fresh state for the given testCase. diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index 10f3cf7262..fabdccbbfd 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -11,6 +11,8 @@ package tests import ( + "testing" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" @@ -254,6 +256,36 @@ type GetIndexes struct { ExpectedError string } +// ResultAsserter is an interface that can be implemented to provide custom result +// assertions. +type ResultAsserter interface { + // Assert will be called with the test and the result of the request. + Assert(t *testing.T, result []map[string]any) +} + +// ResultAsserterFunc is a function that can be used to implement the ResultAsserter +type ResultAsserterFunc func(*testing.T, []map[string]any) (bool, string) + +func (f ResultAsserterFunc) Assert(t *testing.T, result []map[string]any) { + f(t, result) +} + +// Benchmark is an action that will run another test action for benchmark test. +// It will run benchmarks for a base case and optimized case and assert that +// the optimized case performs better by at least the given factor. +type Benchmark struct { + // BaseCase is a test action which is the base case to benchmark. + BaseCase any + // OptimizedCase is a test action which is the optimized case to benchmark. + OptimizedCase any + // Reps is the number of times to run the benchmark. + Reps int + // FocusClients is the list of clients to run the benchmark on. + FocusClients []ClientType + // Factor is the factor by which the optimized case should be better than the base case. + Factor float64 +} + // Request represents a standard Defra (GQL) request. type Request struct { // NodeID may hold the ID (index) of a node to execute this request on. @@ -271,6 +303,9 @@ type Request struct { // The expected (data) results of the issued request. Results []map[string]any + // Asserter is an optional custom result asserter. 
+ Asserter ResultAsserter + // Any error expected from the action. Optional. // // String can be a partial, and the test will pass if an error is returned that diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 3354c43561..9e8c71792e 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -62,15 +62,15 @@ const ( type ClientType string const ( - // goClientType enables running the test suite using + // GoClientType enables running the test suite using // the go implementation of the client.DB interface. - goClientType ClientType = "go" - // httpClientType enables running the test suite using + GoClientType ClientType = "go" + // HTTPClientType enables running the test suite using // the http implementation of the client.DB interface. - httpClientType ClientType = "http" - // cliClientType enables running the test suite using + HTTPClientType ClientType = "http" + // CLIClientType enables running the test suite using // the cli implementation of the client.DB interface. - cliClientType ClientType = "cli" + CLIClientType ClientType = "cli" ) // The MutationType that tests will run using. @@ -259,13 +259,13 @@ func GetDatabase(s *state) (cdb client.DB, path string, err error) { } switch s.clientType { - case httpClientType: + case HTTPClientType: cdb, err = http.NewWrapper(cdb) - case cliClientType: + case CLIClientType: cdb = cli.NewWrapper(cdb) - case goClientType: + case GoClientType: return default: @@ -288,19 +288,20 @@ func ExecuteTestCase( t *testing.T, testCase TestCase, ) { + flattenActions(&testCase) collectionNames := getCollectionNames(testCase) changeDetector.PreTestChecks(t, collectionNames) skipIfMutationTypeUnsupported(t, testCase.SupportedMutationTypes) var clients []ClientType if httpClient { - clients = append(clients, httpClientType) + clients = append(clients, HTTPClientType) } if goClient { - clients = append(clients, goClientType) + clients = append(clients, GoClientType) } if cliClient { - clients = append(clients, cliClientType) + clients = append(clients, CLIClientType) } var databases []DatabaseType @@ -338,11 +339,8 @@ func executeTestCase( log.Info( ctx, testCase.Description, - logging.NewKV("badgerFile", badgerFile), - logging.NewKV("badgerInMemory", badgerInMemory), - logging.NewKV("inMemoryStore", inMemoryStore), - logging.NewKV("httpClient", httpClient), - logging.NewKV("goClient", goClient), + logging.NewKV("database", dbt), + logging.NewKV("client", clientType), logging.NewKV("mutationType", mutationType), logging.NewKV("databaseDir", databaseDir), logging.NewKV("changeDetector.Enabled", changeDetector.Enabled), @@ -352,7 +350,6 @@ func executeTestCase( logging.NewKV("changeDetector.Repository", changeDetector.Repository), ) - flattenActions(&testCase) startActionIndex, endActionIndex := getActionRange(testCase) s := newState(ctx, t, testCase, dbt, clientType, collectionNames) @@ -370,110 +367,163 @@ func executeTestCase( refreshIndexes(s) for i := startActionIndex; i <= endActionIndex; i++ { - switch action := testCase.Actions[i].(type) { - case ConfigureNode: - configureNode(s, action) + performAction(s, i, testCase.Actions[i]) + } - case Restart: - restartNodes(s, i) + // Notify any active subscriptions that all requests have been sent. 
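+	// Closing the channel (rather than sending on it) broadcasts the signal,
+	// so every subscription goroutine still listening is released at once.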
+ close(s.allActionsDone) - case ConnectPeers: - connectPeers(s, action) + for _, resultsChan := range s.subscriptionResultsChans { + select { + case subscriptionAssert := <-resultsChan: + // We want to assert back in the main thread so failures get recorded properly + subscriptionAssert() - case ConfigureReplicator: - configureReplicator(s, action) + // a safety in case the stream hangs - we don't want the tests to run forever. + case <-time.After(subscriptionTimeout): + assert.Fail(t, "timeout occurred while waiting for data stream", testCase.Description) + } + } +} - case SubscribeToCollection: - subscribeToCollection(s, action) +func performAction( + s *state, + actionIndex int, + act any, +) { + switch action := act.(type) { + case ConfigureNode: + configureNode(s, action) - case UnsubscribeToCollection: - unsubscribeToCollection(s, action) + case Restart: + restartNodes(s, actionIndex) - case GetAllP2PCollections: - getAllP2PCollections(s, action) + case ConnectPeers: + connectPeers(s, action) - case SchemaUpdate: - updateSchema(s, action) + case ConfigureReplicator: + configureReplicator(s, action) - case SchemaPatch: - patchSchema(s, action) + case SubscribeToCollection: + subscribeToCollection(s, action) - case SetDefaultSchemaVersion: - setDefaultSchemaVersion(s, action) + case UnsubscribeToCollection: + unsubscribeToCollection(s, action) - case ConfigureMigration: - configureMigration(s, action) + case GetAllP2PCollections: + getAllP2PCollections(s, action) - case GetMigrations: - getMigrations(s, action) + case SchemaUpdate: + updateSchema(s, action) - case CreateDoc: - createDoc(s, action) + case SchemaPatch: + patchSchema(s, action) - case DeleteDoc: - deleteDoc(s, action) + case SetDefaultSchemaVersion: + setDefaultSchemaVersion(s, action) - case UpdateDoc: - updateDoc(s, action) + case ConfigureMigration: + configureMigration(s, action) - case CreateIndex: - createIndex(s, action) + case GetMigrations: + getMigrations(s, action) - case DropIndex: - dropIndex(s, action) + case CreateDoc: + createDoc(s, action) - case GetIndexes: - getIndexes(s, action) + case DeleteDoc: + deleteDoc(s, action) - case BackupExport: - backupExport(s, action) + case UpdateDoc: + updateDoc(s, action) - case BackupImport: - backupImport(s, action) + case CreateIndex: + createIndex(s, action) - case TransactionCommit: - commitTransaction(s, action) + case DropIndex: + dropIndex(s, action) - case SubscriptionRequest: - executeSubscriptionRequest(s, action) + case GetIndexes: + getIndexes(s, action) - case Request: - executeRequest(s, action) + case BackupExport: + backupExport(s, action) - case ExplainRequest: - executeExplainRequest(s, action) + case BackupImport: + backupImport(s, action) - case IntrospectionRequest: - assertIntrospectionResults(s, action) + case TransactionCommit: + commitTransaction(s, action) - case ClientIntrospectionRequest: - assertClientIntrospectionResults(s, action) + case SubscriptionRequest: + executeSubscriptionRequest(s, action) - case WaitForSync: - waitForSync(s, action) + case Request: + executeRequest(s, action) - case SetupComplete: - // no-op, just continue. + case ExplainRequest: + executeExplainRequest(s, action) - default: - t.Fatalf("Unknown action type %T", action) - } - } + case IntrospectionRequest: + assertIntrospectionResults(s, action) - // Notify any active subscriptions that all requests have been sent. 
- close(s.allActionsDone) + case ClientIntrospectionRequest: + assertClientIntrospectionResults(s, action) - for _, resultsChan := range s.subscriptionResultsChans { - select { - case subscriptionAssert := <-resultsChan: - // We want to assert back in the main thread so failures get recorded properly - subscriptionAssert() + case WaitForSync: + waitForSync(s, action) - // a safety in case the stream hangs - we don't want the tests to run forever. - case <-time.After(subscriptionTimeout): - assert.Fail(t, "timeout occurred while waiting for data stream", testCase.Description) + case Benchmark: + benchmarkAction(s, actionIndex, action) + + case SetupComplete: + // no-op, just continue. + + default: + s.t.Fatalf("Unknown action type %T", action) + } +} + +func benchmarkAction( + s *state, + actionIndex int, + bench Benchmark, +) { + if s.dbt == defraIMType { + // Benchmarking makes no sense for test in-memory storage + return + } + if len(bench.FocusClients) > 0 { + isFound := false + for _, clientType := range bench.FocusClients { + if s.clientType == clientType { + isFound = true + break + } + } + if !isFound { + return } } + + runBench := func(benchCase any) time.Duration { + startTime := time.Now() + for i := 0; i < bench.Reps; i++ { + performAction(s, actionIndex, benchCase) + } + return time.Since(startTime) + } + + s.isBench = true + defer func() { s.isBench = false }() + + baseElapsedTime := runBench(bench.BaseCase) + optimizedElapsedTime := runBench(bench.OptimizedCase) + + factoredBaseTime := int64(float64(baseElapsedTime) / bench.Factor) + assert.Greater(s.t, factoredBaseTime, optimizedElapsedTime, + "Optimized case should be faster at least by factor of %.2f than the base case. Base: %d, Optimized: %d (μs)", + bench.Factor, optimizedElapsedTime.Microseconds(), baseElapsedTime.Microseconds()) } // getCollectionNames gets an ordered, unique set of collection names across all nodes @@ -1536,6 +1586,7 @@ func executeRequest( &result.GQL, action.Results, action.ExpectedError, + action.Asserter, nodeID, anyOfByFieldKey, ) @@ -1601,6 +1652,7 @@ func executeSubscriptionRequest( finalResult, action.Results, action.ExpectedError, + nil, // anyof is not yet supported by subscription requests 0, map[docFieldKey][]any{}, @@ -1673,10 +1725,12 @@ func assertRequestResults( result *client.GQLResult, expectedResults []map[string]any, expectedError string, + asserter ResultAsserter, nodeID int, anyOfByField map[docFieldKey][]any, ) bool { - if AssertErrors(s.t, s.testCase.Description, result.Errors, expectedError) { + // we skip assertion benchmark because you don't specify expected result for benchmark. + if AssertErrors(s.t, s.testCase.Description, result.Errors, expectedError) || s.isBench { return true } @@ -1687,9 +1741,16 @@ func assertRequestResults( // Note: if result.Data == nil this panics (the panic seems useful while testing). 
resultantData := result.Data.([]map[string]any) + if asserter != nil { + asserter.Assert(s.t, resultantData) + return true + } + log.Info(s.ctx, "", logging.NewKV("RequestResults", result.Data)) - require.Equal(s.t, len(expectedResults), len(resultantData), s.testCase.Description) + // compare results + require.Equal(s.t, len(expectedResults), len(resultantData), + s.testCase.Description+" \n(number of results don't match)") for docIndex, result := range resultantData { expectedResult := expectedResults[docIndex] From 1e255e70fc802af9065b99c3ba8167ab17f476c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 14 Oct 2023 08:22:15 -0400 Subject: [PATCH 25/55] bot: Bump golang.org/x/net from 0.16.0 to 0.17.0 (#1961) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2659ca7667..8fb5ae48ee 100644 --- a/go.mod +++ b/go.mod @@ -47,8 +47,8 @@ require ( go.uber.org/zap v1.25.0 golang.org/x/crypto v0.14.0 golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 + golang.org/x/net v0.17.0 google.golang.org/grpc v1.58.2 - golang.org/x/net v0.16.0 google.golang.org/protobuf v1.31.0 ) diff --git a/go.sum b/go.sum index 34d3e57a48..1b1c80f4bf 100644 --- a/go.sum +++ b/go.sum @@ -1484,8 +1484,8 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= From 5c1b21e2200897263b28ce7c06a223be6bc37862 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Mon, 16 Oct 2023 16:55:28 -0400 Subject: [PATCH 26/55] refactor: Deprecate CollectionDescription.Schema (#1939) ## Relevant issue(s) Resolves #1955 ## Description Deprecate CollectionDescription.Schema. Schema is not a sub-property of collection. Removes as many references to CollectionDescription.Schema as possible without making any breaking changes. Breaking changes will be made in a later PR. An exception has been made to the http API, which does have a breaking change in this PR. 
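
As a rough sketch, the new access pattern looks like this (a hypothetical call
site; `Definition()`, `CollectionDefinition`, and the sibling `Description`/`Schema`
properties are the ones introduced in this PR):

```go
// describeCollection shows where the schema now lives relative to the
// collection description.
func describeCollection(col client.Collection) (client.CollectionDescription, client.SchemaDescription) {
	def := col.Definition() // bundles both halves of the definition
	// def.Schema is also reachable directly via col.Schema().
	return def.Description, def.Schema
}
```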
--- cli/collection_describe.go | 6 +- client/collection.go | 19 +- client/descriptions.go | 49 ++- core/parser.go | 4 +- db/base/collection_keys.go | 20 +- db/collection.go | 239 ++++++----- db/collection_delete.go | 4 +- db/collection_get.go | 5 +- db/collection_index.go | 25 +- db/collection_test.go | 309 --------------- db/collection_update.go | 10 +- db/db_test.go | 372 ------------------ db/errors.go | 2 +- db/fetcher/fetcher.go | 19 +- db/fetcher/indexer.go | 10 +- db/fetcher/mocks/fetcher.go | 12 +- db/fetcher/versioned.go | 13 +- db/fetcher_test.go | 156 -------- db/index.go | 6 +- db/index_test.go | 278 ++++++------- db/indexed_docs_test.go | 65 ++- db/p2p_collection_test.go | 34 +- db/schema.go | 68 ++-- http/client.go | 20 +- http/client_collection.go | 46 ++- http/handler_store.go | 10 +- lens/fetcher.go | 20 +- net/process.go | 2 +- planner/commit.go | 2 +- planner/datasource.go | 16 +- planner/mapper/descriptions.go | 66 ---- planner/mapper/mapper.go | 160 +++++--- planner/planner.go | 13 +- planner/scan.go | 28 +- planner/select.go | 19 +- planner/type_join.go | 17 +- request/graphql/parser.go | 4 +- request/graphql/schema/collection.go | 36 +- request/graphql/schema/descriptions_test.go | 96 +++-- request/graphql/schema/generate.go | 16 +- request/graphql/schema/index_test.go | 4 +- tests/clients/cli/wrapper.go | 20 +- tests/clients/cli/wrapper_collection.go | 54 +-- .../schema/updates/remove/simple_test.go | 2 +- 44 files changed, 761 insertions(+), 1615 deletions(-) delete mode 100644 planner/mapper/descriptions.go diff --git a/cli/collection_describe.go b/cli/collection_describe.go index 1d6ee55821..a21c4d0c10 100644 --- a/cli/collection_describe.go +++ b/cli/collection_describe.go @@ -39,16 +39,16 @@ Example: view collection by version id col, ok := tryGetCollectionContext(cmd) if ok { - return writeJSON(cmd, col.Description()) + return writeJSON(cmd, col.Definition()) } // if no collection specified list all collections cols, err := store.GetAllCollections(cmd.Context()) if err != nil { return err } - colDesc := make([]client.CollectionDescription, len(cols)) + colDesc := make([]client.CollectionDefinition, len(cols)) for i, col := range cols { - colDesc[i] = col.Description() + colDesc[i] = col.Definition() } return writeJSON(cmd, colDesc) }, diff --git a/client/collection.go b/client/collection.go index 9c91dccb7c..f52eeec0c2 100644 --- a/client/collection.go +++ b/client/collection.go @@ -16,6 +16,14 @@ import ( "github.com/sourcenetwork/defradb/datastore" ) +// CollectionDefinition contains the metadata defining what a Collection is. +type CollectionDefinition struct { + // Description returns the CollectionDescription of this Collection. + Description CollectionDescription `json:"description"` + // Schema returns the SchemaDescription used to define this Collection. + Schema SchemaDescription `json:"schema"` +} + // Collection represents a defradb collection. // // A Collection is mostly analogous to a SQL table, however a collection is specific to its @@ -23,17 +31,20 @@ import ( // // Many functions on this object will interact with the underlying datastores. type Collection interface { - // Description returns the CollectionDescription of this Collection. - Description() CollectionDescription // Name returns the name of this collection. Name() string - // Schema returns the SchemaDescription used to define this Collection. - Schema() SchemaDescription // ID returns the ID of this Collection. 
ID() uint32 // SchemaID returns the ID of the Schema used to define this Collection. SchemaID() string + // Definition contains the metadata defining what a Collection is. + Definition() CollectionDefinition + // Schema returns the SchemaDescription used to define this Collection. + Schema() SchemaDescription + // Description returns the CollectionDescription of this Collection. + Description() CollectionDescription + // Create a new document. // // Will verify the DocKey/CID to ensure that the new document is correctly formatted. diff --git a/client/descriptions.go b/client/descriptions.go index 4f388fa7d3..8dbe54ddce 100644 --- a/client/descriptions.go +++ b/client/descriptions.go @@ -28,6 +28,8 @@ type CollectionDescription struct { ID uint32 // Schema contains the data type information that this Collection uses. + // + // This property is deprecated and should not be used. Schema SchemaDescription // Indexes contains the secondary indexes that this Collection has. @@ -41,12 +43,21 @@ func (col CollectionDescription) IDString() string { // GetFieldByID searches for a field with the given ID. If such a field is found it // will return it and true, if it is not found it will return false. -func (col CollectionDescription) GetFieldByID(id FieldID) (FieldDescription, bool) { - if !col.Schema.IsEmpty() { - for _, field := range col.Schema.Fields { - if field.ID == id { - return field, true - } +func (col CollectionDescription) GetFieldByID(id FieldID, schema *SchemaDescription) (FieldDescription, bool) { + for _, field := range schema.Fields { + if field.ID == id { + return field, true + } + } + return FieldDescription{}, false +} + +// GetFieldByName returns the field for the given field name. If such a field is found it +// will return it and true, if it is not found it will return false. +func (col CollectionDescription) GetFieldByName(fieldName string, schema *SchemaDescription) (FieldDescription, bool) { + for _, field := range schema.Fields { + if field.Name == fieldName { + return field, true } } return FieldDescription{}, false @@ -57,8 +68,9 @@ func (col CollectionDescription) GetFieldByRelation( relationName string, otherCollectionName string, otherFieldName string, + schema *SchemaDescription, ) (FieldDescription, bool) { - for _, field := range col.Schema.Fields { + for _, field := range schema.Fields { if field.RelationName == relationName && !(col.Name == otherCollectionName && otherFieldName == field.Name) { return field, true } @@ -93,28 +105,11 @@ type SchemaDescription struct { Fields []FieldDescription } -// IsEmpty returns true if the SchemaDescription is empty and uninitialized -func (sd SchemaDescription) IsEmpty() bool { - return len(sd.Fields) == 0 -} - -// GetFieldKey returns the field ID for the given field name. -func (sd SchemaDescription) GetFieldKey(fieldName string) uint32 { - for _, field := range sd.Fields { - if field.Name == fieldName { - return uint32(field.ID) - } - } - return uint32(0) -} - // GetField returns the field of the given name. 
func (sd SchemaDescription) GetField(name string) (FieldDescription, bool) { - if !sd.IsEmpty() { - for _, field := range sd.Fields { - if field.Name == name { - return field, true - } + for _, field := range sd.Fields { + if field.Name == name { + return field, true } } return FieldDescription{}, false diff --git a/core/parser.go b/core/parser.go index ee2d2cfbf1..300f4411a4 100644 --- a/core/parser.go +++ b/core/parser.go @@ -51,8 +51,8 @@ type Parser interface { NewFilterFromString(collectionType string, body string) (immutable.Option[request.Filter], error) // ParseSDL parses an SDL string into a set of collection descriptions. - ParseSDL(ctx context.Context, schemaString string) ([]client.CollectionDescription, error) + ParseSDL(ctx context.Context, schemaString string) ([]client.CollectionDefinition, error) // Adds the given schema to this parser's model. - SetSchema(ctx context.Context, txn datastore.Txn, collections []client.CollectionDescription) error + SetSchema(ctx context.Context, txn datastore.Txn, collections []client.CollectionDefinition) error } diff --git a/db/base/collection_keys.go b/db/base/collection_keys.go index 0276975630..6a762ff180 100644 --- a/db/base/collection_keys.go +++ b/db/base/collection_keys.go @@ -34,6 +34,7 @@ func MakeDocKey(col client.CollectionDescription, docKey string) core.DataStoreK func MakePrimaryIndexKeyForCRDT( c client.CollectionDescription, + schema client.SchemaDescription, ctype client.CType, key core.DataStoreKey, fieldName string, @@ -42,19 +43,12 @@ func MakePrimaryIndexKeyForCRDT( case client.COMPOSITE: return MakeCollectionKey(c).WithInstanceInfo(key).WithFieldId(core.COMPOSITE_NAMESPACE), nil case client.LWW_REGISTER: - fieldKey := getFieldKey(c, key, fieldName) - return MakeCollectionKey(c).WithInstanceInfo(fieldKey), nil - } - return core.DataStoreKey{}, ErrInvalidCrdtType -} + field, ok := c.GetFieldByName(fieldName, &schema) + if !ok { + return core.DataStoreKey{}, client.NewErrFieldNotExist(fieldName) + } -func getFieldKey( - c client.CollectionDescription, - key core.DataStoreKey, - fieldName string, -) core.DataStoreKey { - if !c.Schema.IsEmpty() { - return key.WithFieldId(fmt.Sprint(c.Schema.GetFieldKey(fieldName))) + return MakeCollectionKey(c).WithInstanceInfo(key).WithFieldId(fmt.Sprint(field.ID)), nil } - return key.WithFieldId(fieldName) + return core.DataStoreKey{}, ErrInvalidCrdtType } diff --git a/db/collection.go b/db/collection.go index df8ca85cc1..8fdf9089ed 100644 --- a/db/collection.go +++ b/db/collection.go @@ -54,11 +54,7 @@ type collection struct { // of the operation in question. txn immutable.Option[datastore.Txn] - colID uint32 - - schemaID string - - desc client.CollectionDescription + def client.CollectionDefinition indexes []CollectionIndex fetcherFactory func() fetcher.Fetcher @@ -71,42 +67,10 @@ type collection struct { // CollectionOptions object. 
// NewCollection returns a pointer to a newly instanciated DB Collection -func (db *db) newCollection(desc client.CollectionDescription) (*collection, error) { - if desc.Name == "" { - return nil, client.NewErrUninitializeProperty("Collection", "Name") - } - - if len(desc.Schema.Fields) == 0 { - return nil, client.NewErrUninitializeProperty("Collection", "Fields") - } - - docKeyField := desc.Schema.Fields[0] - if docKeyField.Kind != client.FieldKind_DocKey || docKeyField.Name != request.KeyFieldName { - return nil, ErrSchemaFirstFieldDocKey - } - - for i, field := range desc.Schema.Fields { - if field.Name == "" { - return nil, client.NewErrUninitializeProperty("Collection.Schema", "Name") - } - if field.Kind == client.FieldKind_None { - return nil, client.NewErrUninitializeProperty("Collection.Schema", "FieldKind") - } - if (field.Kind != client.FieldKind_DocKey && !field.IsObject()) && - field.Typ == client.NONE_CRDT { - return nil, client.NewErrUninitializeProperty("Collection.Schema", "CRDT type") - } - desc.Schema.Fields[i].ID = client.FieldID(i) - } - +func (db *db) newCollection(desc client.CollectionDescription, schema client.SchemaDescription) (*collection, error) { return &collection{ - db: db, - desc: client.CollectionDescription{ - ID: desc.ID, - Name: desc.Name, - Schema: desc.Schema, - }, - colID: desc.ID, + db: db, + def: client.CollectionDefinition{Description: desc, Schema: schema}, }, nil } @@ -130,8 +94,11 @@ func (c *collection) newFetcher() fetcher.Fetcher { func (db *db) createCollection( ctx context.Context, txn datastore.Txn, - desc client.CollectionDescription, + def client.CollectionDefinition, ) (client.Collection, error) { + schema := def.Schema + desc := def.Description + // check if collection by this name exists collectionKey := core.NewCollectionKey(desc.Name) exists, err := txn.Systemstore().Has(ctx, collectionKey.ToDS()) @@ -151,14 +118,19 @@ func (db *db) createCollection( return nil, err } desc.ID = uint32(colID) - col, err := db.newCollection(desc) + + for i := range schema.Fields { + schema.Fields[i].ID = client.FieldID(i) + } + + col, err := db.newCollection(desc, schema) if err != nil { return nil, err } // Local elements such as secondary indexes should be excluded // from the (global) schemaId. - schemaBuf, err := json.Marshal(col.desc.Schema) + schemaBuf, err := json.Marshal(schema) if err != nil { return nil, err } @@ -169,16 +141,16 @@ func (db *db) createCollection( return nil, err } schemaID := cid.String() - col.schemaID = schemaID // For new schemas the initial version id will match the schema id schemaVersionID := schemaID - col.desc.Schema.VersionID = schemaVersionID - col.desc.Schema.SchemaID = schemaID + schema.VersionID = schemaVersionID + schema.SchemaID = schemaID + desc.Schema = schema // buffer must include all the ids, as it is saved and loaded from the store later. 
- buf, err := json.Marshal(col.desc) + buf, err := json.Marshal(desc) if err != nil { return nil, err } @@ -214,7 +186,8 @@ func (db *db) createCollection( return nil, err } } - return col, nil + + return db.getCollectionByName(ctx, txn, desc.Name) } // updateCollection updates the persisted collection description matching the name of the given @@ -229,24 +202,40 @@ func (db *db) updateCollection( ctx context.Context, txn datastore.Txn, existingDescriptionsByName map[string]client.CollectionDescription, - proposedDescriptionsByName map[string]client.CollectionDescription, - desc client.CollectionDescription, + existingSchemaByName map[string]client.SchemaDescription, + proposedDescriptionsByName map[string]client.SchemaDescription, + def client.CollectionDefinition, setAsDefaultVersion bool, ) (client.Collection, error) { - hasChanged, err := db.validateUpdateCollection(ctx, txn, existingDescriptionsByName, proposedDescriptionsByName, desc) + schema := def.Schema + desc := def.Description + + hasChanged, err := db.validateUpdateCollection(ctx, existingDescriptionsByName, desc) + if err != nil { + return nil, err + } + + hasSchemaChanged, err := db.validateUpdateSchema( + ctx, + txn, + existingSchemaByName, + proposedDescriptionsByName, + schema, + ) if err != nil { return nil, err } + hasChanged = hasChanged || hasSchemaChanged if !hasChanged { return db.getCollectionByName(ctx, txn, desc.Name) } - for _, field := range desc.Schema.Fields { + for _, field := range schema.Fields { if field.RelationType.IsSet(client.Relation_Type_ONE) { idFieldName := field.Name + "_id" - if _, ok := desc.Schema.GetField(idFieldName); !ok { - desc.Schema.Fields = append(desc.Schema.Fields, client.FieldDescription{ + if _, ok := schema.GetField(idFieldName); !ok { + schema.Fields = append(schema.Fields, client.FieldDescription{ Name: idFieldName, Kind: client.FieldKind_DocKey, RelationType: client.Relation_Type_INTERNAL_ID, @@ -256,23 +245,23 @@ func (db *db) updateCollection( } } - for i, field := range desc.Schema.Fields { + for i, field := range schema.Fields { if field.ID == client.FieldID(0) { // This is not wonderful and will probably break when we add the ability // to delete fields, however it is good enough for now and matches the // create behaviour. field.ID = client.FieldID(i) - desc.Schema.Fields[i] = field + schema.Fields[i] = field } if field.Typ == client.NONE_CRDT { // If no CRDT Type has been provided, default to LWW_REGISTER. 
field.Typ = client.LWW_REGISTER - desc.Schema.Fields[i] = field + schema.Fields[i] = field } } - globalSchemaBuf, err := json.Marshal(desc.Schema) + globalSchemaBuf, err := json.Marshal(schema) if err != nil { return nil, err } @@ -281,9 +270,10 @@ func (db *db) updateCollection( if err != nil { return nil, err } - previousSchemaVersionID := desc.Schema.VersionID + previousSchemaVersionID := schema.VersionID schemaVersionID := cid.String() - desc.Schema.VersionID = schemaVersionID + schema.VersionID = schemaVersionID + desc.Schema = schema buf, err := json.Marshal(desc) if err != nil { @@ -298,14 +288,14 @@ func (db *db) updateCollection( return nil, err } - schemaVersionHistoryKey := core.NewSchemaHistoryKey(desc.Schema.SchemaID, previousSchemaVersionID) + schemaVersionHistoryKey := core.NewSchemaHistoryKey(schema.SchemaID, previousSchemaVersionID) err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte(schemaVersionID)) if err != nil { return nil, err } if setAsDefaultVersion { - err = db.setDefaultSchemaVersionExplicit(ctx, txn, desc.Name, desc.Schema.SchemaID, schemaVersionID) + err = db.setDefaultSchemaVersionExplicit(ctx, txn, desc.Name, schema.SchemaID, schemaVersionID) if err != nil { return nil, err } @@ -320,9 +310,7 @@ func (db *db) updateCollection( // collection. Will return an error if it fails validation. func (db *db) validateUpdateCollection( ctx context.Context, - txn datastore.Txn, existingDescriptionsByName map[string]client.CollectionDescription, - proposedDescriptionsByName map[string]client.CollectionDescription, proposedDesc client.CollectionDescription, ) (bool, error) { if proposedDesc.Name == "" { @@ -338,50 +326,73 @@ func (db *db) validateUpdateCollection( return false, NewErrCollectionIDDoesntMatch(proposedDesc.Name, existingDesc.ID, proposedDesc.ID) } - if proposedDesc.Schema.SchemaID != existingDesc.Schema.SchemaID { + hasChangedIndexes, err := validateUpdateCollectionIndexes(existingDesc.Indexes, proposedDesc.Indexes) + return hasChangedIndexes, err +} + +// validateUpdateSchema validates that the given schema description is a valid update. +// +// Will return true if the given description differs from the current persisted state of the +// schema. Will return an error if it fails validation. +func (db *db) validateUpdateSchema( + ctx context.Context, + txn datastore.Txn, + existingDescriptionsByName map[string]client.SchemaDescription, + proposedDescriptionsByName map[string]client.SchemaDescription, + proposedDesc client.SchemaDescription, +) (bool, error) { + if proposedDesc.Name == "" { + return false, ErrSchemaNameEmpty + } + + existingDesc, collectionExists := existingDescriptionsByName[proposedDesc.Name] + if !collectionExists { + return false, NewErrAddCollectionWithPatch(proposedDesc.Name) + } + + if proposedDesc.SchemaID != existingDesc.SchemaID { return false, NewErrSchemaIDDoesntMatch( proposedDesc.Name, - existingDesc.Schema.SchemaID, - proposedDesc.Schema.SchemaID, + existingDesc.SchemaID, + proposedDesc.SchemaID, ) } - if proposedDesc.Schema.Name != existingDesc.Schema.Name { + if proposedDesc.Name != existingDesc.Name { // There is actually little reason to not support this atm besides controlling the surface area // of the new feature. Changing this should not break anything, but it should be tested first. 
- return false, NewErrCannotModifySchemaName(existingDesc.Schema.Name, proposedDesc.Schema.Name) + return false, NewErrCannotModifySchemaName(existingDesc.Name, proposedDesc.Name) } - if proposedDesc.Schema.VersionID != "" && proposedDesc.Schema.VersionID != existingDesc.Schema.VersionID { + if proposedDesc.VersionID != "" && proposedDesc.VersionID != existingDesc.VersionID { // If users specify this it will be overwritten, an error is preferred to quietly ignoring it. return false, ErrCannotSetVersionID } - hasChangedFields, err := validateUpdateCollectionFields(proposedDescriptionsByName, existingDesc, proposedDesc) + hasChangedFields, err := validateUpdateSchemaFields(proposedDescriptionsByName, existingDesc, proposedDesc) if err != nil { return hasChangedFields, err } - hasChangedIndexes, err := validateUpdateCollectionIndexes(existingDesc.Indexes, proposedDesc.Indexes) - return hasChangedFields || hasChangedIndexes, err + return hasChangedFields, err } -func validateUpdateCollectionFields( - descriptionsByName map[string]client.CollectionDescription, - existingDesc client.CollectionDescription, - proposedDesc client.CollectionDescription, +func validateUpdateSchemaFields( + descriptionsByName map[string]client.SchemaDescription, + existingDesc client.SchemaDescription, + proposedDesc client.SchemaDescription, ) (bool, error) { hasChanged := false existingFieldsByID := map[client.FieldID]client.FieldDescription{} existingFieldIndexesByName := map[string]int{} - for i, field := range existingDesc.Schema.Fields { + for i, field := range existingDesc.Fields { existingFieldIndexesByName[field.Name] = i existingFieldsByID[field.ID] = field } newFieldNames := map[string]struct{}{} newFieldIds := map[client.FieldID]struct{}{} - for proposedIndex, proposedField := range proposedDesc.Schema.Fields { + for proposedIndex, proposedField := range proposedDesc.Fields { var existingField client.FieldDescription var fieldAlreadyExists bool if proposedField.ID != client.FieldID(0) || @@ -449,7 +460,7 @@ func validateUpdateCollectionFields( if proposedField.Kind == client.FieldKind_FOREIGN_OBJECT { idFieldName := proposedField.Name + request.RelatedObjectID - idField, idFieldFound := proposedDesc.Schema.GetField(idFieldName) + idField, idFieldFound := proposedDesc.GetField(idFieldName) if idFieldFound { if idField.Kind != client.FieldKind_DocKey { return false, NewErrRelationalFieldIDInvalidType(idField.Name, client.FieldKind_DocKey, idField.Kind) } @@ -471,7 +482,7 @@ func validateUpdateCollectionFields( var relatedFieldFound bool var relatedField client.FieldDescription - for _, field := range relatedDesc.Schema.Fields { + for _, field := range relatedDesc.Fields { if field.RelationName == proposedField.RelationName && !field.RelationType.IsSet(client.Relation_Type_INTERNAL_ID) && !(relatedDesc.Name == proposedDesc.Name && field.Name == proposedField.Name) { @@ -545,7 +556,7 @@ func validateUpdateCollectionFields( newFieldIds[proposedField.ID] = struct{}{} } - for _, field := range existingDesc.Schema.Fields { + for _, field := range existingDesc.Fields { if _, stillExists := newFieldIds[field.ID]; !stillExists { return false, NewErrCannotDeleteField(field.Name, field.ID) } } @@ -600,12 +611,17 @@ func (db *db) setDefaultSchemaVersion( return err } - cols, err := db.getCollectionDescriptions(ctx, txn) + cols, err := db.getAllCollections(ctx, txn) if err != nil { return err } - return db.parser.SetSchema(ctx, txn, cols) + definitions := make([]client.CollectionDefinition, len(cols)) + for i, col := range
cols { + definitions[i] = col.Definition() + } + + return db.parser.SetSchema(ctx, txn, definitions) } func (db *db) setDefaultSchemaVersionExplicit( @@ -650,10 +666,11 @@ func (db *db) getCollectionByVersionID( } col := &collection{ - db: db, - desc: desc, - colID: desc.ID, - schemaID: desc.Schema.SchemaID, + db: db, + def: client.CollectionDefinition{ + Description: desc, + Schema: desc.Schema, + }, } err = col.loadIndexes(ctx, txn) @@ -751,7 +768,7 @@ func (c *collection) getAllDocKeysChan( txn datastore.Txn, ) (<-chan client.DocKeysResult, error) { prefix := core.PrimaryDataStoreKey{ // empty path for all keys prefix - CollectionId: fmt.Sprint(c.colID), + CollectionId: fmt.Sprint(c.ID()), } q, err := txn.Datastore().Query(ctx, query.Query{ Prefix: prefix.ToString(), @@ -806,26 +823,30 @@ func (c *collection) getAllDocKeysChan( // Description returns the client.CollectionDescription. func (c *collection) Description() client.CollectionDescription { - return c.desc + return c.Definition().Description } // Name returns the collection name. func (c *collection) Name() string { - return c.desc.Name + return c.Description().Name } // Schema returns the Schema of the collection. func (c *collection) Schema() client.SchemaDescription { - return c.desc.Schema + return c.Definition().Schema } // ID returns the ID of the collection. func (c *collection) ID() uint32 { - return c.colID + return c.Description().ID } func (c *collection) SchemaID() string { - return c.schemaID + return c.Schema().SchemaID +} + +func (c *collection) Definition() client.CollectionDefinition { + return c.def } // WithTxn returns a new instance of the collection, with a transaction @@ -834,9 +855,7 @@ func (c *collection) WithTxn(txn datastore.Txn) client.Collection { return &collection{ db: c.db, txn: immutable.Some(txn), - desc: c.desc, - colID: c.colID, - schemaID: c.schemaID, + def: c.def, indexes: c.indexes, fetcherFactory: c.fetcherFactory, } @@ -894,7 +913,7 @@ func (c *collection) getKeysFromDoc( func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client.Document) error { // This has to be done before dockey verification happens in the next step. 
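// Remapping resolves any alias field names on the document to their canonical schema field names.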
- if err := doc.RemapAliasFieldsAndDockey(c.desc.Schema.Fields); err != nil { + if err := doc.RemapAliasFieldsAndDockey(c.Schema().Fields); err != nil { return err } @@ -1050,7 +1069,7 @@ func (c *collection) save( return cid.Undef, client.NewErrFieldNotExist(k) } - fieldDescription, valid := c.desc.Schema.GetField(k) + fieldDescription, valid := c.Schema().GetField(k) if !valid { return cid.Undef, client.NewErrFieldNotExist(k) } @@ -1121,7 +1140,7 @@ func (c *collection) save( events.Update{ DocKey: doc.Key().String(), Cid: headNode.Cid(), - SchemaID: c.schemaID, + SchemaID: c.Schema().SchemaID, Block: headNode, Priority: priority, }, @@ -1152,7 +1171,7 @@ func (c *collection) validateOneToOneLinkDoesntAlreadyExist( return nil } - objFieldDescription, ok := c.desc.Schema.GetField(strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID)) + objFieldDescription, ok := c.Schema().GetField(strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID)) if !ok { return client.NewErrFieldNotExist(strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID)) } @@ -1320,10 +1339,17 @@ func (c *collection) saveValueToMerkleCRDT( if err != nil { return nil, 0, err } - field, _ := c.Description().GetFieldByID(client.FieldID(fieldID)) + + schema := c.Schema() + + field, ok := c.Description().GetFieldByID(client.FieldID(fieldID), &schema) + if !ok { + return nil, 0, client.NewErrFieldIndexNotExist(fieldID) + } + merkleCRDT, err := c.db.crdtFactory.InstanceWithStores( txn, - core.NewCollectionSchemaVersionKey(c.Schema().VersionID), + core.NewCollectionSchemaVersionKey(schema.VersionID), c.db.events.Updates, ctype, key, @@ -1334,7 +1360,6 @@ func (c *collection) saveValueToMerkleCRDT( } var bytes []byte - var ok bool // parse args if len(args) != 1 { return nil, 0, ErrUnknownCRDTArgument @@ -1417,14 +1442,14 @@ func (c *collection) commitImplicitTxn(ctx context.Context, txn datastore.Txn) e func (c *collection) getPrimaryKeyFromDocKey(docKey client.DocKey) core.PrimaryDataStoreKey { return core.PrimaryDataStoreKey{ - CollectionId: fmt.Sprint(c.colID), + CollectionId: fmt.Sprint(c.ID()), DocKey: docKey.String(), } } func (c *collection) getDSKeyFromDockey(docKey client.DocKey) core.DataStoreKey { return core.DataStoreKey{ - CollectionID: fmt.Sprint(c.colID), + CollectionID: fmt.Sprint(c.ID()), DocKey: docKey.String(), InstanceType: core.ValueKey, } @@ -1446,7 +1471,7 @@ func (c *collection) tryGetFieldKey(key core.PrimaryDataStoreKey, fieldName stri // tryGetSchemaFieldID returns the FieldID of the given fieldName. // Will return false if the field is not found. func (c *collection) tryGetSchemaFieldID(fieldName string) (uint32, bool) { - for _, field := range c.desc.Schema.Fields { + for _, field := range c.Schema().Fields { if field.Name == fieldName { if field.IsObject() || field.IsObjectArray() { // We do not wish to match navigational properties, only diff --git a/db/collection_delete.go b/db/collection_delete.go index 480656849f..a24eb496f6 100644 --- a/db/collection_delete.go +++ b/db/collection_delete.go @@ -207,7 +207,7 @@ func (c *collection) deleteWithFilter( // Convert from string to client.DocKey. 
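// The primary datastore key pairs the collection id with the dockey.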
key := core.PrimaryDataStoreKey{ - CollectionId: fmt.Sprint(c.colID), + CollectionId: fmt.Sprint(c.ID()), DocKey: docKey, } @@ -281,7 +281,7 @@ func (c *collection) applyDelete( events.Update{ DocKey: key.DocKey, Cid: headNode.Cid(), - SchemaID: c.schemaID, + SchemaID: c.Schema().SchemaID, Block: headNode, Priority: priority, }, diff --git a/db/collection_get.go b/db/collection_get.go index 8262ff44ba..d210072793 100644 --- a/db/collection_get.go +++ b/db/collection_get.go @@ -53,16 +53,15 @@ func (c *collection) get( ) (*client.Document, error) { // create a new document fetcher df := c.newFetcher() - desc := &c.desc // initialize it with the primary index - err := df.Init(ctx, txn, &c.desc, fields, nil, nil, false, showDeleted) + err := df.Init(ctx, txn, c, fields, nil, nil, false, showDeleted) if err != nil { _ = df.Close() return nil, err } // construct target key for DocKey - targetKey := base.MakeDocKey(*desc, key.DocKey) + targetKey := base.MakeDocKey(c.Description(), key.DocKey) // run the doc fetcher err = df.Start(ctx, core.NewSpans(core.NewSpan(targetKey, targetKey.PrefixEnd()))) if err != nil { diff --git a/db/collection_index.go b/db/collection_index.go index f3c1ba2e98..278586902b 100644 --- a/db/collection_index.go +++ b/db/collection_index.go @@ -121,10 +121,11 @@ func (c *collection) updateIndexedDoc( return err } desc := c.Description() + schema := c.Schema() oldDoc, err := c.get( ctx, txn, - c.getPrimaryKeyFromDocKey(doc.Key()), desc.CollectIndexedFields(&desc.Schema), + c.getPrimaryKeyFromDocKey(doc.Key()), desc.CollectIndexedFields(&schema), false, ) if err != nil { @@ -217,7 +218,7 @@ func (c *collection) createIndex( if err != nil { return nil, err } - c.desc.Indexes = append(c.desc.Indexes, colIndex.Description()) + c.def.Description.Indexes = append(c.def.Description.Indexes, colIndex.Description()) c.indexes = append(c.indexes, colIndex) err = c.indexExistingDocs(ctx, txn, colIndex) if err != nil { @@ -233,12 +234,12 @@ func (c *collection) iterateAllDocs( exec func(doc *client.Document) error, ) error { df := c.newFetcher() - err := df.Init(ctx, txn, &c.desc, fields, nil, nil, false, false) + err := df.Init(ctx, txn, c, fields, nil, nil, false, false) if err != nil { _ = df.Close() return err } - start := base.MakeCollectionKey(c.desc) + start := base.MakeCollectionKey(c.Description()) spans := core.NewSpans(core.NewSpan(start, start.PrefixEnd())) err = df.Start(ctx, spans) @@ -278,8 +279,8 @@ func (c *collection) indexExistingDocs( ) error { fields := make([]client.FieldDescription, 0, 1) for _, field := range index.Description().Fields { - for i := range c.desc.Schema.Fields { - colField := c.desc.Schema.Fields[i] + for i := range c.Schema().Fields { + colField := c.Schema().Fields[i] if field.Name == colField.Name { fields = append(fields, colField) break @@ -333,9 +334,9 @@ func (c *collection) dropIndex(ctx context.Context, txn datastore.Txn, indexName return NewErrIndexWithNameDoesNotExists(indexName) } - for i := range c.desc.Indexes { - if c.desc.Indexes[i].Name == indexName { - c.desc.Indexes = append(c.desc.Indexes[:i], c.desc.Indexes[i+1:]...) + for i := range c.Description().Indexes { + if c.Description().Indexes[i].Name == indexName { + c.def.Description.Indexes = append(c.Description().Indexes[:i], c.Description().Indexes[i+1:]...) 
break } } @@ -379,7 +380,7 @@ func (c *collection) loadIndexes(ctx context.Context, txn datastore.Txn) error { } colIndexes = append(colIndexes, index) } - c.desc.Indexes = indexDescriptions + c.def.Description.Indexes = indexDescriptions c.indexes = colIndexes return nil } @@ -396,14 +397,14 @@ func (c *collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, if err != nil { return nil, err } - return c.desc.Indexes, nil + return c.Description().Indexes, nil } func (c *collection) checkExistingFields( ctx context.Context, fields []client.IndexedFieldDescription, ) error { - collectionFields := c.Description().Schema.Fields + collectionFields := c.Schema().Fields for _, field := range fields { found := false for _, colField := range collectionFields { diff --git a/db/collection_test.go b/db/collection_test.go index e3686504d3..dd57cb285b 100644 --- a/db/collection_test.go +++ b/db/collection_test.go @@ -12,320 +12,11 @@ package db import ( "context" - "reflect" "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" ) -func newTestCollectionWithSchema( - t *testing.T, - ctx context.Context, - db *implicitTxnDB, -) (client.Collection, error) { - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "Name", - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - { - Name: "Age", - Kind: client.FieldKind_INT, - Typ: client.LWW_REGISTER, - }, - { - Name: "Weight", - Kind: client.FieldKind_FLOAT, - Typ: client.LWW_REGISTER, - }, - }, - }, - } - - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - col, err := db.createCollection(ctx, txn, desc) - if err != nil { - return col, err - } - - return col, txn.Commit(ctx) -} - -func TestNewCollection_ReturnsError_GivenNoSchema(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - _, err = db.createCollection(ctx, txn, client.CollectionDescription{ - Name: "test", - }) - assert.Error(t, err) -} - -func TestNewCollectionWithSchema(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - schema := col.Schema() - desc := col.Description() - - assert.True(t, reflect.DeepEqual(schema, desc.Schema)) - assert.Equal(t, "users", col.Name()) - assert.Equal(t, uint32(1), col.ID()) - assert.False(t, reflect.DeepEqual(schema, client.SchemaDescription{})) - assert.Equal(t, 4, len(schema.Fields)) - - for i := 0; i < 4; i++ { - assert.Equal(t, client.FieldID(i), schema.Fields[i].ID) - } -} - -func TestNewCollectionReturnsErrorGivenDuplicateSchema(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - - _, err = newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - _, err = newTestCollectionWithSchema(t, ctx, db) - assert.Errorf(t, err, "collection already exists") -} - -func TestNewCollectionReturnsErrorGivenNoFields(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{}, - }, 
- } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError( - t, - err, - "invalid state, required property is uninitialized. Host: Collection, PropertyName: Fields", - ) -} - -func TestNewCollectionReturnsErrorGivenNoName(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{}, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError( - t, - err, - "invalid state, required property is uninitialized. Host: Collection, PropertyName: Name", - ) -} - -func TestNewCollectionReturnsErrorGivenNoKeyField(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "Name", - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - }, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError(t, err, "collection schema first field must be a DocKey") -} - -func TestNewCollectionReturnsErrorGivenKeyFieldIsNotFirstField(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "Name", - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - }, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError(t, err, "collection schema first field must be a DocKey") -} - -func TestNewCollectionReturnsErrorGivenFieldWithNoName(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "", - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - }, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError( - t, - err, - "invalid state, required property is uninitialized. Host: Collection.Schema, PropertyName: Name", - ) -} - -func TestNewCollectionReturnsErrorGivenFieldWithNoKind(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "Name", - Typ: client.LWW_REGISTER, - }, - }, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError( - t, - err, - "invalid state, required property is uninitialized. 
Host: Collection.Schema, PropertyName: FieldKind", - ) -} - -func TestNewCollectionReturnsErrorGivenFieldWithNoType(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "Name", - Kind: client.FieldKind_STRING, - }, - }, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError( - t, - err, - "invalid state, required property is uninitialized. Host: Collection.Schema, PropertyName: CRDT type", - ) -} - -func TestGetCollectionByName(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - - _, err = newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - col, err := db.GetCollectionByName(ctx, "users") - assert.NoError(t, err) - - schema := col.Schema() - desc := col.Description() - - assert.True(t, reflect.DeepEqual(schema, desc.Schema)) - assert.Equal(t, "users", col.Name()) - assert.Equal(t, uint32(1), col.ID()) - assert.False(t, reflect.DeepEqual(schema, client.SchemaDescription{})) - assert.Equal(t, 4, len(schema.Fields)) - - for i := 0; i < 4; i++ { - assert.Equal(t, client.FieldID(i), schema.Fields[i].ID) - } -} - func TestGetCollectionByNameReturnsErrorGivenNonExistantCollection(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) diff --git a/db/collection_update.go b/db/collection_update.go index 2e353dd0d3..c68902db44 100644 --- a/db/collection_update.go +++ b/db/collection_update.go @@ -303,13 +303,13 @@ func (c *collection) applyMergeToDoc( }) for mfield, mval := range mergeMap { - fd, isValidField := c.desc.Schema.GetField(mfield) + fd, isValidField := c.Schema().GetField(mfield) if !isValidField { return client.NewErrFieldNotExist(mfield) } if fd.Kind == client.FieldKind_FOREIGN_OBJECT { - fd, isValidField = c.desc.Schema.GetField(mfield + request.RelatedObjectID) + fd, isValidField = c.Schema().GetField(mfield + request.RelatedObjectID) if !isValidField { return client.NewErrFieldNotExist(mfield) } @@ -335,7 +335,7 @@ func (c *collection) isSecondaryIDField(fieldDesc client.FieldDescription) (clie return client.FieldDescription{}, false } - relationFieldDescription, valid := c.Description().Schema.GetField( + relationFieldDescription, valid := c.Schema().GetField( strings.TrimSuffix(fieldDesc.Name, request.RelatedObjectID), ) return relationFieldDescription, valid && !relationFieldDescription.IsPrimaryRelation() @@ -365,17 +365,19 @@ func (c *collection) patchPrimaryDoc( return err } primaryCol = primaryCol.WithTxn(txn) + primarySchema := primaryCol.Schema() primaryField, ok := primaryCol.Description().GetFieldByRelation( relationFieldDescription.RelationName, secondaryCollectionName, relationFieldDescription.Name, + &primarySchema, ) if !ok { return client.NewErrFieldNotExist(relationFieldDescription.RelationName) } - primaryIDField, ok := primaryCol.Description().Schema.GetField(primaryField.Name + request.RelatedObjectID) + primaryIDField, ok := primaryCol.Schema().GetField(primaryField.Name + request.RelatedObjectID) if !ok { return client.NewErrFieldNotExist(primaryField.Name + request.RelatedObjectID) } diff --git a/db/db_test.go b/db/db_test.go index c1a9648f36..9d681058c8 100644 --- a/db/db_test.go +++ b/db/db_test.go @@ -15,14 +15,8 @@ import ( "testing" badger 
"github.com/dgraph-io/badger/v4" - dag "github.com/ipfs/boxo/ipld/merkledag" - "github.com/stretchr/testify/assert" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - corecrdt "github.com/sourcenetwork/defradb/core/crdt" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/merkle/clock" ) func newMemoryDB(ctx context.Context) (*implicitTxnDB, error) { @@ -48,369 +42,3 @@ func TestNewDB(t *testing.T) { t.Error(err) } } - -func TestDBSaveSimpleDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - if err != nil { - t.Error(err) - return - } - - err = col.Save(ctx, doc) - if err != nil { - t.Error(err) - } - - // value check - name, err := doc.Get("Name") - assert.NoError(t, err) - age, err := doc.Get("Age") - assert.NoError(t, err) - weight, err := doc.Get("Weight") - assert.NoError(t, err) - - assert.Equal(t, "John", name) - assert.Equal(t, int64(21), age) - assert.Equal(t, 154.1, weight) - - _, err = doc.Get("DoesntExist") - assert.Error(t, err) - - // db.printDebugDB() -} - -func TestDBUpdateDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - if err != nil { - t.Error(err) - return - } - - err = col.Save(ctx, doc) - if err != nil { - t.Error(err) - } - - // update fields - doc.Set("Name", "Pete") - doc.Delete("Weight") - - weightField := doc.Fields()["Weight"] - weightVal, _ := doc.GetValueWithField(weightField) - assert.True(t, weightVal.IsDelete()) - - err = col.Update(ctx, doc) - if err != nil { - t.Error(err) - } - - // value check - name, err := doc.Get("Name") - assert.NoError(t, err) - age, err := doc.Get("Age") - assert.NoError(t, err) - weight, err := doc.Get("Weight") - assert.NoError(t, err) - - assert.Equal(t, "Pete", name) - assert.Equal(t, int64(21), age) - assert.Nil(t, weight) -} - -func TestDBUpdateNonExistingDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - if err != nil { - t.Error(err) - return - } - - err = col.Update(ctx, doc) - assert.Error(t, err) -} - -func TestDBUpdateExistingDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Save(ctx, doc) - assert.NoError(t, err) - - testJSONObj = []byte(`{ - "_key": "bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d", - "Name": "Pete", - "Age": 31 - }`) - - doc, err = client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Update(ctx, doc) - assert.NoError(t, err) - - // value check - name, err := doc.Get("Name") - 
assert.NoError(t, err) - age, err := doc.Get("Age") - assert.NoError(t, err) - // weight, err := doc.Get("Weight") - // assert.NoError(t, err) - - assert.Equal(t, "Pete", name) - assert.Equal(t, int64(31), age) -} - -func TestDBGetDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Save(ctx, doc) - assert.NoError(t, err) - - key, err := client.NewDocKeyFromString("bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d") - assert.NoError(t, err) - doc, err = col.Get(ctx, key, false) - assert.NoError(t, err) - - // value check - name, err := doc.Get("Name") - assert.NoError(t, err) - age, err := doc.Get("Age") - assert.NoError(t, err) - weight, err := doc.Get("Weight") - assert.NoError(t, err) - - assert.Equal(t, "John", name) - assert.Equal( - t, - uint64(21), - age, - ) // note: uint is used here, because the CBOR implementation converts all positive ints to uint64 - assert.Equal(t, 154.1, weight) -} - -func TestDBGetNotFoundDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - key, err := client.NewDocKeyFromString("bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d") - assert.NoError(t, err) - _, err = col.Get(ctx, key, false) - assert.EqualError(t, err, client.ErrDocumentNotFound.Error()) -} - -func TestDBDeleteDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Save(ctx, doc) - assert.NoError(t, err) - - key, err := client.NewDocKeyFromString("bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d") - assert.NoError(t, err) - deleted, err := col.Delete(ctx, key) - assert.NoError(t, err) - assert.True(t, deleted) -} - -func TestDBDeleteNotFoundDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - key, err := client.NewDocKeyFromString("bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d") - assert.NoError(t, err) - deleted, err := col.Delete(ctx, key) - assert.EqualError(t, err, client.ErrDocumentNotFound.Error()) - assert.False(t, deleted) -} - -func TestDocumentMerkleDAG(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Save(ctx, doc) - assert.NoError(t, err) - - clk := clock.NewMerkleClock( - db.multistore.Headstore(), - nil, - core.HeadStoreKey{}.WithDocKey( - "bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d", - ).WithFieldId( - "Name", - ), - nil, - ) - heads := clk.(*clock.MerkleClock).Heads() - cids, _, err := heads.List(ctx) - assert.NoError(t, err) - - reg := corecrdt.LWWRegister{} - for _, c := range cids { - b, errGet := db.Blockstore().Get(ctx, c) - 
assert.NoError(t, errGet) - - nd, errDecode := dag.DecodeProtobuf(b.RawData()) - assert.NoError(t, errDecode) - - _, errMarshal := nd.MarshalJSON() - assert.NoError(t, errMarshal) - - _, errDeltaDecode := reg.DeltaDecode(nd) - assert.NoError(t, errDeltaDecode) - } - - testJSONObj = []byte(`{ - "_key": "bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d", - "Name": "Pete", - "Age": 31 - }`) - - doc, err = client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Update(ctx, doc) - assert.NoError(t, err) - - heads = clk.(*clock.MerkleClock).Heads() - cids, _, err = heads.List(ctx) - assert.NoError(t, err) - - for _, c := range cids { - b, err := db.Blockstore().Get(ctx, c) - assert.NoError(t, err) - - nd, err := dag.DecodeProtobuf(b.RawData()) - assert.NoError(t, err) - - _, err = nd.MarshalJSON() - assert.NoError(t, err) - - _, err = reg.DeltaDecode(nd) - assert.NoError(t, err) - } -} - -// collection with schema -func TestDBSchemaSaveSimpleDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - if err != nil { - t.Error(err) - return - } - - err = col.Save(ctx, doc) - assert.NoError(t, err) - - // value check - name, err := doc.Get("Name") - assert.NoError(t, err) - age, err := doc.Get("Age") - assert.NoError(t, err) - - assert.Equal(t, "John", name) - assert.Equal(t, int64(21), age) - - err = db.PrintDump(ctx) - assert.Nil(t, err) -} diff --git a/db/errors.go b/db/errors.go index 4a456cd41a..d4e883c11e 100644 --- a/db/errors.go +++ b/db/errors.go @@ -111,9 +111,9 @@ var ( ErrDocumentDeleted = errors.New(errDocumentDeleted) ErrUnknownCRDTArgument = errors.New("invalid CRDT arguments") ErrUnknownCRDT = errors.New("unknown crdt") - ErrSchemaFirstFieldDocKey = errors.New("collection schema first field must be a DocKey") ErrCollectionAlreadyExists = errors.New("collection already exists") ErrCollectionNameEmpty = errors.New("collection name can't be empty") + ErrSchemaNameEmpty = errors.New("schema name can't be empty") ErrSchemaIDEmpty = errors.New("schema ID can't be empty") ErrSchemaVersionIDEmpty = errors.New("schema version ID can't be empty") ErrKeyEmpty = errors.New("key cannot be empty") diff --git a/db/fetcher/fetcher.go b/db/fetcher/fetcher.go index 8935e617cc..da7a0df1e1 100644 --- a/db/fetcher/fetcher.go +++ b/db/fetcher/fetcher.go @@ -57,7 +57,7 @@ type Fetcher interface { Init( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, @@ -81,7 +81,7 @@ var ( // DocumentFetcher is a utility to incrementally fetch all the documents. 
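// A fetcher must be initialized via Init before Start or FetchNext may be called.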
type DocumentFetcher struct { - col *client.CollectionDescription + col client.Collection reverse bool deletedDocs bool @@ -137,7 +137,7 @@ type DocumentFetcher struct { func (df *DocumentFetcher) Init( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, @@ -145,9 +145,6 @@ func (df *DocumentFetcher) Init( showDeleted bool, ) error { df.txn = txn - if col.Schema.IsEmpty() { - return client.NewErrUninitializeProperty("DocumentFetcher", "Schema") - } err := df.init(col, fields, filter, docmapper, reverse) if err != nil { @@ -166,7 +163,7 @@ func (df *DocumentFetcher) Init( } func (df *DocumentFetcher) init( - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docMapper *core.DocumentMapping, @@ -202,7 +199,7 @@ func (df *DocumentFetcher) init( // get them all var targetFields []client.FieldDescription if len(fields) == 0 { - targetFields = df.col.Schema.Fields + targetFields = df.col.Schema().Fields } else { targetFields = fields } @@ -213,12 +210,12 @@ func (df *DocumentFetcher) init( if df.filter != nil { conditions := df.filter.ToMap(df.mapping) - parsedfilterFields, err := parser.ParseFilterFieldsForDescription(conditions, df.col.Schema) + parsedfilterFields, err := parser.ParseFilterFieldsForDescription(conditions, df.col.Schema()) if err != nil { return err } df.filterFields = make(map[uint32]client.FieldDescription, len(parsedfilterFields)) - df.filterSet = bitset.New(uint(len(col.Schema.Fields))) + df.filterSet = bitset.New(uint(len(col.Schema().Fields))) for _, field := range parsedfilterFields { df.filterFields[uint32(field.ID)] = field df.filterSet.Set(uint(field.ID)) @@ -253,7 +250,7 @@ func (df *DocumentFetcher) start(ctx context.Context, spans core.Spans, withDele df.deletedDocs = withDeleted if !spans.HasValue { // no specified spans so create a prefix scan key for the entire collection - start := base.MakeCollectionKey(*df.col) + start := base.MakeCollectionKey(df.col.Description()) if withDeleted { start = start.WithDeletedFlag() } else { diff --git a/db/fetcher/indexer.go b/db/fetcher/indexer.go index da4dc6a580..a0ee94d0b9 100644 --- a/db/fetcher/indexer.go +++ b/db/fetcher/indexer.go @@ -24,7 +24,7 @@ import ( // It fetches only the indexed field and the rest of the fields are fetched by the internal fetcher. 
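// The internal fetcher is a regular document fetcher scoped to the same collection.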
type IndexFetcher struct { docFetcher Fetcher - col *client.CollectionDescription + col client.Collection txn datastore.Txn indexFilter *mapper.Filter docFilter *mapper.Filter @@ -55,7 +55,7 @@ func NewIndexFetcher( func (f *IndexFetcher) Init( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docMapper *core.DocumentMapping, @@ -68,14 +68,14 @@ func (f *IndexFetcher) Init( f.mapping = docMapper f.txn = txn - for _, index := range col.Indexes { + for _, index := range col.Description().Indexes { if index.Fields[0].Name == f.indexedField.Name { f.indexDataStoreKey.IndexID = index.ID break } } - f.indexDataStoreKey.CollectionID = f.col.ID + f.indexDataStoreKey.CollectionID = f.col.ID() for i := range fields { if fields[i].Name == f.indexedField.Name { @@ -131,7 +131,7 @@ func (f *IndexFetcher) FetchNext(ctx context.Context) (EncodedDocument, ExecInfo f.execInfo.FieldsFetched++ if f.docFetcher != nil && len(f.docFields) > 0 { - targetKey := base.MakeDocKey(*f.col, string(f.doc.key)) + targetKey := base.MakeDocKey(f.col.Description(), string(f.doc.key)) spans := core.NewSpans(core.NewSpan(targetKey, targetKey.PrefixEnd())) err = f.docFetcher.Start(ctx, spans) if err != nil { diff --git a/db/fetcher/mocks/fetcher.go b/db/fetcher/mocks/fetcher.go index 79eefefc2b..1597b13b2e 100644 --- a/db/fetcher/mocks/fetcher.go +++ b/db/fetcher/mocks/fetcher.go @@ -134,11 +134,11 @@ func (_c *Fetcher_FetchNext_Call) RunAndReturn(run func(context.Context) (fetche } // Init provides a mock function with given fields: ctx, txn, col, fields, filter, docmapper, reverse, showDeleted -func (_m *Fetcher) Init(ctx context.Context, txn datastore.Txn, col *client.CollectionDescription, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error { +func (_m *Fetcher) Init(ctx context.Context, txn datastore.Txn, col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error { ret := _m.Called(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, datastore.Txn, *client.CollectionDescription, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, datastore.Txn, client.Collection, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok { r0 = rf(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) } else { r0 = ret.Error(0) @@ -155,7 +155,7 @@ type Fetcher_Init_Call struct { // Init is a helper method to define mock.On call // - ctx context.Context // - txn datastore.Txn -// - col *client.CollectionDescription +// - col client.Collection // - fields []client.FieldDescription // - filter *mapper.Filter // - docmapper *core.DocumentMapping @@ -165,9 +165,9 @@ func (_e *Fetcher_Expecter) Init(ctx interface{}, txn interface{}, col interface return &Fetcher_Init_Call{Call: _e.mock.On("Init", ctx, txn, col, fields, filter, docmapper, reverse, showDeleted)} } -func (_c *Fetcher_Init_Call) Run(run func(ctx context.Context, txn datastore.Txn, col *client.CollectionDescription, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool)) *Fetcher_Init_Call { +func (_c *Fetcher_Init_Call) Run(run func(ctx 
context.Context, txn datastore.Txn, col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool)) *Fetcher_Init_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(datastore.Txn), args[2].(*client.CollectionDescription), args[3].([]client.FieldDescription), args[4].(*mapper.Filter), args[5].(*core.DocumentMapping), args[6].(bool), args[7].(bool)) + run(args[0].(context.Context), args[1].(datastore.Txn), args[2].(client.Collection), args[3].([]client.FieldDescription), args[4].(*mapper.Filter), args[5].(*core.DocumentMapping), args[6].(bool), args[7].(bool)) }) return _c } @@ -177,7 +177,7 @@ func (_c *Fetcher_Init_Call) Return(_a0 error) *Fetcher_Init_Call { return _c } -func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, datastore.Txn, *client.CollectionDescription, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error) *Fetcher_Init_Call { +func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, datastore.Txn, client.Collection, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error) *Fetcher_Init_Call { _c.Call.Return(run) return _c } diff --git a/db/fetcher/versioned.go b/db/fetcher/versioned.go index f1c7b6a9de..da670b1c27 100644 --- a/db/fetcher/versioned.go +++ b/db/fetcher/versioned.go @@ -92,7 +92,7 @@ type VersionedFetcher struct { queuedCids *list.List - col *client.CollectionDescription + col client.Collection // @todo index *client.IndexDescription mCRDTs map[uint32]crdt.MerkleCRDT } @@ -101,7 +101,7 @@ type VersionedFetcher struct { func (vf *VersionedFetcher) Init( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, @@ -357,13 +357,14 @@ func (vf *VersionedFetcher) merge(c cid.Cid) error { return err } - fieldID := vf.col.Schema.GetFieldKey(l.Name) - if fieldID == uint32(0) { + schema := vf.col.Schema() + field, ok := vf.col.Description().GetFieldByName(l.Name, &schema) + if !ok { return client.NewErrFieldNotExist(l.Name) } // @todo: Right now we ONLY handle LWW_REGISTER, need to switch on this and // get CType from descriptions - if err := vf.processNode(fieldID, subNd, client.LWW_REGISTER, l.Name); err != nil { + if err := vf.processNode(uint32(field.ID), subNd, client.LWW_REGISTER, l.Name); err != nil { return err } } @@ -380,7 +381,7 @@ func (vf *VersionedFetcher) processNode( // handle CompositeDAG mcrdt, exists := vf.mCRDTs[crdtIndex] if !exists { - key, err := base.MakePrimaryIndexKeyForCRDT(*vf.col, ctype, vf.key, fieldName) + key, err := base.MakePrimaryIndexKeyForCRDT(vf.col.Description(), vf.col.Schema(), ctype, vf.key, fieldName) if err != nil { return err } diff --git a/db/fetcher_test.go b/db/fetcher_test.go index e2c3647792..f7de9bf036 100644 --- a/db/fetcher_test.go +++ b/db/fetcher_test.go @@ -16,169 +16,13 @@ import ( "github.com/stretchr/testify/assert" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/db/fetcher" ) -func newTestCollectionDescription() client.CollectionDescription { - return client.CollectionDescription{ - Name: "users", - ID: uint32(1), - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - ID:
client.FieldID(1), - Kind: client.FieldKind_DocKey, - }, - { - Name: "Name", - ID: client.FieldID(2), - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - { - Name: "Age", - ID: client.FieldID(3), - Kind: client.FieldKind_INT, - Typ: client.LWW_REGISTER, - }, - }, - }, - } -} - -func newTestFetcher(ctx context.Context, txn datastore.Txn) (*fetcher.DocumentFetcher, error) { - df := new(fetcher.DocumentFetcher) - desc := newTestCollectionDescription() - err := df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) - if err != nil { - return nil, err - } - return df, nil -} - -func TestFetcherInit(t *testing.T) { - _, err := newTestFetcher(context.Background(), nil) - assert.NoError(t, err) -} - -func TestFetcherStart(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - if err != nil { - t.Error(err) - return - } - txn, err := db.NewTxn(ctx, true) - if err != nil { - t.Error(err) - return - } - df, err := newTestFetcher(ctx, txn) - assert.NoError(t, err) - - err = df.Start(ctx, core.Spans{}) - assert.NoError(t, err) -} - func TestFetcherStartWithoutInit(t *testing.T) { ctx := context.Background() df := new(fetcher.DocumentFetcher) err := df.Start(ctx, core.Spans{}) assert.Error(t, err) } - -func TestMakeIndexPrefixKey(t *testing.T) { - desc := newTestCollectionDescription() - key := base.MakeCollectionKey(desc) - assert.Equal(t, "/1", key.ToString()) -} - -func TestFetcherGetAllPrimaryIndexEncodedDocSingle(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{ - "Name": "John", - "Age": 21 - }`)) - assert.NoError(t, err) - err = col.Save(ctx, doc) - assert.NoError(t, err) - - txn, err := db.NewTxn(ctx, true) - if err != nil { - t.Error(err) - return - } - - // db.printDebugDB() - - df := new(fetcher.DocumentFetcher) - desc := col.Description() - err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) - assert.NoError(t, err) - - err = df.Start(ctx, core.Spans{}) - assert.NoError(t, err) - - encdoc, _, err := df.FetchNext(ctx) - assert.NoError(t, err) - assert.NotNil(t, encdoc) -} - -func TestFetcherGetAllPrimaryIndexEncodedDocMultiple(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{ - "Name": "John", - "Age": 21 - }`)) - assert.NoError(t, err) - err = col.Save(ctx, doc) - assert.NoError(t, err) - - doc, err = client.NewDocFromJSON([]byte(`{ - "Name": "Alice", - "Age": 27 - }`)) - assert.NoError(t, err) - err = col.Save(ctx, doc) - assert.NoError(t, err) - - txn, err := db.NewTxn(ctx, true) - if err != nil { - t.Error(err) - return - } - - // db.printDebugDB() - - df := new(fetcher.DocumentFetcher) - desc := col.Description() - err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) - assert.NoError(t, err) - - err = df.Start(ctx, core.Spans{}) - assert.NoError(t, err) - - encdoc, _, err := df.FetchNext(ctx) - assert.NoError(t, err) - assert.NotNil(t, encdoc) - encdoc, _, err = df.FetchNext(ctx) - assert.NoError(t, err) - assert.NotNil(t, encdoc) -} diff --git a/db/index.go b/db/index.go index 7314bc2a08..e1aaed6cb6 100644 --- a/db/index.go +++ b/db/index.go @@ -82,10 +82,8 @@ func NewCollectionIndex( return nil, NewErrIndexDescHasNoFields(desc) } index 
:= &collectionSimpleIndex{collection: collection, desc: desc} - schema := collection.Description().Schema - fieldID := client.FieldID(schema.GetFieldKey(desc.Fields[0].Name)) - field, foundField := collection.Description().GetFieldByID(fieldID) - if fieldID == client.FieldID(0) || !foundField { + field, foundField := collection.Schema().GetField(desc.Fields[0].Name) + if !foundField { return nil, NewErrIndexDescHasNonExistingField(desc, desc.Fields[0].Name) } var e error diff --git a/db/index_test.go b/db/index_test.go index 67c3f232d0..d22746a363 100644 --- a/db/index_test.go +++ b/db/index_test.go @@ -15,6 +15,7 @@ import ( "encoding/binary" "encoding/json" "fmt" + "strings" "testing" ds "github.com/ipfs/go-datastore" @@ -55,71 +56,62 @@ type indexTestFixture struct { ctx context.Context db *implicitTxnDB txn datastore.Txn - users *collection + users client.Collection t *testing.T } -func getUsersCollectionDesc() client.CollectionDescription { - return client.CollectionDescription{ - Name: usersColName, - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: usersNameFieldName, - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - { - Name: usersAgeFieldName, - Kind: client.FieldKind_INT, - Typ: client.LWW_REGISTER, - }, - { - Name: usersWeightFieldName, - Kind: client.FieldKind_FLOAT, - Typ: client.LWW_REGISTER, - }, - }, - }, - } -} +func (f *indexTestFixture) getUsersCollectionDesc() client.Collection { + _, err := f.db.AddSchema( + f.ctx, + fmt.Sprintf( + `type %s { + %s: String + %s: Int + %s: Float + }`, + usersColName, + usersNameFieldName, + usersAgeFieldName, + usersWeightFieldName, + ), + ) + require.NoError(f.t, err) -func getProductsCollectionDesc() client.CollectionDescription { - return client.CollectionDescription{ - Name: productsColName, - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: productsIDFieldName, - Kind: client.FieldKind_INT, - Typ: client.LWW_REGISTER, - }, - { - Name: productsPriceFieldName, - Kind: client.FieldKind_FLOAT, - Typ: client.LWW_REGISTER, - }, - { - Name: productsCategoryFieldName, - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - { - Name: productsAvailableFieldName, - Kind: client.FieldKind_BOOL, - Typ: client.LWW_REGISTER, - }, - }, - }, - } + col, err := f.db.GetCollectionByName(f.ctx, usersColName) + require.NoError(f.t, err) + + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) + + return col +} + +func (f *indexTestFixture) getProductsCollectionDesc() client.Collection { + _, err := f.db.AddSchema( + f.ctx, + fmt.Sprintf( + `type %s { + %s: Int + %s: Float + %s: String + %s: Boolean + }`, + productsColName, + productsIDFieldName, + productsPriceFieldName, + productsCategoryFieldName, + productsAvailableFieldName, + ), + ) + require.NoError(f.t, err) + + col, err := f.db.GetCollectionByName(f.ctx, productsColName) + require.NoError(f.t, err) + + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) + + return col } func newIndexTestFixtureBare(t *testing.T) *indexTestFixture { @@ -139,7 +131,7 @@ func newIndexTestFixtureBare(t *testing.T) *indexTestFixture { func newIndexTestFixture(t *testing.T) *indexTestFixture { f := newIndexTestFixtureBare(t) - f.users = f.createCollection(getUsersCollectionDesc()) + f.users = f.getUsersCollectionDesc() return f } @@ -247,18 +239,6 @@ func (f *indexTestFixture) 
getCollectionIndexes(colName string) ([]client.IndexD return f.db.fetchCollectionIndexDescriptions(f.ctx, f.txn, colName) } -func (f *indexTestFixture) createCollection( - desc client.CollectionDescription, -) *collection { - col, err := f.db.createCollection(f.ctx, f.txn, desc) - assert.NoError(f.t, err) - err = f.txn.Commit(f.ctx) - assert.NoError(f.t, err) - f.txn, err = f.db.NewTxn(f.ctx, false) - assert.NoError(f.t, err) - return col.(*collection) -} - func TestCreateIndex_IfFieldsIsEmpty_ReturnError(t *testing.T) { f := newIndexTestFixture(t) @@ -324,28 +304,6 @@ func TestCreateIndex_IfFieldHasNoDirection_DefaultToAsc(t *testing.T) { assert.Equal(t, client.Ascending, newDesc.Fields[0].Direction) } -func TestCreateIndex_IfNameIsNotSpecified_Generate(t *testing.T) { - f := newIndexTestFixtureBare(t) - colDesc := getUsersCollectionDesc() - const colName = "UsErS" - const fieldName = "NaMe" - colDesc.Name = colName - colDesc.Schema.Name = colName // Which one should we use? - colDesc.Schema.Fields[1].Name = fieldName - f.users = f.createCollection(colDesc) - - desc := client.IndexDescription{ - Name: "", - Fields: []client.IndexedFieldDescription{ - {Name: fieldName, Direction: client.Ascending}, - }, - } - - newDesc, err := f.createCollectionIndex(desc) - assert.NoError(t, err) - assert.Equal(t, colName+"_"+fieldName+"_ASC", newDesc.Name) -} - func TestCreateIndex_IfSingleFieldInDescOrder_ReturnError(t *testing.T) { f := newIndexTestFixture(t) @@ -515,8 +473,8 @@ func TestCreateIndex_IfPropertyDoesntExist_ReturnError(t *testing.T) { func TestCreateIndex_WithMultipleCollectionsAndIndexes_AssignIncrementedIDPerCollection(t *testing.T) { f := newIndexTestFixtureBare(t) - users := f.createCollection(getUsersCollectionDesc()) - products := f.createCollection(getProductsCollectionDesc()) + users := f.getUsersCollectionDesc() + products := f.getProductsCollectionDesc() makeIndex := func(fieldName string) client.IndexDescription { return client.IndexDescription{ @@ -606,24 +564,16 @@ func TestCreateIndex_IfAttemptToIndexOnUnsupportedType_ReturnError(t *testing.T) const unsupportedKind = client.FieldKind_BOOL_ARRAY - desc := client.CollectionDescription{ - Name: "testTypeCol", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "field", - Kind: unsupportedKind, - Typ: client.LWW_REGISTER, - }, - }, - }, - } + _, err := f.db.AddSchema( + f.ctx, + `type testTypeCol { + field: [Boolean!] + }`, + ) + require.NoError(f.t, err) - collection := f.createCollection(desc) + collection, err := f.db.GetCollectionByName(f.ctx, "testTypeCol") + require.NoError(f.t, err) indexDesc := client.IndexDescription{ Fields: []client.IndexedFieldDescription{ @@ -631,7 +581,10 @@ func TestCreateIndex_IfAttemptToIndexOnUnsupportedType_ReturnError(t *testing.T) }, } - _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) + + _, err = f.createCollectionIndexFor(collection.Name(), indexDesc) require.ErrorIs(f.t, err, NewErrUnsupportedIndexFieldType(unsupportedKind)) f.commitTxn() } @@ -652,7 +605,7 @@ func TestCreateIndex_IfFailedToReadIndexUponRetrievingCollectionDesc_ReturnError onSystemStore.Query(mock.Anything, mock.MatchedBy(matchPrefixFunc)).Return(nil, testErr) - descData, err := json.Marshal(getUsersCollectionDesc()) + descData, err := json.Marshal(f.users.Description()) require.NoError(t, err) onSystemStore.Query(mock.Anything, mock.Anything). 
@@ -676,7 +629,9 @@ func TestGetIndexes_ShouldReturnListOfAllExistingIndexes(t *testing.T) { _, err := f.createCollectionIndexFor(usersColName, usersIndexDesc) assert.NoError(t, err) - f.createCollection(getProductsCollectionDesc()) + f.commitTxn() + + f.getProductsCollectionDesc() productsIndexDesc := client.IndexDescription{ Name: "products_description_index", Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, @@ -830,11 +785,17 @@ func TestGetCollectionIndexes_ShouldReturnListOfCollectionIndexes(t *testing.T) _, err := f.createCollectionIndexFor(usersColName, usersIndexDesc) assert.NoError(t, err) - f.createCollection(getProductsCollectionDesc()) + f.commitTxn() + + f.getProductsCollectionDesc() productsIndexDesc := client.IndexDescription{ Name: "products_description_index", Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, } + + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) + _, err = f.createCollectionIndexFor(productsColName, productsIndexDesc) assert.NoError(t, err) @@ -1021,27 +982,23 @@ func TestCollectionGetIndexes_IfFailsToCreateTxn_ShouldNotCache(t *testing.T) { func TestCollectionGetIndexes_IfStoredIndexWithUnsupportedType_ReturnError(t *testing.T) { f := newIndexTestFixtureBare(t) + f.getUsersCollectionDesc() const unsupportedKind = client.FieldKind_BOOL_ARRAY + _, err := f.db.AddSchema( + f.ctx, + `type testTypeCol { + name: String + field: [Boolean!] + }`, + ) + require.NoError(f.t, err) - desc := client.CollectionDescription{ - Name: "testTypeCol", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "field", - Kind: unsupportedKind, - Typ: client.LWW_REGISTER, - }, - }, - }, - } + collection, err := f.db.GetCollectionByName(f.ctx, "testTypeCol") + require.NoError(f.t, err) - collection := f.createCollection(desc) + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) indexDesc := client.IndexDescription{ Fields: []client.IndexedFieldDescription{ @@ -1121,17 +1078,6 @@ func TestCollectionGetIndexes_IfIndexIsDropped_ReturnUpdateIndexes(t *testing.T) func TestCollectionGetIndexes_ShouldReturnIndexesInOrderedByName(t *testing.T) { f := newIndexTestFixtureBare(t) - colDesc := client.CollectionDescription{ - Name: "testCollection", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - }, - }, - } const ( num = 30 fieldNamePrefix = "field_" @@ -1142,17 +1088,33 @@ func TestCollectionGetIndexes_ShouldReturnIndexesInOrderedByName(t *testing.T) { return fmt.Sprintf("%02d", i) } + builder := strings.Builder{} + builder.WriteString("type testCollection {\n") + for i := 1; i <= num; i++ { - colDesc.Schema.Fields = append(colDesc.Schema.Fields, - client.FieldDescription{ - Name: fieldNamePrefix + toSuffix(i), - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }) + _, err := builder.WriteString(fieldNamePrefix) + require.NoError(f.t, err) + + _, err = builder.WriteString(toSuffix(i)) + require.NoError(f.t, err) + + _, err = builder.WriteString(": String\n") + require.NoError(f.t, err) } + _, err := builder.WriteString("}") + require.NoError(f.t, err) - collection := f.createCollection(colDesc) + _, err = f.db.AddSchema( + f.ctx, + builder.String(), + ) + require.NoError(f.t, err) + collection, err := f.db.GetCollectionByName(f.ctx, "testCollection") + require.NoError(f.t, err) + + f.txn, err = f.db.NewTxn(f.ctx, false) + 
require.NoError(f.t, err) for i := 1; i <= num; i++ { iStr := toSuffix(i) indexDesc := client.IndexDescription{ @@ -1319,7 +1281,7 @@ func TestDropAllIndexes_ShouldDeleteAllIndexes(t *testing.T) { assert.Equal(t, 2, f.countIndexPrefixes(usersColName, "")) - err = f.users.dropAllIndexes(f.ctx, f.txn) + err = f.users.(*collection).dropAllIndexes(f.ctx, f.txn) assert.NoError(t, err) assert.Equal(t, 0, f.countIndexPrefixes(usersColName, "")) @@ -1331,7 +1293,7 @@ func TestDropAllIndexes_IfStorageFails_ReturnError(t *testing.T) { f.db.Close(f.ctx) - err := f.users.dropAllIndexes(f.ctx, f.txn) + err := f.users.(*collection).dropAllIndexes(f.ctx, f.txn) assert.Error(t, err) } @@ -1386,7 +1348,7 @@ func TestDropAllIndexes_IfSystemStorageFails_ReturnError(t *testing.T) { mockedTxn.EXPECT().Systemstore().Unset() mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() - err := f.users.dropAllIndexes(f.ctx, f.txn) + err := f.users.(*collection).dropAllIndexes(f.ctx, f.txn) assert.ErrorIs(t, err, testErr, testCase.Name) } } @@ -1406,7 +1368,7 @@ func TestDropAllIndexes_ShouldCloseQueryIterator(t *testing.T) { mockedTxn.EXPECT().Systemstore().Unset() mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() - _ = f.users.dropAllIndexes(f.ctx, f.txn) + _ = f.users.(*collection).dropAllIndexes(f.ctx, f.txn) } func TestNewCollectionIndex_IfDescriptionHasNoFields_ReturnError(t *testing.T) { diff --git a/db/indexed_docs_test.go b/db/indexed_docs_test.go index 5634686778..5b25fab21a 100644 --- a/db/indexed_docs_test.go +++ b/db/indexed_docs_test.go @@ -234,11 +234,9 @@ func (f *indexTestFixture) stubSystemStore(systemStoreOn *mocks.DSReaderWriter_E systemStoreOn.Get(mock.Anything, colKey.ToDS()).Maybe().Return([]byte(userColVersionID), nil) colVersionIDKey := core.NewCollectionSchemaVersionKey(userColVersionID) - colDesc := getUsersCollectionDesc() - colDesc.ID = 1 - for i := range colDesc.Schema.Fields { - colDesc.Schema.Fields[i].ID = client.FieldID(i) - } + usersCol, err := f.db.GetCollectionByName(f.ctx, usersColName) + require.NoError(f.t, err) + colDesc := usersCol.Description() colDescBytes, err := json.Marshal(colDesc) require.NoError(f.t, err) systemStoreOn.Get(mock.Anything, colVersionIDKey.ToDS()).Maybe().Return(colDescBytes, nil) @@ -361,8 +359,8 @@ func TestNonUnique_IfIndexIntField_StoreIt(t *testing.T) { func TestNonUnique_IfMultipleCollectionsWithIndexes_StoreIndexWithCollectionID(t *testing.T) { f := newIndexTestFixtureBare(t) - users := f.createCollection(getUsersCollectionDesc()) - products := f.createCollection(getProductsCollectionDesc()) + users := f.getUsersCollectionDesc() + products := f.getProductsCollectionDesc() _, err := f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnName()) require.NoError(f.t, err) @@ -437,24 +435,23 @@ func TestNonUnique_StoringIndexedFieldValueOfDifferentTypes(t *testing.T) { } for i, tc := range testCase { - desc := client.CollectionDescription{ - Name: "testTypeCol" + strconv.Itoa(i), - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "field", - Kind: tc.FieldKind, - Typ: client.LWW_REGISTER, - }, - }, - }, - } + _, err := f.db.AddSchema( + f.ctx, + fmt.Sprintf( + `type %s { + field: %s + }`, + "testTypeCol"+strconv.Itoa(i), + tc.FieldKind.String(), + ), + ) + require.NoError(f.t, err) + + collection, err := f.db.GetCollectionByName(f.ctx, "testTypeCol"+strconv.Itoa(i)) + require.NoError(f.t, err) - collection := 
f.createCollection(desc) + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) indexDesc := client.IndexDescription{ Fields: []client.IndexedFieldDescription{ @@ -462,7 +459,7 @@ func TestNonUnique_StoringIndexedFieldValueOfDifferentTypes(t *testing.T) { }, } - _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + _, err = f.createCollectionIndexFor(collection.Name(), indexDesc) require.NoError(f.t, err) f.commitTxn() @@ -596,7 +593,7 @@ func TestNonUniqueCreate_IfUponIndexingExistingDocsFetcherFails_ReturnError(t *t doc := f.newUserDoc("John", 21) f.saveDocToCollection(doc, f.users) - f.users.fetcherFactory = tc.PrepareFetcher + f.users.(*collection).fetcherFactory = tc.PrepareFetcher key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) @@ -614,7 +611,7 @@ func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) f.saveDocToCollection(doc, f.users) fieldKeyString := core.DataStoreKey{ - CollectionID: f.users.desc.IDString(), + CollectionID: f.users.Description().IDString(), }.WithDocKey(doc.Key().String()). WithFieldId("1"). WithValueFlag(). @@ -623,7 +620,7 @@ func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) invalidKeyString := fieldKeyString + "/doesn't matter/" // Insert an invalid key within the document prefix, this will generate an error within the fetcher. - f.users.db.multistore.Datastore().Put(f.ctx, ipfsDatastore.NewKey(invalidKeyString), []byte("doesn't matter")) + f.db.multistore.Datastore().Put(f.ctx, ipfsDatastore.NewKey(invalidKeyString), []byte("doesn't matter")) _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) require.ErrorIs(f.t, err, core.ErrInvalidKey) @@ -631,7 +628,7 @@ func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) func TestNonUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { f := newIndexTestFixtureBare(t) - users := f.createCollection(getUsersCollectionDesc()) + users := f.getUsersCollectionDesc() _, err := f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnName()) require.NoError(f.t, err) _, err = f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnAge()) @@ -643,7 +640,7 @@ func TestNonUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { f.saveDocToCollection(f.newUserDoc("John", 21), users) f.saveDocToCollection(f.newUserDoc("Islam", 23), users) - products := f.createCollection(getProductsCollectionDesc()) + products := f.getProductsCollectionDesc() _, err = f.createCollectionIndexFor(products.Name(), getProductsIndexDescOnCategory()) require.NoError(f.t, err) f.commitTxn() @@ -885,7 +882,7 @@ func TestNonUniqueUpdate_IfFetcherFails_ReturnError(t *testing.T) { doc := f.newUserDoc("John", 21) f.saveDocToCollection(doc, f.users) - f.users.fetcherFactory = tc.PrepareFetcher + f.users.(*collection).fetcherFactory = tc.PrepareFetcher oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() err := doc.Set(usersNameFieldName, "Islam") @@ -931,14 +928,14 @@ func TestNonUniqueUpdate_ShouldPassToFetcherOnlyRelevantFields(t *testing.T) { f.createUserCollectionIndexOnName() f.createUserCollectionIndexOnAge() - f.users.fetcherFactory = func() fetcher.Fetcher { + f.users.(*collection).fetcherFactory = func() fetcher.Fetcher { f := fetcherMocks.NewStubbedFetcher(t) f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, 
mock.Anything, mock.Anything).Unset() f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). RunAndReturn(func( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, mapping *core.DocumentMapping, @@ -999,7 +996,7 @@ func TestNonUniqueUpdate_IfDatastoreFails_ReturnError(t *testing.T) { schemaVersionID: f.users.Schema().VersionID, } - f.users.fetcherFactory = func() fetcher.Fetcher { + f.users.(*collection).fetcherFactory = func() fetcher.Fetcher { df := fetcherMocks.NewStubbedFetcher(t) df.EXPECT().FetchNext(mock.Anything).Unset() df.EXPECT().FetchNext(mock.Anything).Return(&encodedDoc, fetcher.ExecInfo{}, nil) diff --git a/db/p2p_collection_test.go b/db/p2p_collection_test.go index acd80bd041..67d5393c66 100644 --- a/db/p2p_collection_test.go +++ b/db/p2p_collection_test.go @@ -12,6 +12,7 @@ package db import ( "context" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -25,31 +26,18 @@ func newTestCollection( db *implicitTxnDB, name string, ) client.Collection { - desc := client.CollectionDescription{ - Name: name, - Schema: client.SchemaDescription{ - Name: name, - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "Name", - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - }, - }, - } - - txn, err := db.db.NewTxn(ctx, false) + _, err := db.AddSchema( + ctx, + fmt.Sprintf( + `type %s { + Name: String + }`, + name, + ), + ) require.NoError(t, err) - col, err := db.db.createCollection(ctx, txn, desc) - require.NoError(t, err) - - err = txn.Commit(ctx) + col, err := db.GetCollectionByName(ctx, name) require.NoError(t, err) return col diff --git a/db/schema.go b/db/schema.go index 910f44f8c1..ec14393563 100644 --- a/db/schema.go +++ b/db/schema.go @@ -39,24 +39,29 @@ func (db *db) addSchema( txn datastore.Txn, schemaString string, ) ([]client.CollectionDescription, error) { - existingDescriptions, err := db.getCollectionDescriptions(ctx, txn) + existingCollections, err := db.getAllCollections(ctx, txn) if err != nil { return nil, err } - newDescriptions, err := db.parser.ParseSDL(ctx, schemaString) + existingDefinitions := make([]client.CollectionDefinition, len(existingCollections)) + for i := range existingCollections { + existingDefinitions[i] = existingCollections[i].Definition() + } + + newDefinitions, err := db.parser.ParseSDL(ctx, schemaString) if err != nil { return nil, err } - err = db.parser.SetSchema(ctx, txn, append(existingDescriptions, newDescriptions...)) + err = db.parser.SetSchema(ctx, txn, append(existingDefinitions, newDefinitions...)) if err != nil { return nil, err } - returnDescriptions := make([]client.CollectionDescription, len(newDescriptions)) - for i, desc := range newDescriptions { - col, err := db.createCollection(ctx, txn, desc) + returnDescriptions := make([]client.CollectionDescription, len(newDefinitions)) + for i, definition := range newDefinitions { + col, err := db.createCollection(ctx, txn, definition) if err != nil { return nil, err } @@ -67,29 +72,17 @@ func (db *db) addSchema( } func (db *db) loadSchema(ctx context.Context, txn datastore.Txn) error { - descriptions, err := db.getCollectionDescriptions(ctx, txn) - if err != nil { - return err - } - - return db.parser.SetSchema(ctx, txn, descriptions) -} - -func (db *db) getCollectionDescriptions( - ctx context.Context, - txn datastore.Txn, 
-) ([]client.CollectionDescription, error) { collections, err := db.getAllCollections(ctx, txn) if err != nil { - return nil, err + return err } - descriptions := make([]client.CollectionDescription, len(collections)) - for i, collection := range collections { - descriptions[i] = collection.Description() + definitions := make([]client.CollectionDefinition, len(collections)) + for i := range collections { + definitions[i] = collections[i].Definition() } - return descriptions, nil + return db.parser.SetSchema(ctx, txn, definitions) } // patchSchema takes the given JSON patch string and applies it to the set of CollectionDescriptions @@ -114,6 +107,11 @@ func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString st return err } + existingSchemaByName := map[string]client.SchemaDescription{} + for _, col := range collectionsByName { + existingSchemaByName[col.Schema.Name] = col.Schema + } + // Here we swap out any string representations of enums for their integer values patch, err = substituteSchemaPatch(patch, collectionsByName) if err != nil { @@ -138,21 +136,33 @@ func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString st return err } - newDescriptions := []client.CollectionDescription{} + newCollections := []client.CollectionDefinition{} + newSchemaByName := map[string]client.SchemaDescription{} for _, desc := range newDescriptionsByName { - newDescriptions = append(newDescriptions, desc) + def := client.CollectionDefinition{Description: desc, Schema: desc.Schema} + + newCollections = append(newCollections, def) + newSchemaByName[def.Schema.Name] = def.Schema } - for i, desc := range newDescriptions { - col, err := db.updateCollection(ctx, txn, collectionsByName, newDescriptionsByName, desc, setAsDefaultVersion) + for i, col := range newCollections { + col, err := db.updateCollection( + ctx, + txn, + collectionsByName, + existingSchemaByName, + newSchemaByName, + col, + setAsDefaultVersion, + ) if err != nil { return err } - newDescriptions[i] = col.Description() + newCollections[i] = col.Definition() } - return db.parser.SetSchema(ctx, txn, newDescriptions) + return db.parser.SetSchema(ctx, txn, newCollections) } func (db *db) getCollectionsByName( diff --git a/http/client.go b/http/client.go index 79ff9e559b..21006f2194 100644 --- a/http/client.go +++ b/http/client.go @@ -267,11 +267,11 @@ func (c *Client) GetCollectionByName(ctx context.Context, name client.Collection if err != nil { return nil, err } - var description client.CollectionDescription - if err := c.http.requestJson(req, &description); err != nil { + var definition client.CollectionDefinition + if err := c.http.requestJson(req, &definition); err != nil { return nil, err } - return &Collection{c.http, description}, nil + return &Collection{c.http, definition}, nil } func (c *Client) GetCollectionBySchemaID(ctx context.Context, schemaId string) (client.Collection, error) { @@ -282,11 +282,11 @@ func (c *Client) GetCollectionBySchemaID(ctx context.Context, schemaId string) ( if err != nil { return nil, err } - var description client.CollectionDescription - if err := c.http.requestJson(req, &description); err != nil { + var definition client.CollectionDefinition + if err := c.http.requestJson(req, &definition); err != nil { return nil, err } - return &Collection{c.http, description}, nil + return &Collection{c.http, definition}, nil } func (c *Client) GetCollectionByVersionID(ctx context.Context, versionId string) (client.Collection, error) { @@ -297,11 +297,11 @@ func (c *Client) 
GetCollectionByVersionID(ctx context.Context, versionId string) if err != nil { return nil, err } - var description client.CollectionDescription - if err := c.http.requestJson(req, &description); err != nil { + var definition client.CollectionDefinition + if err := c.http.requestJson(req, &definition); err != nil { return nil, err } - return &Collection{c.http, description}, nil + return &Collection{c.http, definition}, nil } func (c *Client) GetAllCollections(ctx context.Context) ([]client.Collection, error) { @@ -311,7 +311,7 @@ func (c *Client) GetAllCollections(ctx context.Context) ([]client.Collection, er if err != nil { return nil, err } - var descriptions []client.CollectionDescription + var descriptions []client.CollectionDefinition if err := c.http.requestJson(req, &descriptions); err != nil { return nil, err } diff --git a/http/client_collection.go b/http/client_collection.go index 9641157d1b..1bb1e9e29e 100644 --- a/http/client_collection.go +++ b/http/client_collection.go @@ -32,35 +32,39 @@ var _ client.Collection = (*Collection)(nil) // Collection implements the client.Collection interface over HTTP. type Collection struct { http *httpClient - desc client.CollectionDescription + def client.CollectionDefinition } func (c *Collection) Description() client.CollectionDescription { - return c.desc + return c.def.Description } func (c *Collection) Name() string { - return c.desc.Name + return c.Description().Name } func (c *Collection) Schema() client.SchemaDescription { - return c.desc.Schema + return c.def.Schema } func (c *Collection) ID() uint32 { - return c.desc.ID + return c.Description().ID } func (c *Collection) SchemaID() string { - return c.desc.Schema.SchemaID + return c.Schema().SchemaID +} + +func (c *Collection) Definition() client.CollectionDefinition { + return c.def } func (c *Collection) Create(ctx context.Context, doc *client.Document) error { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name) // We must call this here, else the doc key on the given object will not match // that of the document saved in the database - err := doc.RemapAliasFieldsAndDockey(c.Description().Schema.Fields) + err := doc.RemapAliasFieldsAndDockey(c.Schema().Fields) if err != nil { return err } @@ -82,13 +86,13 @@ func (c *Collection) Create(ctx context.Context, doc *client.Document) error { } func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) error { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name) var docMapList []json.RawMessage for _, doc := range docs { // We must call this here, else the doc key on the given object will not match // that of the document saved in the database - err := doc.RemapAliasFieldsAndDockey(c.Description().Schema.Fields) + err := doc.RemapAliasFieldsAndDockey(c.Schema().Fields) if err != nil { return err } @@ -118,7 +122,7 @@ func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) er } func (c *Collection) Update(ctx context.Context, doc *client.Document) error { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, doc.Key().String()) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, doc.Key().String()) body, err := doc.ToJSONPatch() if err != nil { @@ -148,7 +152,7 @@ func (c *Collection) Save(ctx context.Context, doc *client.Document) error { } func (c *Collection) Delete(ctx 
context.Context, docKey client.DocKey) (bool, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, docKey.String()) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, docKey.String()) req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), nil) if err != nil { @@ -186,7 +190,7 @@ func (c *Collection) updateWith( ctx context.Context, request CollectionUpdateRequest, ) (*client.UpdateResult, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name) body, err := json.Marshal(request) if err != nil { @@ -257,7 +261,7 @@ func (c *Collection) deleteWith( ctx context.Context, request CollectionDeleteRequest, ) (*client.DeleteResult, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name) body, err := json.Marshal(request) if err != nil { @@ -302,7 +306,7 @@ func (c *Collection) Get(ctx context.Context, key client.DocKey, showDeleted boo query.Add("show_deleted", "true") } - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, key.String()) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, key.String()) methodURL.RawQuery = query.Encode() req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) @@ -324,12 +328,12 @@ func (c *Collection) Get(ctx context.Context, key client.DocKey, showDeleted boo func (c *Collection) WithTxn(tx datastore.Txn) client.Collection { return &Collection{ http: c.http.withTxn(tx.ID()), - desc: c.desc, + def: c.def, } } func (c *Collection) GetAllDocKeys(ctx context.Context) (<-chan client.DocKeysResult, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name) req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) if err != nil { @@ -381,7 +385,7 @@ func (c *Collection) CreateIndex( ctx context.Context, indexDesc client.IndexDescription, ) (client.IndexDescription, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, "indexes") + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, "indexes") body, err := json.Marshal(&indexDesc) if err != nil { @@ -399,7 +403,7 @@ func (c *Collection) CreateIndex( } func (c *Collection) DropIndex(ctx context.Context, indexName string) error { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, "indexes", indexName) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, "indexes", indexName) req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), nil) if err != nil { @@ -410,7 +414,7 @@ func (c *Collection) DropIndex(ctx context.Context, indexName string) error { } func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, "indexes") + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, "indexes") req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) if err != nil { @@ -420,5 +424,5 @@ func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, if err := c.http.requestJson(req, &indexes); err != nil { return nil, err } - return c.desc.Indexes, nil + return c.Description().Indexes, nil } diff --git 
a/http/handler_store.go b/http/handler_store.go index 93563c2f90..6361a7b900 100644 --- a/http/handler_store.go +++ b/http/handler_store.go @@ -200,30 +200,30 @@ func (s *storeHandler) GetCollection(rw http.ResponseWriter, req *http.Request) responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, col.Description()) + responseJSON(rw, http.StatusOK, col.Definition()) case req.URL.Query().Has("schema_id"): col, err := store.GetCollectionBySchemaID(req.Context(), req.URL.Query().Get("schema_id")) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, col.Description()) + responseJSON(rw, http.StatusOK, col.Definition()) case req.URL.Query().Has("version_id"): col, err := store.GetCollectionByVersionID(req.Context(), req.URL.Query().Get("version_id")) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, col.Description()) + responseJSON(rw, http.StatusOK, col.Definition()) default: cols, err := store.GetAllCollections(req.Context()) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - colDesc := make([]client.CollectionDescription, len(cols)) + colDesc := make([]client.CollectionDefinition, len(cols)) for i, col := range cols { - colDesc[i] = col.Description() + colDesc[i] = col.Definition() } responseJSON(rw, http.StatusOK, colDesc) } diff --git a/lens/fetcher.go b/lens/fetcher.go index ee01aa7983..23adc8671d 100644 --- a/lens/fetcher.go +++ b/lens/fetcher.go @@ -34,7 +34,7 @@ type lensedFetcher struct { txn datastore.Txn - col *client.CollectionDescription + col client.Collection // Cache the fieldDescriptions mapped by name to allow for cheaper access within the fetcher loop fieldDescriptionsByName map[string]client.FieldDescription @@ -58,7 +58,7 @@ func NewFetcher(source fetcher.Fetcher, registry client.LensRegistry) fetcher.Fe func (f *lensedFetcher) Init( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, @@ -67,12 +67,12 @@ func (f *lensedFetcher) Init( ) error { f.col = col - f.fieldDescriptionsByName = make(map[string]client.FieldDescription, len(col.Schema.Fields)) + f.fieldDescriptionsByName = make(map[string]client.FieldDescription, len(col.Schema().Fields)) // Cache the field descriptions in reverse, allowing smaller-index fields to overwrite any later // ones. This should never really happen here, but it ensures the result is consistent with col.GetField // which returns the first one it finds with a matching name.
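The comment above explains why Init fills fieldDescriptionsByName back to front. A minimal standalone sketch of that reverse-fill idea (illustrative only, not DefraDB code; `Field` is a stand-in for client.FieldDescription):

```go
package main

import "fmt"

// Field is a hypothetical stand-in for client.FieldDescription.
type Field struct {
	Name string
	Kind string
}

func main() {
	fields := []Field{
		{Name: "name", Kind: "String"}, // smaller index
		{Name: "age", Kind: "Int"},
		{Name: "name", Kind: "Float"}, // duplicate name at a larger index
	}

	// Fill the map from the last field to the first, so a smaller-index
	// duplicate overwrites a larger-index one.
	byName := make(map[string]Field, len(fields))
	for i := len(fields) - 1; i >= 0; i-- {
		byName[fields[i].Name] = fields[i]
	}

	// Matches a first-match-wins lookup such as col.GetField.
	fmt.Println(byName["name"].Kind) // String
}
```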
- for i := len(col.Schema.Fields) - 1; i >= 0; i-- { - field := col.Schema.Fields[i] + for i := len(col.Schema().Fields) - 1; i >= 0; i-- { + field := col.Schema().Fields[i] f.fieldDescriptionsByName[field.Name] = field } @@ -81,11 +81,11 @@ func (f *lensedFetcher) Init( return err } - history, err := getTargetedSchemaHistory(ctx, txn, cfg, f.col.Schema.SchemaID, f.col.Schema.VersionID) + history, err := getTargetedSchemaHistory(ctx, txn, cfg, f.col.Schema().SchemaID, f.col.Schema().VersionID) if err != nil { return err } - f.lens = new(ctx, f.registry, f.col.Schema.VersionID, history) + f.lens = new(ctx, f.registry, f.col.Schema().VersionID, history) f.txn = txn for schemaVersionID := range history { @@ -100,7 +100,7 @@ func (f *lensedFetcher) Init( } } - f.targetVersionID = col.Schema.VersionID + f.targetVersionID = col.Schema().VersionID var innerFetcherFields []client.FieldDescription if f.hasMigrations { @@ -238,7 +238,7 @@ func (f *lensedFetcher) lensDocToEncodedDoc(docAsMap LensDoc) (fetcher.EncodedDo return &lensEncodedDocument{ key: []byte(key), - schemaVersionID: f.col.Schema.VersionID, + schemaVersionID: f.col.Schema().VersionID, status: status, properties: properties, }, nil @@ -283,7 +283,7 @@ func (f *lensedFetcher) updateDataStore(ctx context.Context, original map[string } datastoreKeyBase := core.DataStoreKey{ - CollectionID: f.col.IDString(), + CollectionID: f.col.Description().IDString(), DocKey: dockey, InstanceType: core.ValueKey, } diff --git a/net/process.go b/net/process.go index a2fd446cfe..c07800b51f 100644 --- a/net/process.go +++ b/net/process.go @@ -102,7 +102,7 @@ func initCRDTForType( core.COMPOSITE_NAMESPACE, ) } else { - fd, ok := description.Schema.GetField(field) + fd, ok := col.Schema().GetField(field) if !ok { return nil, errors.New(fmt.Sprintf("Couldn't find field %s for doc %s", field, dsKey)) } diff --git a/planner/commit.go b/planner/commit.go index e6216e2b43..c2cff28c30 100644 --- a/planner/commit.go +++ b/planner/commit.go @@ -333,7 +333,7 @@ func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L return core.Doc{}, nil, err } - field, ok := c.Description().Schema.GetField(fieldName.(string)) + field, ok := c.Schema().GetField(fieldName.(string)) if !ok { return core.Doc{}, nil, client.NewErrFieldNotExist(fieldName.(string)) } diff --git a/planner/datasource.go b/planner/datasource.go index 862f43bd33..72ac7579b4 100644 --- a/planner/datasource.go +++ b/planner/datasource.go @@ -15,15 +15,9 @@ import ( "github.com/sourcenetwork/defradb/planner/mapper" ) -// sourceInfo stores info about the data source -type sourceInfo struct { - collectionDescription client.CollectionDescription - // and more -} - type planSource struct { - info sourceInfo - plan planNode + collection client.Collection + plan planNode } func (p *Planner) getSource(parsed *mapper.Select) (planSource, error) { @@ -43,9 +37,7 @@ func (p *Planner) getCollectionScanPlan(mapperSelect *mapper.Select) (planSource } return planSource{ - plan: scan, - info: sourceInfo{ - collectionDescription: col.Description(), - }, + plan: scan, + collection: col, }, nil } diff --git a/planner/mapper/descriptions.go b/planner/mapper/descriptions.go deleted file mode 100644 index e7edd865cd..0000000000 --- a/planner/mapper/descriptions.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package mapper - -import ( - "context" - "encoding/json" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/errors" -) - -// DescriptionsRepo is a cache of previously requested collection descriptions -// that can be used to reduce multiple reads of the same collection description. -type DescriptionsRepo struct { - ctx context.Context - txn datastore.Txn - - collectionDescriptionsByName map[string]client.CollectionDescription -} - -// NewDescriptionsRepo instantiates a new DescriptionsRepo with the given context and transaction. -func NewDescriptionsRepo(ctx context.Context, txn datastore.Txn) *DescriptionsRepo { - return &DescriptionsRepo{ - ctx: ctx, - txn: txn, - collectionDescriptionsByName: map[string]client.CollectionDescription{}, - } -} - -// getCollectionDesc returns the description of the collection with the given name. -// -// Will return nil and an error if a description of the given name is not found. Will first look -// in the repo's cache for the description before doing a query operation on the datastore. -func (r *DescriptionsRepo) getCollectionDesc(name string) (client.CollectionDescription, error) { - collectionKey := core.NewCollectionKey(name) - var desc client.CollectionDescription - schemaVersionIdBytes, err := r.txn.Systemstore().Get(r.ctx, collectionKey.ToDS()) - if err != nil { - return desc, errors.Wrap("failed to get collection description", err) - } - - schemaVersionId := string(schemaVersionIdBytes) - schemaVersionKey := core.NewCollectionSchemaVersionKey(schemaVersionId) - buf, err := r.txn.Systemstore().Get(r.ctx, schemaVersionKey.ToDS()) - if err != nil { - return desc, err - } - - err = json.Unmarshal(buf, &desc) - if err != nil { - return desc, err - } - - return desc, nil -} diff --git a/planner/mapper/mapper.go b/planner/mapper/mapper.go index b6f80a55a2..418c0c5c57 100644 --- a/planner/mapper/mapper.go +++ b/planner/mapper/mapper.go @@ -21,7 +21,6 @@ import ( "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/connor" "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" ) var ( @@ -32,10 +31,9 @@ var ( // // In the process of doing so it will construct the document map required to access the data // yielded by the [Select]. -func ToSelect(ctx context.Context, txn datastore.Txn, selectRequest *request.Select) (*Select, error) { - descriptionsRepo := NewDescriptionsRepo(ctx, txn) +func ToSelect(ctx context.Context, store client.Store, selectRequest *request.Select) (*Select, error) { // the top-level select will always have index=0, and no parent collection name - return toSelect(descriptionsRepo, 0, selectRequest, "") + return toSelect(ctx, store, 0, selectRequest, "") } // toSelect converts the given [parser.Select] into a [Select]. @@ -43,29 +41,30 @@ func ToSelect(ctx context.Context, txn datastore.Txn, selectRequest *request.Sel // In the process of doing so it will construct the document map required to access the data // yielded by the [Select]. 
func toSelect( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, thisIndex int, selectRequest *request.Select, parentCollectionName string, ) (*Select, error) { - collectionName, err := getCollectionName(descriptionsRepo, selectRequest, parentCollectionName) + collectionName, err := getCollectionName(ctx, store, selectRequest, parentCollectionName) if err != nil { return nil, err } - mapping, desc, err := getTopLevelInfo(descriptionsRepo, selectRequest, collectionName) + mapping, collection, err := getTopLevelInfo(ctx, store, selectRequest, collectionName) if err != nil { return nil, err } - fields, aggregates, err := getRequestables(selectRequest, mapping, desc, descriptionsRepo) + fields, aggregates, err := getRequestables(ctx, selectRequest, mapping, collection, store) if err != nil { return nil, err } // Needs to be done before resolving aggregates, else filter conversion may fail there filterDependencies, err := resolveFilterDependencies( - descriptionsRepo, collectionName, selectRequest.Filter, mapping, fields) + ctx, store, collectionName, selectRequest.Filter, mapping, fields) if err != nil { return nil, err } @@ -73,28 +72,31 @@ func toSelect( // Resolve order dependencies that may have been missed due to not being rendered. err = resolveOrderDependencies( - descriptionsRepo, collectionName, selectRequest.OrderBy, mapping, &fields) + ctx, store, collectionName, selectRequest.OrderBy, mapping, &fields) if err != nil { return nil, err } aggregates = appendUnderlyingAggregates(aggregates, mapping) fields, err = resolveAggregates( + ctx, selectRequest, aggregates, fields, mapping, - desc, - descriptionsRepo, + collection, + store, ) if err != nil { return nil, err } - fields, err = resolveSecondaryRelationIDs(descriptionsRepo, desc, mapping, fields) - if err != nil { - return nil, err + if collection != nil { + fields, err = resolveSecondaryRelationIDs(ctx, store, collection, mapping, fields) + if err != nil { + return nil, err + } } // Resolve groupBy mappings i.e. alias remapping and handle missed inner group. @@ -102,7 +104,10 @@ func toSelect( groupByFields := selectRequest.GroupBy.Value().Fields // Remap all alias field names to use their internal field name mappings. for index, groupByField := range groupByFields { - fieldDesc, ok := desc.Schema.GetField(groupByField) + if collection == nil { + continue + } + fieldDesc, ok := collection.Schema().GetField(groupByField) if ok && fieldDesc.IsObject() && !fieldDesc.IsObjectArray() { groupByFields[index] = groupByField + request.RelatedObjectID } else if ok && fieldDesc.IsObjectArray() { @@ -135,7 +140,8 @@ func toSelect( // resolveOrderDependencies will map fields that were missed due to them not being requested. // Modifies the consumed existingFields and mapping accordingly. 
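The signature rewrites in this file all follow one pattern: the DescriptionsRepo cache is removed, and each mapper helper instead takes a context plus a store and resolves collections by name on demand. A hedged sketch of that threading pattern, using a cut-down Store interface that only approximates client.Store:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Store is a trimmed, assumed analogue of client.Store.
type Store interface {
	GetCollectionByName(ctx context.Context, name string) (string, error)
}

// mapStore is a toy in-memory Store used only for this sketch.
type mapStore map[string]string

func (m mapStore) GetCollectionByName(_ context.Context, name string) (string, error) {
	col, ok := m[name]
	if !ok {
		return "", errors.New("collection not found: " + name)
	}
	return col, nil
}

// childCollectionName mirrors the refactored helpers: it receives (ctx, store)
// instead of a shared repo value, so no cache has to be threaded through calls.
func childCollectionName(ctx context.Context, store Store, parentName string) (string, error) {
	return store.GetCollectionByName(ctx, parentName)
}

func main() {
	store := mapStore{"Author": "Author"}
	name, err := childCollectionName(context.Background(), store, "Author")
	fmt.Println(name, err)
}
```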
func resolveOrderDependencies( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, descName string, source immutable.Option[request.OrderBy], mapping *core.DocumentMapping, @@ -160,7 +166,7 @@ outer: joinField := fields[0] // ensure the child select is resolved for this order join - innerSelect, err := resolveChildOrder(descriptionsRepo, descName, joinField, mapping, currentExistingFields) + innerSelect, err := resolveChildOrder(ctx, store, descName, joinField, mapping, currentExistingFields) if err != nil { return err } @@ -178,7 +184,7 @@ outer: joinField := fields[0] // ensure the child select is resolved for this order join - innerSelect, err := resolveChildOrder(descriptionsRepo, descName, joinField, mapping, existingFields) + innerSelect, err := resolveChildOrder(ctx, store, descName, joinField, mapping, existingFields) if err != nil { return err } @@ -203,7 +209,8 @@ outer: // given a type join field, ensure its mapping exists // and add a corresponding select field(s) func resolveChildOrder( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, descName string, orderChildField string, mapping *core.DocumentMapping, @@ -221,7 +228,7 @@ func resolveChildOrder( Name: orderChildField, }, } - innerSelect, err := toSelect(descriptionsRepo, index, &dummyJoinFieldSelect, descName) + innerSelect, err := toSelect(ctx, store, index, &dummyJoinFieldSelect, descName) if err != nil { return nil, err } @@ -250,12 +257,13 @@ func resolveChildOrder( // append the new target field as well as the aggregate. The mapping will also be // updated with any new fields/aggregates. func resolveAggregates( + ctx context.Context, selectRequest *request.Select, aggregates []*aggregateRequest, inputFields []Requestable, mapping *core.DocumentMapping, - desc *client.CollectionDescription, - descriptionsRepo *DescriptionsRepo, + collection client.Collection, + store client.Store, ) ([]Requestable, error) { fields := inputFields dependenciesByParentId := map[int][]int{} @@ -274,7 +282,12 @@ func resolveAggregates( var hasHost bool var convertedFilter *Filter if childIsMapped { - fieldDesc, isField := desc.Schema.GetField(target.hostExternalName) + var fieldDesc client.FieldDescription + var isField bool + if collection != nil { + fieldDesc, isField = collection.Schema().GetField(target.hostExternalName) + } + if isField && !fieldDesc.IsObject() { var order *OrderBy if target.order.HasValue() && len(target.order.Value().Conditions) > 0 { @@ -326,24 +339,29 @@ func resolveAggregates( }, } - childCollectionName, err := getCollectionName(descriptionsRepo, hostSelectRequest, desc.Name) + var collectionName string + if collection != nil { + collectionName = collection.Name() + } + + childCollectionName, err := getCollectionName(ctx, store, hostSelectRequest, collectionName) if err != nil { return nil, err } mapAggregateNestedTargets(target, hostSelectRequest, selectRequest.Root) - childMapping, childDesc, err := getTopLevelInfo(descriptionsRepo, hostSelectRequest, childCollectionName) + childMapping, childDesc, err := getTopLevelInfo(ctx, store, hostSelectRequest, childCollectionName) if err != nil { return nil, err } - childFields, _, err := getRequestables(hostSelectRequest, childMapping, childDesc, descriptionsRepo) + childFields, _, err := getRequestables(ctx, hostSelectRequest, childMapping, childDesc, store) if err != nil { return nil, err } err = resolveOrderDependencies( - descriptionsRepo, childCollectionName, target.order, childMapping, &childFields) +
ctx, store, childCollectionName, target.order, childMapping, &childFields) if err != nil { return nil, err } @@ -587,10 +605,11 @@ func appendIfNotExists( // and aggregateRequests from the given selectRequest.Fields slice. It also mutates the // consumed mapping data. func getRequestables( + ctx context.Context, selectRequest *request.Select, mapping *core.DocumentMapping, - desc *client.CollectionDescription, - descriptionsRepo *DescriptionsRepo, + collection client.Collection, + store client.Store, ) (fields []Requestable, aggregates []*aggregateRequest, err error) { for _, field := range selectRequest.Fields { switch f := field.(type) { @@ -611,8 +630,12 @@ func getRequestables( }) case *request.Select: index := mapping.GetNextIndex() + var parentCollectionName string + if collection != nil { + parentCollectionName = collection.Name() + } - innerSelect, err := toSelect(descriptionsRepo, index, f, desc.Name) + innerSelect, err := toSelect(ctx, store, index, f, parentCollectionName) if err != nil { return nil, nil, err } @@ -676,7 +699,8 @@ func getAggregateRequests(index int, aggregate *request.Aggregate) (aggregateReq // getCollectionName returns the name of the selectRequest collection. This may be empty // if this is a commit request. func getCollectionName( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, selectRequest *request.Select, parentCollectionName string, ) (string, error) { @@ -692,12 +716,12 @@ func getCollectionName( } if parentCollectionName != "" { - parentDescription, err := descriptionsRepo.getCollectionDesc(parentCollectionName) + parentCollection, err := store.GetCollectionByName(ctx, parentCollectionName) if err != nil { return "", err } - hostFieldDesc, parentHasField := parentDescription.Schema.GetField(selectRequest.Name) + hostFieldDesc, parentHasField := parentCollection.Schema().GetField(selectRequest.Name) if parentHasField && hostFieldDesc.RelationType != 0 { // If this field exists on the parent, and it is a child object // then this collection name is the collection name of the child. @@ -710,28 +734,29 @@ func getCollectionName( // getTopLevelInfo returns the collection description and maps the fields directly on the object. func getTopLevelInfo( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, selectRequest *request.Select, collectionName string, -) (*core.DocumentMapping, *client.CollectionDescription, error) { +) (*core.DocumentMapping, client.Collection, error) { mapping := core.NewDocumentMapping() if _, isAggregate := request.Aggregates[selectRequest.Name]; isAggregate { // If this is a (top-level) aggregate, then it will have no collection // description, and no top-level fields, so we return an empty mapping only - return mapping, &client.CollectionDescription{}, nil + return mapping, nil, nil } if selectRequest.Root == request.ObjectSelection { mapping.Add(core.DocKeyFieldIndex, request.KeyFieldName) - desc, err := descriptionsRepo.getCollectionDesc(collectionName) + collection, err := store.GetCollectionByName(ctx, collectionName) if err != nil { return nil, nil, err } // Map all fields from schema into the map as they are fetched automatically - for _, f := range desc.Schema.Fields { + for _, f := range collection.Schema().Fields { if f.IsObject() { // Objects are skipped, as they are not fetched by default and // have to be requested via selects. 
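A language-level aside (not part of the patch) on the `collection != nil` guards introduced above: getTopLevelInfo now returns a bare nil client.Collection for commit and top-level aggregate selects, and the guards work only because that is an untyped nil interface; a nil concrete pointer wrapped in the interface would compare non-nil. Minimal demonstration with stand-in types:

```go
package main

import "fmt"

// Collection is a one-method stand-in for client.Collection.
type Collection interface{ Name() string }

type col struct{}

func (*col) Name() string { return "col" }

// typedNil returns a nil *col inside the interface: the interface value
// itself is non-nil because it still carries type information.
func typedNil() Collection { var c *col; return c }

// untypedNil returns a bare nil interface, as the mapper code does.
func untypedNil() Collection { return nil }

func main() {
	fmt.Println(typedNil() == nil)   // false
	fmt.Println(untypedNil() == nil) // true
}
```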
@@ -746,7 +771,7 @@ func getTopLevelInfo( mapping.Add(mapping.GetNextIndex(), request.DeletedFieldName) - return mapping, &desc, nil + return mapping, collection, nil } if selectRequest.Name == request.LinksFieldName { @@ -767,11 +792,12 @@ func getTopLevelInfo( mapping.SetTypeName(request.CommitTypeName) } - return mapping, &client.CollectionDescription{}, nil + return mapping, nil, nil } func resolveFilterDependencies( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, parentCollectionName string, source immutable.Option[request.Filter], mapping *core.DocumentMapping, @@ -782,7 +808,8 @@ func resolveFilterDependencies( } return resolveInnerFilterDependencies( - descriptionsRepo, + ctx, + store, parentCollectionName, source.Value().Conditions, mapping, @@ -792,7 +819,8 @@ func resolveFilterDependencies( } func resolveInnerFilterDependencies( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, parentCollectionName string, source map[string]any, mapping *core.DocumentMapping, @@ -806,7 +834,8 @@ func resolveInnerFilterDependencies( compoundFilter := source[key].([]any) for _, innerFilter := range compoundFilter { innerFields, err := resolveInnerFilterDependencies( - descriptionsRepo, + ctx, + store, parentCollectionName, innerFilter.(map[string]any), mapping, @@ -824,7 +853,8 @@ func resolveInnerFilterDependencies( } } else if key == request.FilterOpNot { notFilter := source[key].(map[string]any) innerFields, err := resolveInnerFilterDependencies( - descriptionsRepo, + ctx, + store, parentCollectionName, notFilter, mapping, @@ -868,7 +898,7 @@ func resolveInnerFilterDependencies( } } else { var err error - childSelect, err = constructEmptyJoin(descriptionsRepo, parentCollectionName, mapping, key) + childSelect, err = constructEmptyJoin(ctx, store, parentCollectionName, mapping, key) if err != nil { return nil, err } @@ -885,13 +915,14 @@ func resolveInnerFilterDependencies( } dummyParsed := &request.Select{Field: request.Field{Name: key}} - childCollectionName, err := getCollectionName(descriptionsRepo, dummyParsed, parentCollectionName) + childCollectionName, err := getCollectionName(ctx, store, dummyParsed, parentCollectionName) if err != nil { return nil, err } childFields, err := resolveInnerFilterDependencies( - descriptionsRepo, + ctx, + store, childCollectionName, childFilter, childSelect.DocumentMapping, @@ -910,7 +941,8 @@ func resolveInnerFilterDependencies( // constructEmptyJoin constructs a valid empty join with no requested fields. func constructEmptyJoin( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, parentCollectionName string, parentMapping *core.DocumentMapping, name string, @@ -923,12 +955,12 @@ func constructEmptyJoin( }, } - childCollectionName, err := getCollectionName(descriptionsRepo, dummyParsed, parentCollectionName) + childCollectionName, err := getCollectionName(ctx, store, dummyParsed, parentCollectionName) if err != nil { return nil, err } - childMapping, _, err := getTopLevelInfo(descriptionsRepo, dummyParsed, childCollectionName) + childMapping, _, err := getTopLevelInfo(ctx, store, dummyParsed, childCollectionName) if err != nil { return nil, err } @@ -955,8 +987,9 @@ func constructEmptyJoin( // // The copying itself is handled within [typeJoinOne].
func resolveSecondaryRelationIDs( - descriptionsRepo *DescriptionsRepo, - desc *client.CollectionDescription, + ctx context.Context, + store client.Store, + collection client.Collection, mapping *core.DocumentMapping, requestables []Requestable, ) ([]Requestable, error) { @@ -968,7 +1001,7 @@ func resolveSecondaryRelationIDs( continue } - fieldDesc, descFound := desc.Schema.GetField(existingField.Name) + fieldDesc, descFound := collection.Schema().GetField(existingField.Name) if !descFound { continue } @@ -977,7 +1010,7 @@ func resolveSecondaryRelationIDs( continue } - objectFieldDesc, descFound := desc.Schema.GetField( + objectFieldDesc, descFound := collection.Schema().GetField( strings.TrimSuffix(existingField.Name, request.RelatedObjectID), ) if !descFound { @@ -995,7 +1028,7 @@ func resolveSecondaryRelationIDs( continue } - siblingFieldDesc, descFound := desc.Schema.GetField(siblingSelect.Field.Name) + siblingFieldDesc, descFound := collection.Schema().GetField(siblingSelect.Field.Name) if !descFound { continue } @@ -1017,8 +1050,9 @@ func resolveSecondaryRelationIDs( // We only require the dockey of the related object, so an empty join is all we need. join, err := constructEmptyJoin( - descriptionsRepo, - desc.Name, + ctx, + store, + collection.Name(), mapping, objectFieldName, ) @@ -1039,10 +1073,10 @@ func resolveSecondaryRelationIDs( // yielded by the [Select] embedded in the [CommitSelect]. func ToCommitSelect( ctx context.Context, - txn datastore.Txn, + store client.Store, selectRequest *request.CommitSelect, ) (*CommitSelect, error) { - underlyingSelect, err := ToSelect(ctx, txn, selectRequest.ToSelect()) + underlyingSelect, err := ToSelect(ctx, store, selectRequest.ToSelect()) if err != nil { return nil, err } @@ -1059,8 +1093,8 @@ func ToCommitSelect( // // In the process of doing so it will construct the document map required to access the data // yielded by the [Select] embedded in the [Mutation]. 
-func ToMutation(ctx context.Context, txn datastore.Txn, mutationRequest *request.ObjectMutation) (*Mutation, error) { - underlyingSelect, err := ToSelect(ctx, txn, mutationRequest.ToSelect()) +func ToMutation(ctx context.Context, store client.Store, mutationRequest *request.ObjectMutation) (*Mutation, error) { + underlyingSelect, err := ToSelect(ctx, store, mutationRequest.ToSelect()) if err != nil { return nil, err } diff --git a/planner/planner.go b/planner/planner.go index 7821b5aaaf..b066e1f0e3 100644 --- a/planner/planner.go +++ b/planner/planner.go @@ -114,7 +114,7 @@ func (p *Planner) newPlan(stmt any) (planNode, error) { return p.newPlan(n.Selections[0]) case *request.Select: - m, err := mapper.ToSelect(p.ctx, p.txn, n) + m, err := mapper.ToSelect(p.ctx, p.db, n) if err != nil { return nil, err } @@ -129,14 +129,14 @@ func (p *Planner) newPlan(stmt any) (planNode, error) { return p.Select(m) case *request.CommitSelect: - m, err := mapper.ToCommitSelect(p.ctx, p.txn, n) + m, err := mapper.ToCommitSelect(p.ctx, p.db, n) if err != nil { return nil, err } return p.CommitSelect(m) case *request.ObjectMutation: - m, err := mapper.ToMutation(p.ctx, p.txn, n) + m, err := mapper.ToMutation(p.ctx, p.db, n) if err != nil { return nil, err } @@ -338,8 +338,9 @@ func (p *Planner) tryOptimizeJoinDirection(node *invertibleTypeJoin, parentPlan node.documentMapping, ) slct := node.subType.(*selectTopNode).selectNode - desc := slct.sourceInfo.collectionDescription - indexedFields := desc.CollectIndexedFields(&desc.Schema) + desc := slct.collection.Description() + schema := slct.collection.Schema() + indexedFields := desc.CollectIndexedFields(&schema) for _, indField := range indexedFields { if ind, ok := filteredSubFields[indField.Name]; ok { subInd := node.documentMapping.FirstIndexOfName(node.subTypeName) @@ -412,7 +413,7 @@ func (p *Planner) expandGroupNodePlan(topNodeSelect *selectTopNode) error { childSelect, pipe, false, - &topNodeSelect.selectNode.sourceInfo, + topNodeSelect.selectNode.collection, ) if err != nil { return err diff --git a/planner/scan.go b/planner/scan.go index f9a80705cb..64a534da6d 100644 --- a/planner/scan.go +++ b/planner/scan.go @@ -38,8 +38,8 @@ type scanNode struct { documentIterator docMapper - p *Planner - desc client.CollectionDescription + p *Planner + col client.Collection fields []client.FieldDescription @@ -65,7 +65,7 @@ func (n *scanNode) Init() error { if err := n.fetcher.Init( n.p.ctx, n.p.txn, - &n.desc, + n.col, n.fields, n.filter, n.slct.DocumentMapping, @@ -77,8 +77,8 @@ func (n *scanNode) Init() error { return n.initScan() } -func (n *scanNode) initCollection(desc client.CollectionDescription) error { - n.desc = desc +func (n *scanNode) initCollection(col client.Collection) error { + n.col = col return n.initFields(n.slct.Fields) } @@ -104,7 +104,7 @@ func (n *scanNode) initFields(fields []mapper.Requestable) error { if target.Filter != nil { fieldDescs, err := parser.ParseFilterFieldsForDescription( target.Filter.ExternalConditions, - n.desc.Schema, + n.col.Schema(), ) if err != nil { return err @@ -125,7 +125,7 @@ func (n *scanNode) initFields(fields []mapper.Requestable) error { } func (n *scanNode) tryAddField(fieldName string) bool { - fd, ok := n.desc.Schema.GetField(fieldName) + fd, ok := n.col.Schema().GetField(fieldName) if !ok { // skip fields that are not part of the // schema description. 
The scanner (and fetcher) @@ -152,7 +152,7 @@ func (scan *scanNode) initFetcher( var indexFilter *mapper.Filter scan.filter, indexFilter = filter.SplitByField(scan.filter, field) if indexFilter != nil { - fieldDesc, _ := scan.desc.Schema.GetField(indexedField.Value().Name) + fieldDesc, _ := scan.col.Schema().GetField(indexedField.Value().Name) f = fetcher.NewIndexFetcher(f, fieldDesc, indexFilter) } } @@ -170,7 +170,7 @@ func (n *scanNode) Start() error { func (n *scanNode) initScan() error { if !n.spans.HasValue { - start := base.MakeCollectionKey(n.desc) + start := base.MakeCollectionKey(n.col.Description()) n.spans = core.NewSpans(core.NewSpan(start, start.PrefixEnd())) } @@ -252,8 +252,8 @@ func (n *scanNode) simpleExplain() (map[string]any, error) { } // Add the collection attributes. - simpleExplainMap[collectionNameLabel] = n.desc.Name - simpleExplainMap[collectionIDLabel] = n.desc.IDString() + simpleExplainMap[collectionNameLabel] = n.col.Name() + simpleExplainMap[collectionIDLabel] = n.col.Description().IDString() // Add the spans attribute. simpleExplainMap[spansLabel] = n.explainSpans() @@ -298,7 +298,11 @@ func (p *Planner) Scan( docMapper: docMapper{mapperSelect.DocumentMapping}, } - err := scan.initCollection(colDesc) + col, err := p.db.GetCollectionByName(p.ctx, mapperSelect.CollectionName) + if err != nil { + return nil, err + } + err = scan.initCollection(col) if err != nil { return nil, err } diff --git a/planner/select.go b/planner/select.go index 21524ed31f..20c0dd43ba 100644 --- a/planner/select.go +++ b/planner/select.go @@ -102,9 +102,7 @@ type selectNode struct { // was created origSource planNode - // cache information about the original data source - // collection name, meta-data, etc. - sourceInfo sourceInfo + collection client.Collection // top level filter expression // filter is split between select, scan, and typeIndexJoin. 
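The comment above (and the SplitByField call in initFetcher earlier in this hunk) describes how one request filter is partitioned so that conditions on an indexed field feed the index fetcher while the rest stay on the scan. A rough sketch of that partitioning over a plain conditions map; the shapes here are assumptions, since the real filter.SplitByField operates on mapper filters rather than raw maps:

```go
package main

import "fmt"

// splitByField partitions a conditions map into the conditions that target
// the indexed field and everything else.
func splitByField(conds map[string]any, field string) (rest, indexed map[string]any) {
	rest = map[string]any{}
	indexed = map[string]any{}
	for name, cond := range conds {
		if name == field {
			indexed[name] = cond
		} else {
			rest[name] = cond
		}
	}
	return rest, indexed
}

func main() {
	rest, indexed := splitByField(map[string]any{
		"name": map[string]any{"_eq": "John"},
		"age":  map[string]any{"_gt": 21},
	}, "name")
	fmt.Println("scan keeps:", rest)
	fmt.Println("index fetcher gets:", indexed)
}
```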
@@ -245,7 +243,7 @@ func (n *selectNode) initSource() ([]aggregateNode, error) { } n.source = sourcePlan.plan n.origSource = sourcePlan.plan - n.sourceInfo = sourcePlan.info + n.collection = sourcePlan.collection // split filter // apply the root filter to the source @@ -279,7 +277,7 @@ func (n *selectNode) initSource() ([]aggregateNode, error) { // instead of a prefix scan + filter via the Primary Index (0), like here: spans := make([]core.Span, len(n.selectReq.DocKeys.Value())) for i, docKey := range n.selectReq.DocKeys.Value() { - dockeyIndexKey := base.MakeDocKey(sourcePlan.info.collectionDescription, docKey) + dockeyIndexKey := base.MakeDocKey(sourcePlan.collection.Description(), docKey) spans[i] = core.NewSpan(dockeyIndexKey, dockeyIndexKey.PrefixEnd()) } origScan.Spans(core.NewSpans(spans...)) @@ -300,7 +298,8 @@ func (n *selectNode) initSource() ([]aggregateNode, error) { func findFilteredByIndexedField(scanNode *scanNode) immutable.Option[client.FieldDescription] { if scanNode.filter != nil { - indexedFields := scanNode.desc.CollectIndexedFields(&scanNode.desc.Schema) + schema := scanNode.col.Schema() + indexedFields := scanNode.col.Description().CollectIndexedFields(&schema) for i := range indexedFields { typeIndex := scanNode.documentMapping.FirstIndexOfName(indexedFields[i].Name) if scanNode.filter.HasIndex(typeIndex) { @@ -404,7 +403,7 @@ func (p *Planner) SelectFromSource( selectReq *mapper.Select, source planNode, fromCollection bool, - providedSourceInfo *sourceInfo, + collection client.Collection, ) (planNode, error) { s := &selectNode{ planner: p, @@ -419,8 +418,8 @@ func (p *Planner) SelectFromSource( orderBy := selectReq.OrderBy groupBy := selectReq.GroupBy - if providedSourceInfo != nil { - s.sourceInfo = *providedSourceInfo + if collection != nil { + s.collection = collection } if fromCollection { @@ -429,7 +428,7 @@ func (p *Planner) SelectFromSource( return nil, err } - s.sourceInfo = sourceInfo{col.Description()} + s.collection = col } aggregates, err := s.initFields(selectReq) diff --git a/planner/type_join.go b/planner/type_join.go index 6e5d9a0d49..47ba07e96b 100644 --- a/planner/type_join.go +++ b/planner/type_join.go @@ -81,8 +81,7 @@ func (p *Planner) makeTypeIndexJoin( var joinPlan planNode var err error - desc := parent.sourceInfo.collectionDescription - typeFieldDesc, ok := desc.Schema.GetField(subType.Name) + typeFieldDesc, ok := parent.collection.Schema().GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -245,7 +244,7 @@ func (p *Planner) makeTypeJoinOne( } // get the correct sub field schema type (collection) - subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.Schema.GetField(subType.Name) + subTypeFieldDesc, ok := parent.collection.Schema().GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -258,11 +257,13 @@ func (p *Planner) makeTypeJoinOne( if err != nil { return nil, err } + subTypeSchema := subTypeCol.Schema() subTypeField, subTypeFieldNameFound := subTypeCol.Description().GetFieldByRelation( subTypeFieldDesc.RelationName, - parent.sourceInfo.collectionDescription.Name, + parent.collection.Name(), subTypeFieldDesc.Name, + &subTypeSchema, ) if !subTypeFieldNameFound { return nil, client.NewErrFieldNotExist(subTypeFieldDesc.RelationName) @@ -382,7 +383,7 @@ func (p *Planner) makeTypeJoinMany( return nil, err } - subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.Schema.GetField(subType.Name) + subTypeFieldDesc, ok := 
parent.collection.Schema().GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -391,11 +392,13 @@ func (p *Planner) makeTypeJoinMany( if err != nil { return nil, err } + subTypeSchema := subTypeCol.Schema() rootField, rootNameFound := subTypeCol.Description().GetFieldByRelation( subTypeFieldDesc.RelationName, - parent.sourceInfo.collectionDescription.Name, + parent.collection.Name(), subTypeFieldDesc.Name, + &subTypeSchema, ) if !rootNameFound { @@ -441,7 +444,7 @@ func fetchPrimaryDoc(node, subNode planNode, parentProp string) (bool, error) { if scan == nil { return false, nil } - rootDocKey := base.MakeDocKey(scan.desc, docKeyStr) + rootDocKey := base.MakeDocKey(scan.col.Description(), docKeyStr) spans := core.NewSpans(core.NewSpan(rootDocKey, rootDocKey.PrefixEnd())) diff --git a/request/graphql/parser.go b/request/graphql/parser.go index ddd13d9e62..743c3eab97 100644 --- a/request/graphql/parser.go +++ b/request/graphql/parser.go @@ -104,13 +104,13 @@ func (p *parser) Parse(ast *ast.Document) (*request.Request, []error) { } func (p *parser) ParseSDL(ctx context.Context, schemaString string) ( - []client.CollectionDescription, + []client.CollectionDefinition, error, ) { return schema.FromString(ctx, schemaString) } -func (p *parser) SetSchema(ctx context.Context, txn datastore.Txn, collections []client.CollectionDescription) error { +func (p *parser) SetSchema(ctx context.Context, txn datastore.Txn, collections []client.CollectionDefinition) error { schemaManager, err := schema.NewSchemaManager() if err != nil { return err } diff --git a/request/graphql/schema/collection.go b/request/graphql/schema/collection.go index 00287c4454..d5b55fb6da 100644 --- a/request/graphql/schema/collection.go +++ b/request/graphql/schema/collection.go @@ -26,7 +26,7 @@ import ( // FromString parses a GQL SDL string into a set of collection descriptions. func FromString(ctx context.Context, schemaString string) ( - []client.CollectionDescription, + []client.CollectionDefinition, error, ) { source := source.NewSource(&source.Source{ @@ -47,11 +47,11 @@ func FromString(ctx context.Context, schemaString string) ( // fromAst parses a GQL AST into a set of collection descriptions. func fromAst(ctx context.Context, doc *ast.Document) ( - []client.CollectionDescription, + []client.CollectionDefinition, error, ) { relationManager := NewRelationManager() - descriptions := []client.CollectionDescription{} + definitions := []client.CollectionDefinition{} for _, def := range doc.Definitions { switch defType := def.(type) { @@ -61,7 +61,7 @@ func fromAst(ctx context.Context, doc *ast.Document) ( return nil, err } - descriptions = append(descriptions, description) + definitions = append(definitions, description) default: // Do nothing, ignore it and continue @@ -72,12 +72,12 @@ func fromAst(ctx context.Context, doc *ast.Document) ( // The details on the relations between objects depend on both sides // of the relationship. The relation manager handles this, and must be applied // after all the collections have been processed. - err := finalizeRelations(relationManager, descriptions) + err := finalizeRelations(relationManager, definitions) if err != nil { return nil, err } - return descriptions, nil + return definitions, nil } // fromAstDefinition parses an AST object definition into a set of collection descriptions.
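The parser changes above all follow one shape change: SDL parsing now yields client.CollectionDefinition values that pair the collection description (name, indexes) with its schema (name, fields), instead of nesting the schema inside the description. A trimmed sketch of the resulting shape, using stand-in types with field kinds simplified to strings:

```go
package main

import "fmt"

// Stand-in types mirroring the shape shown in the diff; the real client
// package types carry more fields (IDs, CRDT types, relation info, ...).
type FieldDescription struct{ Name, Kind string }

type SchemaDescription struct {
	Name   string
	Fields []FieldDescription
}

type CollectionDescription struct {
	Name    string
	Indexes []string
}

// CollectionDefinition pairs a description with its schema as siblings.
type CollectionDefinition struct {
	Description CollectionDescription
	Schema      SchemaDescription
}

func main() {
	def := CollectionDefinition{
		Description: CollectionDescription{Name: "User", Indexes: []string{}},
		Schema: SchemaDescription{
			Name: "User",
			Fields: []FieldDescription{
				{Name: "_key", Kind: "DocKey"},
				{Name: "name", Kind: "String"},
			},
		},
	}
	fmt.Printf("%+v\n", def)
}
```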
@@ -85,7 +85,7 @@ func fromAstDefinition( ctx context.Context, relationManager *RelationManager, def *ast.ObjectDefinition, -) (client.CollectionDescription, error) { +) (client.CollectionDefinition, error) { fieldDescriptions := []client.FieldDescription{ { Name: request.KeyFieldName, @@ -98,7 +98,7 @@ func fromAstDefinition( for _, field := range def.Fields { tmpFieldsDescriptions, err := fieldsFromAST(field, relationManager, def) if err != nil { - return client.CollectionDescription{}, err + return client.CollectionDefinition{}, err } fieldDescriptions = append(fieldDescriptions, tmpFieldsDescriptions...) @@ -107,7 +107,7 @@ func fromAstDefinition( if directive.Name.Value == types.IndexDirectiveLabel { index, err := fieldIndexFromAST(field, directive) if err != nil { - return client.CollectionDescription{}, err + return client.CollectionDefinition{}, err } indexDescriptions = append(indexDescriptions, index) } @@ -129,19 +129,21 @@ func fromAstDefinition( if directive.Name.Value == types.IndexDirectiveLabel { index, err := indexFromAST(directive) if err != nil { - return client.CollectionDescription{}, err + return client.CollectionDefinition{}, err } indexDescriptions = append(indexDescriptions, index) } } - return client.CollectionDescription{ - Name: def.Name.Value, + return client.CollectionDefinition{ + Description: client.CollectionDescription{ + Name: def.Name.Value, + Indexes: indexDescriptions, + }, Schema: client.SchemaDescription{ Name: def.Name.Value, Fields: fieldDescriptions, }, - Indexes: indexDescriptions, }, nil } @@ -424,9 +426,9 @@ func getRelationshipName( return genRelationName(hostName, targetName) } -func finalizeRelations(relationManager *RelationManager, descriptions []client.CollectionDescription) error { - for _, description := range descriptions { - for i, field := range description.Schema.Fields { +func finalizeRelations(relationManager *RelationManager, definitions []client.CollectionDefinition) error { + for _, definition := range definitions { + for i, field := range definition.Schema.Fields { if field.RelationType == 0 || field.RelationType&client.Relation_Type_INTERNAL_ID != 0 { continue } @@ -447,7 +449,7 @@ func finalizeRelations(relationManager *RelationManager, descriptions []client.C } field.RelationType = rel.Kind() | fieldRelationType - description.Schema.Fields[i] = field + definition.Schema.Fields[i] = field } } diff --git a/request/graphql/schema/descriptions_test.go b/request/graphql/schema/descriptions_test.go index 2ce5e55dc9..2368b58c27 100644 --- a/request/graphql/schema/descriptions_test.go +++ b/request/graphql/schema/descriptions_test.go @@ -30,9 +30,12 @@ func TestSingleSimpleType(t *testing.T) { verified: Boolean } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "User", + Description: client.CollectionDescription{ + Name: "User", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "User", Fields: []client.FieldDescription{ @@ -58,7 +61,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -77,9 +79,12 @@ func TestSingleSimpleType(t *testing.T) { rating: Float } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "User", + Description: client.CollectionDescription{ + Name: "User", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "User", Fields: []client.FieldDescription{ @@ -105,10 +110,12 @@ func 
TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -134,7 +141,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -153,9 +159,12 @@ func TestSingleSimpleType(t *testing.T) { published: Book } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "Book", + Description: client.CollectionDescription{ + Name: "Book", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Book", Fields: []client.FieldDescription{ @@ -190,10 +199,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -228,7 +239,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -247,9 +257,12 @@ func TestSingleSimpleType(t *testing.T) { rating: Float } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "User", + Description: client.CollectionDescription{ + Name: "User", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "User", Fields: []client.FieldDescription{ @@ -275,10 +288,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -304,7 +319,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -323,9 +337,12 @@ func TestSingleSimpleType(t *testing.T) { published: Book @relation(name:"book_authors") } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "Book", + Description: client.CollectionDescription{ + Name: "Book", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Book", Fields: []client.FieldDescription{ @@ -360,10 +377,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -398,7 +417,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -417,9 +435,12 @@ func TestSingleSimpleType(t *testing.T) { published: Book } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "Book", + Description: client.CollectionDescription{ + Name: "Book", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Book", Fields: []client.FieldDescription{ @@ -454,10 +475,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: 
[]client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -492,7 +515,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -511,9 +533,12 @@ func TestSingleSimpleType(t *testing.T) { published: [Book] } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "Book", + Description: client.CollectionDescription{ + Name: "Book", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Book", Fields: []client.FieldDescription{ @@ -548,10 +573,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -580,7 +607,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -599,12 +625,12 @@ func runCreateDescriptionTest(t *testing.T, testcase descriptionTestCase) { assert.Equal(t, len(descs), len(testcase.targetDescs), testcase.description) for i, d := range descs { - assert.Equal(t, testcase.targetDescs[i], d, testcase.description) + assert.Equal(t, testcase.targetDescs[i].Description, d.Description, testcase.description) } } type descriptionTestCase struct { description string sdl string - targetDescs []client.CollectionDescription + targetDescs []client.CollectionDefinition } diff --git a/request/graphql/schema/generate.go b/request/graphql/schema/generate.go index e30693b3de..5ccc4a897a 100644 --- a/request/graphql/schema/generate.go +++ b/request/graphql/schema/generate.go @@ -47,7 +47,7 @@ func (m *SchemaManager) NewGenerator() *Generator { // Generate generates the query-op and mutation-op type definitions from // the given CollectionDescriptions. -func (g *Generator) Generate(ctx context.Context, collections []client.CollectionDescription) ([]*gql.Object, error) { +func (g *Generator) Generate(ctx context.Context, collections []client.CollectionDefinition) ([]*gql.Object, error) { typeMapBeforeMutation := g.manager.schema.TypeMap() typesBeforeMutation := make(map[string]any, len(typeMapBeforeMutation)) @@ -79,7 +79,7 @@ func (g *Generator) Generate(ctx context.Context, collections []client.Collectio // generate generates the query-op and mutation-op type definitions from // the given CollectionDescriptions. 
-func (g *Generator) generate(ctx context.Context, collections []client.CollectionDescription) ([]*gql.Object, error) { +func (g *Generator) generate(ctx context.Context, collections []client.CollectionDefinition) ([]*gql.Object, error) { // build base types defs, err := g.buildTypes(ctx, collections) if err != nil { @@ -354,7 +354,7 @@ func (g *Generator) createExpandedFieldList( // extract and return the correct gql.Object type(s) func (g *Generator) buildTypes( ctx context.Context, - collections []client.CollectionDescription, + collections []client.CollectionDefinition, ) ([]*gql.Object, error) { // @todo: Check for duplicate named defined types in the TypeMap // get all the defined types from the AST @@ -367,12 +367,12 @@ func (g *Generator) buildTypes( fieldDescriptions := collection.Schema.Fields // check if type exists - if _, ok := g.manager.schema.TypeMap()[collection.Name]; ok { - return nil, NewErrSchemaTypeAlreadyExist(collection.Name) + if _, ok := g.manager.schema.TypeMap()[collection.Description.Name]; ok { + return nil, NewErrSchemaTypeAlreadyExist(collection.Description.Name) } objconf := gql.ObjectConfig{ - Name: collection.Name, + Name: collection.Description.Name, } // Wrap field definition in a thunk so we can @@ -435,9 +435,9 @@ func (g *Generator) buildTypes( Type: gql.Boolean, } - gqlType, ok := g.manager.schema.TypeMap()[collection.Name] + gqlType, ok := g.manager.schema.TypeMap()[collection.Description.Name] if !ok { - return nil, NewErrObjectNotFoundDuringThunk(collection.Name) + return nil, NewErrObjectNotFoundDuringThunk(collection.Description.Name) } fields[request.GroupFieldName] = &gql.Field{ diff --git a/request/graphql/schema/index_test.go b/request/graphql/schema/index_test.go index 379b84647d..155a17fbf6 100644 --- a/request/graphql/schema/index_test.go +++ b/request/graphql/schema/index_test.go @@ -276,9 +276,9 @@ func parseIndexAndTest(t *testing.T, testCase indexTestCase) { cols, err := FromString(ctx, testCase.sdl) assert.NoError(t, err, testCase.description) assert.Equal(t, len(cols), 1, testCase.description) - assert.Equal(t, len(cols[0].Indexes), len(testCase.targetDescriptions), testCase.description) + assert.Equal(t, len(cols[0].Description.Indexes), len(testCase.targetDescriptions), testCase.description) - for i, d := range cols[0].Indexes { + for i, d := range cols[0].Description.Indexes { assert.Equal(t, testCase.targetDescriptions[i], d, testCase.description) } } diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go index 261561ca8d..6176273b02 100644 --- a/tests/clients/cli/wrapper.go +++ b/tests/clients/cli/wrapper.go @@ -200,11 +200,11 @@ func (w *Wrapper) GetCollectionByName(ctx context.Context, name client.Collectio if err != nil { return nil, err } - var colDesc client.CollectionDescription - if err := json.Unmarshal(data, &colDesc); err != nil { + var definition client.CollectionDefinition + if err := json.Unmarshal(data, &definition); err != nil { return nil, err } - return &Collection{w.cmd, colDesc}, nil + return &Collection{w.cmd, definition}, nil } func (w *Wrapper) GetCollectionBySchemaID(ctx context.Context, schemaId string) (client.Collection, error) { @@ -215,11 +215,11 @@ func (w *Wrapper) GetCollectionBySchemaID(ctx context.Context, schemaId string) if err != nil { return nil, err } - var colDesc client.CollectionDescription - if err := json.Unmarshal(data, &colDesc); err != nil { + var definition client.CollectionDefinition + if err := json.Unmarshal(data, &definition); err != nil { return nil, err } - 
return &Collection{w.cmd, colDesc}, nil + return &Collection{w.cmd, definition}, nil } func (w *Wrapper) GetCollectionByVersionID(ctx context.Context, versionId string) (client.Collection, error) { @@ -230,11 +230,11 @@ func (w *Wrapper) GetCollectionByVersionID(ctx context.Context, versionId string if err != nil { return nil, err } - var colDesc client.CollectionDescription - if err := json.Unmarshal(data, &colDesc); err != nil { + var definition client.CollectionDefinition + if err := json.Unmarshal(data, &definition); err != nil { return nil, err } - return &Collection{w.cmd, colDesc}, nil + return &Collection{w.cmd, definition}, nil } func (w *Wrapper) GetAllCollections(ctx context.Context) ([]client.Collection, error) { @@ -244,7 +244,7 @@ func (w *Wrapper) GetAllCollections(ctx context.Context) ([]client.Collection, e if err != nil { return nil, err } - var colDesc []client.CollectionDescription + var colDesc []client.CollectionDefinition if err := json.Unmarshal(data, &colDesc); err != nil { return nil, err } diff --git a/tests/clients/cli/wrapper_collection.go b/tests/clients/cli/wrapper_collection.go index 3500bdce7c..4f4a97741d 100644 --- a/tests/clients/cli/wrapper_collection.go +++ b/tests/clients/cli/wrapper_collection.go @@ -26,37 +26,41 @@ import ( var _ client.Collection = (*Collection)(nil) type Collection struct { - cmd *cliWrapper - desc client.CollectionDescription + cmd *cliWrapper + def client.CollectionDefinition } func (c *Collection) Description() client.CollectionDescription { - return c.desc + return c.def.Description } func (c *Collection) Name() string { - return c.desc.Name + return c.Description().Name } func (c *Collection) Schema() client.SchemaDescription { - return c.desc.Schema + return c.def.Schema } func (c *Collection) ID() uint32 { - return c.desc.ID + return c.Description().ID } func (c *Collection) SchemaID() string { - return c.desc.Schema.SchemaID + return c.Schema().SchemaID +} + +func (c *Collection) Definition() client.CollectionDefinition { + return c.def } func (c *Collection) Create(ctx context.Context, doc *client.Document) error { args := []string{"client", "collection", "create"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", c.Description().Name) // We must call this here, else the doc key on the given object will not match // that of the document saved in the database - err := doc.RemapAliasFieldsAndDockey(c.Description().Schema.Fields) + err := doc.RemapAliasFieldsAndDockey(c.Schema().Fields) if err != nil { return err } @@ -76,13 +80,13 @@ func (c *Collection) Create(ctx context.Context, doc *client.Document) error { func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) error { args := []string{"client", "collection", "create"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", c.Description().Name) docMapList := make([]map[string]any, len(docs)) for i, doc := range docs { // We must call this here, else the doc key on the given object will not match // that of the document saved in the database - err := doc.RemapAliasFieldsAndDockey(c.Description().Schema.Fields) + err := doc.RemapAliasFieldsAndDockey(c.Schema().Fields) if err != nil { return err } @@ -110,7 +114,7 @@ func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) er func (c *Collection) Update(ctx context.Context, doc *client.Document) error { args := []string{"client", "collection", "update"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", 
c.Description().Name) args = append(args, "--key", doc.Key().String()) document, err := doc.ToJSONPatch() @@ -188,7 +192,7 @@ func (c *Collection) UpdateWithFilter( updater string, ) (*client.UpdateResult, error) { args := []string{"client", "collection", "update"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", c.Description().Name) args = append(args, "--updater", updater) filterJSON, err := json.Marshal(filter) @@ -206,7 +210,7 @@ func (c *Collection) UpdateWithKey( updater string, ) (*client.UpdateResult, error) { args := []string{"client", "collection", "update"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", c.Description().Name) args = append(args, "--key", key.String()) args = append(args, "--updater", updater) @@ -219,7 +223,7 @@ func (c *Collection) UpdateWithKeys( updater string, ) (*client.UpdateResult, error) { args := []string{"client", "collection", "update"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", c.Description().Name) args = append(args, "--updater", updater) keys := make([]string, len(docKeys)) @@ -261,7 +265,7 @@ func (c *Collection) deleteWith( func (c *Collection) DeleteWithFilter(ctx context.Context, filter any) (*client.DeleteResult, error) { args := []string{"client", "collection", "delete"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", c.Description().Name) filterJSON, err := json.Marshal(filter) if err != nil { @@ -274,7 +278,7 @@ func (c *Collection) DeleteWithFilter(ctx context.Context, filter any) (*client. func (c *Collection) DeleteWithKey(ctx context.Context, docKey client.DocKey) (*client.DeleteResult, error) { args := []string{"client", "collection", "delete"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", c.Description().Name) args = append(args, "--key", docKey.String()) return c.deleteWith(ctx, args) @@ -282,7 +286,7 @@ func (c *Collection) DeleteWithKey(ctx context.Context, docKey client.DocKey) (* func (c *Collection) DeleteWithKeys(ctx context.Context, docKeys []client.DocKey) (*client.DeleteResult, error) { args := []string{"client", "collection", "delete"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", c.Description().Name) keys := make([]string, len(docKeys)) for i, v := range docKeys { @@ -295,7 +299,7 @@ func (c *Collection) DeleteWithKeys(ctx context.Context, docKeys []client.DocKey func (c *Collection) Get(ctx context.Context, key client.DocKey, showDeleted bool) (*client.Document, error) { args := []string{"client", "collection", "get"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", c.Description().Name) args = append(args, key.String()) if showDeleted { @@ -315,14 +319,14 @@ func (c *Collection) Get(ctx context.Context, key client.DocKey, showDeleted boo func (c *Collection) WithTxn(tx datastore.Txn) client.Collection { return &Collection{ - cmd: c.cmd.withTxn(tx), - desc: c.desc, + cmd: c.cmd.withTxn(tx), + def: c.def, } } func (c *Collection) GetAllDocKeys(ctx context.Context) (<-chan client.DocKeysResult, error) { args := []string{"client", "collection", "keys"} - args = append(args, "--name", c.desc.Name) + args = append(args, "--name", c.Description().Name) stdOut, _, err := c.cmd.executeStream(ctx, args) if err != nil { @@ -361,7 +365,7 @@ func (c *Collection) CreateIndex( indexDesc client.IndexDescription, ) (index client.IndexDescription, err error) { args := []string{"client", "index", "create"} - args = append(args, 
"--collection", c.desc.Name) + args = append(args, "--collection", c.Description().Name) args = append(args, "--name", indexDesc.Name) fields := make([]string, len(indexDesc.Fields)) @@ -382,7 +386,7 @@ func (c *Collection) CreateIndex( func (c *Collection) DropIndex(ctx context.Context, indexName string) error { args := []string{"client", "index", "drop"} - args = append(args, "--collection", c.desc.Name) + args = append(args, "--collection", c.Description().Name) args = append(args, "--name", indexName) _, err := c.cmd.execute(ctx, args) @@ -391,7 +395,7 @@ func (c *Collection) DropIndex(ctx context.Context, indexName string) error { func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) { args := []string{"client", "index", "list"} - args = append(args, "--collection", c.desc.Name) + args = append(args, "--collection", c.Description().Name) data, err := c.cmd.execute(ctx, args) if err != nil { diff --git a/tests/integration/schema/updates/remove/simple_test.go b/tests/integration/schema/updates/remove/simple_test.go index 1bbb956f4b..19f9ea1836 100644 --- a/tests/integration/schema/updates/remove/simple_test.go +++ b/tests/integration/schema/updates/remove/simple_test.go @@ -143,7 +143,7 @@ func TestSchemaUpdatesRemoveSchemaNameErrors(t *testing.T) { { "op": "remove", "path": "/Users/Schema/Name" } ] `, - ExpectedError: "modifying the schema name is not supported. ExistingName: Users, ProposedName: ", + ExpectedError: "schema name can't be empty", }, }, } From 25d476771519510b4f4fa7db6ed8551821b54d8b Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Mon, 16 Oct 2023 15:10:24 -0700 Subject: [PATCH 27/55] refactor: P2P client interface (#1924) ## Relevant issue(s) Resolves #1883 ## Description Blocked by #1927 This PR moves the `client.P2P` implementation from `client.DB` to `net.Node`. This fixes the problems mentioned in the issue above and should increase test coverage of the HTTP and CLI clients. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? 
`make test`

Specify the platform(s) on which this was tested:

- MacOS

---
 README.md | 50 ++-
 cli/p2p_collection_add.go | 4 +-
 cli/p2p_collection_getall.go | 4 +-
 cli/p2p_collection_remove.go | 4 +-
 cli/p2p_info.go | 7 +-
 cli/p2p_replicator_delete.go | 30 +-
 cli/p2p_replicator_getall.go | 10 +-
 cli/p2p_replicator_set.go | 23 +-
 cli/start.go | 58 ++-
 cli/utils.go | 7 +
 client/db.go | 5 +-
 client/p2p.go | 8 +
 config/config.go | 3 +-
 db/backup_test.go | 20 +-
 db/db.go | 12 +-
 db/index_test.go | 6 +-
 db/p2p_collection.go | 69 ----
 db/p2p_collection_test.go | 98 -----
 db/replicator.go | 143 -------
 db/replicator_test.go | 242 -----------
 db/txn_db.go | 140 -------
 http/client.go | 104 +----
 http/client_p2p.go | 124 ++++++
 http/errors.go | 1 +
 http/handler.go | 15 +-
 http/handler_p2p.go | 138 +++++++
 http/handler_store.go | 98 -----
 http/middleware.go | 3 -
 http/server.go | 9 -
 http/server_test.go | 5 -
 net/client_test.go | 10 +-
 net/dag_test.go | 24 +-
 net/errors.go | 17 +
 net/node.go | 32 +-
 net/node_test.go | 26 +-
 net/peer.go | 381 +-----------------
 net/peer_collection.go | 173 ++++++++
 net/peer_replicator.go | 207 ++++++++++
 net/peer_test.go | 37 +-
 net/server_test.go | 3 +-
 tests/bench/collection/utils.go | 6 +-
 tests/bench/query/planner/utils.go | 2 +-
 tests/bench/query/simple/utils.go | 2 +-
 tests/clients/cli/wrapper.go | 67 ++-
 tests/clients/clients.go | 26 ++
 tests/clients/http/wrapper.go | 42 +-
 tests/integration/client.go | 85 ++++
 tests/integration/db.go | 151 +++++++
 tests/integration/explain.go | 2 +-
 tests/integration/lens.go | 4 +-
 tests/integration/net/order/utils.go | 17 +-
 .../simple/replicator/with_create_test.go | 50 +++
 tests/integration/p2p.go | 95 +++--
 tests/integration/state.go | 11 +-
 tests/integration/utils2.go | 280 +++----
 55 files changed, 1418 insertions(+), 1772 deletions(-)
 delete mode 100644 db/p2p_collection.go
 delete mode 100644 db/p2p_collection_test.go
 delete mode 100644 db/replicator.go
 delete mode 100644 db/replicator_test.go
 create mode 100644 http/client_p2p.go
 create mode 100644 http/handler_p2p.go
 create mode 100644 net/peer_collection.go
 create mode 100644 net/peer_replicator.go
 create mode 100644 tests/clients/clients.go
 create mode 100644 tests/integration/client.go
 create mode 100644 tests/integration/db.go

diff --git a/README.md b/README.md
index 4d6afa6664..0406802b1d 100644
--- a/README.md
+++ b/README.md
@@ -244,13 +244,20 @@ When starting a node for the first time, a key pair is generated and stored in i
 Each node has a unique `PeerID` generated from its public key. This ID allows other nodes to connect to it.

+To view your node's peer info:
+
+```shell
+defradb client p2p info
+```
+
 There are two types of peer-to-peer relationships supported: **pubsub** peering and **replicator** peering.

 Pubsub peering *passively* synchronizes data between nodes by broadcasting *Document Commit* updates to the topic of the commit's document key. Nodes need to be listening on the pubsub channel to receive updates. This is for when two nodes *already* share a document and want to keep it in sync.

 Replicator peering *actively* pushes changes from a specific collection *to* a target peer.

-### Pubsub example
+<details>
+<summary>Pubsub example</summary>

 Pubsub peers can be specified on the command line using the `--peers` flag, which accepts a comma-separated list of peer [multiaddresses](https://docs.libp2p.io/concepts/addressing/). For example, a node at IP `192.168.1.12` listening on 9000 with PeerID `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B` would be referred to using the multiaddress `/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`.
@@ -258,16 +265,22 @@ Let's go through an example of two nodes (*nodeA* and *nodeB*) connecting with e
 Start *nodeA* with a default configuration:

-```
+```shell
 defradb start
 ```

-Obtain the PeerID from its console output. In this example, we use `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`, but locally it will be different.
+Obtain the node's peer info:
+
+```shell
+defradb client p2p info
+```
+
+In this example, we use `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`, but locally it will be different.

 For *nodeB*, we provide the following configuration:

-```
-defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --tcpaddr /ip4/0.0.0.0/tcp/9162 --peers /ip4/0.0.0.0/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B
+```shell
+defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --peers /ip4/0.0.0.0/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B
 ```

 About the flags:
@@ -275,26 +288,29 @@ About the flags:

 - `--rootdir` specifies the root dir (config and data) to use
 - `--url` is the address to listen on for the client HTTP and GraphQL API
 - `--p2paddr` is the multiaddress for the P2P networking to listen on
-- `--tcpaddr` is the multiaddress for the gRPC server to listen on
 - `--peers` is a comma-separated list of peer multiaddresses

 This starts two nodes and connects them via pubsub networking.
+</details>
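A quick way to confirm that both nodes are up is to ask each for its peer info. This is a minimal sketch reusing the `defradb client p2p info` command shown above; the `--url` value assumes *nodeB*'s API address from this example:

```shell
# nodeA, listening on the default API address
defradb client p2p info

# nodeB, whose API listens on localhost:9182 in this example
defradb client p2p info --url localhost:9182
```

Each call should print the node's PeerID and listening multiaddresses, which is the same information the `--peers` flag consumes.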
 
-### Collection subscription example
+<details>
+<summary>Subscription example</summary>

-It is possible to subscribe to updates on a given collection by using its ID as the pubsub topic. The ID of a collection is found as the field `collectionID` in one of its documents. Here we use the collection ID of the `User` type we created above. After setting up 2 nodes as shown in the [Pubsub example](#pubsub-example) section, we can subscribe to collections updates on *nodeA* from *nodeB* by using the `rpc p2pcollection` command:
+It is possible to subscribe to updates on a given collection by using its ID as the pubsub topic. The ID of a collection is found as the field `collectionID` in one of its documents. Here we use the collection ID of the `User` type we created above. After setting up 2 nodes as shown in the [Pubsub example](#pubsub-example) section, we can subscribe to collection updates on *nodeA* from *nodeB* by using the following command:

 ```shell
-defradb client rpc p2pcollection add --url localhost:9182 bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske
+defradb client p2p collection add --url localhost:9182 bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske
 ```

 Multiple collection IDs can be added at once.

 ```shell
-defradb client rpc p2pcollection add --url localhost:9182 <collection1ID>,<collection2ID>,<collection3ID>
+defradb client p2p collection add --url localhost:9182 <collection1ID>,<collection2ID>,<collection3ID>
 ```
+</details>
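To double-check which collections a node is subscribed to, the `p2p collection` command group also gains a `getall` subcommand in this patch (see `cli/p2p_collection_getall.go` below). A minimal sketch, assuming the subcommand path mirrors `p2p collection add` and using *nodeB*'s API address from this example:

```shell
# List the collection IDs nodeB is synchronizing over the pubsub network
defradb client p2p collection getall --url localhost:9182
```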
 
-### Replicator example
+<details>
+<summary>Replicator example</summary>

 Replicator peering is targeted: it allows a node to actively send updates to another node. Let's go through an example of *nodeA* actively replicating to *nodeB*:
@@ -334,14 +350,20 @@ defradb client schema add --url localhost:9182 '
 '
 ```

-Set *nodeA* to actively replicate the "Article" collection to *nodeB*:
+Then copy the peer info from *nodeB*:

 ```shell
-defradb client rpc replicator set -c "Article" /ip4/0.0.0.0/tcp/9172/p2p/<peerID>
+defradb client p2p info --url localhost:9182
 ```

-As we add or update documents in the "Article" collection on *nodeA*, they will be actively pushed to *nodeB*. Note that changes to *nodeB* will still be passively published back to *nodeA*, via pubsub.
+Set *nodeA* to actively replicate the Article collection to *nodeB*:
+
+```shell
+defradb client p2p replicator set -c Article <nodeB peer info>
+```
+
+As we add or update documents in the Article collection on *nodeA*, they will be actively pushed to *nodeB*. Note that changes to *nodeB* will still be passively published back to *nodeA*, via pubsub.
+</details>
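To verify that the replicator was registered, the `replicator getall` subcommand (see `cli/p2p_replicator_getall.go` below) lists the replicators active in the P2P data sync system. A minimal sketch, run against *nodeA*:

```shell
# List the replicators nodeA is actively pushing to
defradb client p2p replicator getall
```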
## Securing the HTTP API with TLS diff --git a/cli/p2p_collection_add.go b/cli/p2p_collection_add.go index 86e0d8f6f9..dedae0a358 100644 --- a/cli/p2p_collection_add.go +++ b/cli/p2p_collection_add.go @@ -31,7 +31,7 @@ Example: add multiple collections `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + p2p := mustGetP2PContext(cmd) var collectionIDs []string for _, id := range strings.Split(args[0], ",") { @@ -42,7 +42,7 @@ Example: add multiple collections collectionIDs = append(collectionIDs, id) } - return store.AddP2PCollections(cmd.Context(), collectionIDs) + return p2p.AddP2PCollections(cmd.Context(), collectionIDs) }, } return cmd diff --git a/cli/p2p_collection_getall.go b/cli/p2p_collection_getall.go index c07a63f453..10d98582c6 100644 --- a/cli/p2p_collection_getall.go +++ b/cli/p2p_collection_getall.go @@ -22,9 +22,9 @@ func MakeP2PCollectionGetAllCommand() *cobra.Command { This is the list of collections of the node that are synchronized on the pubsub network.`, Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + p2p := mustGetP2PContext(cmd) - cols, err := store.GetAllP2PCollections(cmd.Context()) + cols, err := p2p.GetAllP2PCollections(cmd.Context()) if err != nil { return err } diff --git a/cli/p2p_collection_remove.go b/cli/p2p_collection_remove.go index 0c4d14effd..8aa0b5b7df 100644 --- a/cli/p2p_collection_remove.go +++ b/cli/p2p_collection_remove.go @@ -31,7 +31,7 @@ Example: remove multiple collections `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + p2p := mustGetP2PContext(cmd) var collectionIDs []string for _, id := range strings.Split(args[0], ",") { @@ -42,7 +42,7 @@ Example: remove multiple collections collectionIDs = append(collectionIDs, id) } - return store.RemoveP2PCollections(cmd.Context(), collectionIDs) + return p2p.RemoveP2PCollections(cmd.Context(), collectionIDs) }, } return cmd diff --git a/cli/p2p_info.go b/cli/p2p_info.go index 1ddad18a52..36adfb8fac 100644 --- a/cli/p2p_info.go +++ b/cli/p2p_info.go @@ -23,12 +23,7 @@ func MakeP2PInfoCommand() *cobra.Command { Long: `Get peer info from a DefraDB node`, RunE: func(cmd *cobra.Command, args []string) error { db := cmd.Context().Value(dbContextKey).(*http.Client) - - res, err := db.PeerInfo(cmd.Context()) - if err != nil { - return err - } - return writeJSON(cmd, res) + return writeJSON(cmd, db.PeerInfo()) }, } return cmd diff --git a/cli/p2p_replicator_delete.go b/cli/p2p_replicator_delete.go index 7504d0c932..6cc2ddf785 100644 --- a/cli/p2p_replicator_delete.go +++ b/cli/p2p_replicator_delete.go @@ -11,6 +11,8 @@ package cli import ( + "encoding/json" + "github.com/libp2p/go-libp2p/core/peer" "github.com/spf13/cobra" @@ -18,20 +20,32 @@ import ( ) func MakeP2PReplicatorDeleteCommand() *cobra.Command { + var collections []string var cmd = &cobra.Command{ - Use: "delete ", - Short: "Delete a replicator. It will stop synchronizing", - Long: `Delete a replicator. It will stop synchronizing.`, - Args: cobra.ExactArgs(1), + Use: "delete [-c, --collection] ", + Short: "Delete replicator(s) and stop synchronization", + Long: `Delete replicator(s) and stop synchronization. +A replicator synchronizes one or all collection(s) from this node to another. 
+ +Example: + defradb client p2p replicator delete -c Users '{"ID": "12D3", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}' + `, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + p2p := mustGetP2PContext(cmd) - addr, err := peer.AddrInfoFromString(args[0]) - if err != nil { + var info peer.AddrInfo + if err := json.Unmarshal([]byte(args[0]), &info); err != nil { return err } - return store.DeleteReplicator(cmd.Context(), client.Replicator{Info: *addr}) + rep := client.Replicator{ + Info: info, + Schemas: collections, + } + return p2p.DeleteReplicator(cmd.Context(), rep) }, } + cmd.Flags().StringSliceVarP(&collections, "collection", "c", + []string{}, "Collection(s) to stop replicating") return cmd } diff --git a/cli/p2p_replicator_getall.go b/cli/p2p_replicator_getall.go index 9192ed4d10..4bdf6e8487 100644 --- a/cli/p2p_replicator_getall.go +++ b/cli/p2p_replicator_getall.go @@ -19,11 +19,15 @@ func MakeP2PReplicatorGetAllCommand() *cobra.Command { Use: "getall", Short: "Get all replicators", Long: `Get all the replicators active in the P2P data sync system. -These are the replicators that are currently replicating data from one node to another.`, +A replicator synchronizes one or all collection(s) from this node to another. + +Example: + defradb client p2p replicator getall + `, RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + p2p := mustGetP2PContext(cmd) - reps, err := store.GetAllReplicators(cmd.Context()) + reps, err := p2p.GetAllReplicators(cmd.Context()) if err != nil { return err } diff --git a/cli/p2p_replicator_set.go b/cli/p2p_replicator_set.go index 6b590b6ea7..5d9c712a82 100644 --- a/cli/p2p_replicator_set.go +++ b/cli/p2p_replicator_set.go @@ -11,6 +11,8 @@ package cli import ( + "encoding/json" + "github.com/libp2p/go-libp2p/core/peer" "github.com/spf13/cobra" @@ -21,27 +23,30 @@ func MakeP2PReplicatorSetCommand() *cobra.Command { var collections []string var cmd = &cobra.Command{ Use: "set [-c, --collection] ", - Short: "Set a P2P replicator", - Long: `Add a new target replicator. -A replicator replicates one or all collection(s) from this node to another. + Short: "Add replicator(s) and start synchronization", + Long: `Add replicator(s) and start synchronization. +A replicator synchronizes one or all collection(s) from this node to another. 
+ +Example: + defradb client p2p replicator set -c Users '{"ID": "12D3", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}' `, Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - store := mustGetStoreContext(cmd) + p2p := mustGetP2PContext(cmd) - addr, err := peer.AddrInfoFromString(args[0]) - if err != nil { + var info peer.AddrInfo + if err := json.Unmarshal([]byte(args[0]), &info); err != nil { return err } rep := client.Replicator{ - Info: *addr, + Info: info, Schemas: collections, } - return store.SetReplicator(cmd.Context(), rep) + return p2p.SetReplicator(cmd.Context(), rep) }, } cmd.Flags().StringSliceVarP(&collections, "collection", "c", - []string{}, "Define the collection for the replicator") + []string{}, "Collection(s) to replicate") return cmd } diff --git a/cli/start.go b/cli/start.go index f0f8b19a8a..da99ae06ba 100644 --- a/cli/start.go +++ b/cli/start.go @@ -171,15 +171,10 @@ type defraInstance struct { func (di *defraInstance) close(ctx context.Context) { if di.node != nil { - if err := di.node.Close(); err != nil { - log.FeedbackInfo( - ctx, - "The node could not be closed successfully", - logging.NewKV("Error", err.Error()), - ) - } + di.node.Close() + } else { + di.db.Close() } - di.db.Close(ctx) if err := di.server.Close(); err != nil { log.FeedbackInfo( ctx, @@ -222,7 +217,7 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { } // init the p2p node - var n *net.Node + var node *net.Node if !cfg.Net.P2PDisabled { nodeOpts := []net.NodeOpt{ net.WithConfig(cfg), @@ -239,9 +234,9 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { nodeOpts = append(nodeOpts, net.WithPrivateKey(key)) } log.FeedbackInfo(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) - n, err = net.NewNode(ctx, db, nodeOpts...) + node, err = net.NewNode(ctx, db, nodeOpts...) if err != nil { - db.Close(ctx) + db.Close() return nil, errors.Wrap("failed to start P2P node", err) } @@ -253,14 +248,11 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { return nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", cfg.Net.Peers), err) } log.Debug(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) - n.Bootstrap(addrs) + node.Bootstrap(addrs) } - if err := n.Start(); err != nil { - if e := n.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to close node: %v", e.Error()), err) - } - db.Close(ctx) + if err := node.Start(); err != nil { + node.Close() return nil, errors.Wrap("failed to start P2P listeners", err) } } @@ -271,10 +263,6 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { httpapi.WithAllowedOrigins(cfg.API.AllowedOrigins...), } - if n != nil { - sOpt = append(sOpt, httpapi.WithPeerID(n.PeerID().String())) - } - if cfg.API.TLS { sOpt = append( sOpt, @@ -284,32 +272,36 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { ) } - s := httpapi.NewServer(db, sOpt...) - if err := s.Listen(ctx); err != nil { - return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", s.Addr), err) + var server *httpapi.Server + if node != nil { + server = httpapi.NewServer(node, sOpt...) + } else { + server = httpapi.NewServer(db, sOpt...) 
+ } + if err := server.Listen(ctx); err != nil { + return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", server.Addr), err) } // save the address on the config in case the port number was set to random - cfg.API.Address = s.AssignedAddr() + cfg.API.Address = server.AssignedAddr() // run the server in a separate goroutine go func() { log.FeedbackInfo(ctx, fmt.Sprintf("Providing HTTP API at %s.", cfg.API.AddressToURL())) - if err := s.Run(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) { + if err := server.Run(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) { log.FeedbackErrorE(ctx, "Failed to run the HTTP server", err) - if n != nil { - if err := n.Close(); err != nil { - log.FeedbackErrorE(ctx, "Failed to close node", err) - } + if node != nil { + node.Close() + } else { + db.Close() } - db.Close(ctx) os.Exit(1) } }() return &defraInstance{ - node: n, + node: node, db: db, - server: s, + server: server, }, nil } diff --git a/cli/utils.go b/cli/utils.go index d45808145e..8c1a40dc1f 100644 --- a/cli/utils.go +++ b/cli/utils.go @@ -52,6 +52,13 @@ func mustGetStoreContext(cmd *cobra.Command) client.Store { return cmd.Context().Value(storeContextKey).(client.Store) } +// mustGetP2PContext returns the p2p implementation for the current command context. +// +// If a p2p implementation is not set in the current context this function panics. +func mustGetP2PContext(cmd *cobra.Command) client.P2P { + return cmd.Context().Value(dbContextKey).(client.P2P) +} + // tryGetCollectionContext returns the collection for the current command context // and a boolean indicating if the collection was set. func tryGetCollectionContext(cmd *cobra.Command) (client.Collection, bool) { diff --git a/client/db.go b/client/db.go index 47cd7d5a85..5e4873d8dc 100644 --- a/client/db.go +++ b/client/db.go @@ -60,7 +60,7 @@ type DB interface { // be created after calling this to resume operations on the prior data - this is however dependant on // the behaviour of the rootstore provided on database instance creation, as this function will Close // the provided rootstore. - Close(context.Context) + Close() // Events returns the database event queue. // @@ -82,9 +82,6 @@ type DB interface { // Store contains the core DefraDB read-write operations. type Store interface { - // P2P holds the P2P related methods that must be implemented by the database. - P2P - // Backup holds the backup related methods that must be implemented by the database. Backup diff --git a/client/p2p.go b/client/p2p.go index 800b946240..12be6ebf8d 100644 --- a/client/p2p.go +++ b/client/p2p.go @@ -12,9 +12,17 @@ package client import ( "context" + + "github.com/libp2p/go-libp2p/core/peer" ) +// P2P is a peer connected database implementation. type P2P interface { + DB + + // PeerInfo returns the p2p host id and listening addresses. + PeerInfo() peer.AddrInfo + // SetReplicator adds a replicator to the persisted list or adds // schemas if the replicator already exists. 
SetReplicator(ctx context.Context, rep Replicator) error diff --git a/config/config.go b/config/config.go index 3b2a212c0a..24d5ee73e7 100644 --- a/config/config.go +++ b/config/config.go @@ -375,10 +375,11 @@ func (netcfg *NetConfig) validate() error { peers := strings.Split(netcfg.Peers, ",") maddrs := make([]ma.Multiaddr, len(peers)) for i, addr := range peers { - maddrs[i], err = ma.NewMultiaddr(addr) + addr, err := ma.NewMultiaddr(addr) if err != nil { return NewErrInvalidBootstrapPeers(err, netcfg.Peers) } + maddrs[i] = addr } } return nil diff --git a/db/backup_test.go b/db/backup_test.go index 2f89f54a07..f0e7a6e338 100644 --- a/db/backup_test.go +++ b/db/backup_test.go @@ -25,7 +25,7 @@ func TestBasicExport_WithNormalFormatting_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -87,7 +87,7 @@ func TestBasicExport_WithPrettyFormatting_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -149,7 +149,7 @@ func TestBasicExport_WithSingleCollection_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -211,7 +211,7 @@ func TestBasicExport_WithMultipleCollectionsAndUpdate_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -285,7 +285,7 @@ func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -355,7 +355,7 @@ func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -414,7 +414,7 @@ func TestBasicImport_WithJSONArray_ReturnError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -449,7 +449,7 @@ func TestBasicImport_WithObjectCollection_ReturnError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -484,7 +484,7 @@ func TestBasicImport_WithInvalidFilepath_ReturnError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -520,7 +520,7 @@ func TestBasicImport_WithInvalidCollection_ReturnError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String diff --git a/db/db.go b/db/db.go index 0bc9a361c3..9e73db04bd 100644 --- a/db/db.go +++ b/db/db.go @@ -183,10 +183,6 @@ func (db *db) Blockstore() blockstore.Blockstore { return db.multistore.DAGstore() } -func (db *db) systemstore() 
datastore.DSReaderWriter { - return db.multistore.Systemstore() -} - func (db *db) LensRegistry() client.LensRegistry { return db.lensRegistry } @@ -266,17 +262,17 @@ func (db *db) PrintDump(ctx context.Context) error { // Close is called when we are shutting down the database. // This is the place for any last minute cleanup or releasing of resources (i.e.: Badger instance). -func (db *db) Close(ctx context.Context) { - log.Info(ctx, "Closing DefraDB process...") +func (db *db) Close() { + log.Info(context.Background(), "Closing DefraDB process...") if db.events.Updates.HasValue() { db.events.Updates.Value().Close() } err := db.rootstore.Close() if err != nil { - log.ErrorE(ctx, "Failure closing running process", err) + log.ErrorE(context.Background(), "Failure closing running process", err) } - log.Info(ctx, "Successfully closed running process") + log.Info(context.Background(), "Successfully closed running process") } func printStore(ctx context.Context, store datastore.DSReaderWriter) error { diff --git a/db/index_test.go b/db/index_test.go index d22746a363..f8f3d0b8e6 100644 --- a/db/index_test.go +++ b/db/index_test.go @@ -1153,8 +1153,7 @@ func TestDropIndex_ShouldDeleteIndex(t *testing.T) { func TestDropIndex_IfStorageFails_ReturnError(t *testing.T) { f := newIndexTestFixture(t) desc := f.createUserCollectionIndexOnName() - - f.db.Close(f.ctx) + f.db.Close() err := f.dropIndex(productsColName, desc.Name) assert.Error(t, err) @@ -1290,8 +1289,7 @@ func TestDropAllIndexes_ShouldDeleteAllIndexes(t *testing.T) { func TestDropAllIndexes_IfStorageFails_ReturnError(t *testing.T) { f := newIndexTestFixture(t) f.createUserCollectionIndexOnName() - - f.db.Close(f.ctx) + f.db.Close() err := f.users.(*collection).dropAllIndexes(f.ctx, f.txn) assert.Error(t, err) diff --git a/db/p2p_collection.go b/db/p2p_collection.go deleted file mode 100644 index 02fc4139c2..0000000000 --- a/db/p2p_collection.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package db - -import ( - "context" - - dsq "github.com/ipfs/go-datastore/query" - - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" -) - -const marker = byte(0xff) - -// addP2PCollection adds the given collection ID that the P2P system -// subscribes to to the the persisted list. It will error if the provided -// collection ID is invalid. -func (db *db) addP2PCollection(ctx context.Context, txn datastore.Txn, collectionID string) error { - _, err := db.getCollectionBySchemaID(ctx, txn, collectionID) - if err != nil { - return NewErrAddingP2PCollection(err) - } - key := core.NewP2PCollectionKey(collectionID) - return txn.Systemstore().Put(ctx, key.ToDS(), []byte{marker}) -} - -// removeP2PCollection removes the given collection ID that the P2P system -// subscribes to from the the persisted list. It will error if the provided -// collection ID is invalid. 
-func (db *db) removeP2PCollection(ctx context.Context, txn datastore.Txn, collectionID string) error { - _, err := db.getCollectionBySchemaID(ctx, txn, collectionID) - if err != nil { - return NewErrRemovingP2PCollection(err) - } - key := core.NewP2PCollectionKey(collectionID) - return txn.Systemstore().Delete(ctx, key.ToDS()) -} - -// getAllP2PCollections returns the list of persisted collection IDs that -// the P2P system subscribes to. -func (db *db) getAllP2PCollections(ctx context.Context, txn datastore.Txn) ([]string, error) { - prefix := core.NewP2PCollectionKey("") - results, err := db.systemstore().Query(ctx, dsq.Query{ - Prefix: prefix.ToString(), - }) - if err != nil { - return nil, err - } - - collectionIDs := []string{} - for result := range results.Next() { - key, err := core.NewP2PCollectionKeyFromString(result.Key) - if err != nil { - return nil, err - } - collectionIDs = append(collectionIDs, key.CollectionID) - } - - return collectionIDs, nil -} diff --git a/db/p2p_collection_test.go b/db/p2p_collection_test.go deleted file mode 100644 index 67d5393c66..0000000000 --- a/db/p2p_collection_test.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package db - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" -) - -func newTestCollection( - t *testing.T, - ctx context.Context, - db *implicitTxnDB, - name string, -) client.Collection { - _, err := db.AddSchema( - ctx, - fmt.Sprintf( - `type %s { - Name: String - }`, - name, - ), - ) - require.NoError(t, err) - - col, err := db.GetCollectionByName(ctx, name) - require.NoError(t, err) - - return col -} - -func TestAddP2PCollection(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - - col := newTestCollection(t, ctx, db, "test") - - err = db.AddP2PCollections(ctx, []string{col.SchemaID()}) - require.NoError(t, err) -} - -func TestGetAllP2PCollection(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - - col1 := newTestCollection(t, ctx, db, "test1") - col2 := newTestCollection(t, ctx, db, "test2") - col3 := newTestCollection(t, ctx, db, "test3") - - collectionIDs := []string{col1.SchemaID(), col2.SchemaID(), col3.SchemaID()} - err = db.AddP2PCollections(ctx, collectionIDs) - require.NoError(t, err) - - collections, err := db.GetAllP2PCollections(ctx) - require.NoError(t, err) - require.ElementsMatch(t, collections, collectionIDs) -} - -func TestRemoveP2PCollection(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - - col1 := newTestCollection(t, ctx, db, "test1") - col2 := newTestCollection(t, ctx, db, "test2") - col3 := newTestCollection(t, ctx, db, "test3") - - collectionIDs := []string{col1.SchemaID(), col2.SchemaID(), col3.SchemaID()} - - err = db.AddP2PCollections(ctx, collectionIDs) - require.NoError(t, err) - - err = db.RemoveP2PCollections(ctx, []string{col2.SchemaID()}) - require.NoError(t, err) - - collections, err := db.GetAllP2PCollections(ctx) - 
require.NoError(t, err) - require.ElementsMatch(t, collections, []string{col1.SchemaID(), col3.SchemaID()}) -} diff --git a/db/replicator.go b/db/replicator.go deleted file mode 100644 index 84c94b9f5d..0000000000 --- a/db/replicator.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package db - -import ( - "context" - "encoding/json" - "errors" - - ds "github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-datastore/query" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" -) - -// setReplicator adds a new replicator to the database. -func (db *db) setReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error { - existingRep, err := db.getReplicator(ctx, rep.Info) - if errors.Is(err, ds.ErrNotFound) { - return db.saveReplicator(ctx, txn, rep) - } - if err != nil { - return err - } - - newSchemas := []string{} - for _, newSchema := range rep.Schemas { - isNew := true - for _, existingSchema := range existingRep.Schemas { - if existingSchema == newSchema { - isNew = false - break - } - } - if isNew { - newSchemas = append(newSchemas, newSchema) - } - } - rep.Schemas = append(existingRep.Schemas, newSchemas...) - return db.saveReplicator(ctx, txn, rep) -} - -// deleteReplicator removes a replicator from the database. -func (db *db) deleteReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error { - if len(rep.Schemas) == 0 { - return db.deleteReplicatorKey(ctx, txn, rep.Info.ID) - } - return db.deleteSchemasForReplicator(ctx, txn, rep) -} - -func (db *db) deleteReplicatorKey(ctx context.Context, txn datastore.Txn, pid peer.ID) error { - key := core.NewReplicatorKey(pid.String()) - return txn.Systemstore().Delete(ctx, key.ToDS()) -} - -func (db *db) deleteSchemasForReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error { - existingRep, err := db.getReplicator(ctx, rep.Info) - if err != nil { - return err - } - - updatedSchemaList := []string{} - for _, s := range existingRep.Schemas { - found := false - for _, toDelete := range rep.Schemas { - if toDelete == s { - found = true - break - } - } - if !found { - updatedSchemaList = append(updatedSchemaList, s) - } - } - - if len(updatedSchemaList) == 0 { - return db.deleteReplicatorKey(ctx, txn, rep.Info.ID) - } - - existingRep.Schemas = updatedSchemaList - return db.saveReplicator(ctx, txn, existingRep) -} - -// GetAllReplicators returns all replicators of the database. 
-func (db *db) getAllReplicators(ctx context.Context, txn datastore.Txn) ([]client.Replicator, error) { - reps := []client.Replicator{} - // create collection system prefix query - prefix := core.NewReplicatorKey("") - results, err := txn.Systemstore().Query(ctx, dsq.Query{ - Prefix: prefix.ToString(), - }) - if err != nil { - return nil, err - } - - for result := range results.Next() { - var rep client.Replicator - err = json.Unmarshal(result.Value, &rep) - if err != nil { - return nil, err - } - - reps = append(reps, rep) - } - - return reps, nil -} - -func (db *db) getReplicator(ctx context.Context, info peer.AddrInfo) (client.Replicator, error) { - rep := client.Replicator{} - key := core.NewReplicatorKey(info.ID.String()) - value, err := db.systemstore().Get(ctx, key.ToDS()) - if err != nil { - return rep, err - } - - err = json.Unmarshal(value, &rep) - if err != nil { - return rep, err - } - - return rep, nil -} - -func (db *db) saveReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error { - key := core.NewReplicatorKey(rep.Info.ID.String()) - repBytes, err := json.Marshal(rep) - if err != nil { - return err - } - return txn.Systemstore().Put(ctx, key.ToDS(), repBytes) -} diff --git a/db/replicator_test.go b/db/replicator_test.go deleted file mode 100644 index f21ab585a9..0000000000 --- a/db/replicator_test.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package db - -import ( - "context" - "testing" - - ds "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" -) - -func TestSetReplicator(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test"}, - }) - assert.NoError(t, err) -} - -func TestGetAllReplicatorsWith2Addition(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test"}, - }) - require.NoError(t, err) - - a2, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8C") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. 
- info2, err := peer.AddrInfoFromP2pAddr(a2) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info2, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - reps, err := db.GetAllReplicators(ctx) - require.NoError(t, err) - - assert.Equal(t, []client.Replicator{ - { - Info: *info, - Schemas: []string{"test"}, - }, - { - Info: *info2, - Schemas: []string{"test", "test2", "test3"}, - }, - }, reps) -} - -func TestGetAllReplicatorsWith2AdditionsOnSamePeer(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test"}, - }) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - reps, err := db.GetAllReplicators(ctx) - require.NoError(t, err) - - assert.Equal(t, []client.Replicator{ - { - Info: *info, - Schemas: []string{"test", "test2", "test3"}, - }, - }, reps) -} - -func TestDeleteSchemaForReplicator(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - err = db.DeleteReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test2"}, - }) - require.NoError(t, err) - - rep, err := db.getReplicator(ctx, *info) - require.NoError(t, err) - - assert.Equal(t, client.Replicator{ - Info: *info, - Schemas: []string{"test", "test3"}, - }, rep) -} - -func TestDeleteAllSchemasForReplicator(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - err = db.DeleteReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - _, err = db.getReplicator(ctx, *info) - require.ErrorIs(t, err, ds.ErrNotFound) -} - -func TestDeleteReplicatorWith2Addition(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. 
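These deleted tests all share one fixture idiom: parse a multiaddr that ends in `/p2p/<peer-id>` and split it into a `peer.AddrInfo`. A standalone sketch of that idiom, using the same go-libp2p and go-multiaddr calls the tests used:

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

func main() {
	// A multiaddr ending in /p2p/<peer-id> carries both the dial address
	// and the peer identity.
	a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B")
	if err != nil {
		panic(err)
	}

	// AddrInfoFromP2pAddr splits it into the peer ID plus dialable addresses.
	info, err := peer.AddrInfoFromP2pAddr(a)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.ID, info.Addrs)
}
```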
- info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test"}, - }) - require.NoError(t, err) - - a2, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8C") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. - info2, err := peer.AddrInfoFromP2pAddr(a2) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info2, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - reps, err := db.GetAllReplicators(ctx) - require.NoError(t, err) - - assert.Equal(t, []client.Replicator{ - { - Info: *info, - Schemas: []string{"test"}, - }, - { - Info: *info2, - Schemas: []string{"test", "test2", "test3"}, - }, - }, reps) - - err = db.DeleteReplicator(ctx, client.Replicator{Info: *info}) - require.NoError(t, err) - - reps, err = db.GetAllReplicators(ctx) - require.NoError(t, err) - - assert.Equal(t, []client.Replicator{ - { - Info: *info2, - Schemas: []string{"test", "test2", "test3"}, - }, - }, reps) -} diff --git a/db/txn_db.go b/db/txn_db.go index e996d9a9c8..0627f8ebc8 100644 --- a/db/txn_db.go +++ b/db/txn_db.go @@ -121,70 +121,6 @@ func (db *explicitTxnDB) GetCollectionByVersionID( return db.getCollectionByVersionID(ctx, db.txn, schemaVersionID) } -// AddP2PCollections adds the given collection IDs to the P2P system and -// subscribes to their topics. It will error if any of the provided -// collection IDs are invalid. -func (db *implicitTxnDB) AddP2PCollections(ctx context.Context, collectionIDs []string) error { - txn, err := db.NewTxn(ctx, false) - if err != nil { - return err - } - defer txn.Discard(ctx) - - for _, collectionID := range collectionIDs { - err = db.addP2PCollection(ctx, txn, collectionID) - if err != nil { - return err - } - } - return txn.Commit(ctx) -} - -// AddP2PCollections adds the given collection IDs to the P2P system and -// subscribes to their topics. It will error if any of the provided -// collection IDs are invalid. -func (db *explicitTxnDB) AddP2PCollections(ctx context.Context, collectionIDs []string) error { - for _, collectionID := range collectionIDs { - err := db.addP2PCollection(ctx, db.txn, collectionID) - if err != nil { - return err - } - } - return nil -} - -// RemoveP2PCollections removes the given collection IDs from the P2P system and -// unsubscribes from their topics. It will error if the provided -// collection IDs are invalid. -func (db *implicitTxnDB) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { - txn, err := db.NewTxn(ctx, false) - if err != nil { - return err - } - defer txn.Discard(ctx) - - for _, collectionID := range collectionIDs { - err = db.removeP2PCollection(ctx, txn, collectionID) - if err != nil { - return err - } - } - return txn.Commit(ctx) -} - -// RemoveP2PCollections removes the given collection IDs from the P2P system and -// unsubscribes from their topics. It will error if the provided -// collection IDs are invalid. -func (db *explicitTxnDB) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { - for _, collectionID := range collectionIDs { - err := db.removeP2PCollection(ctx, db.txn, collectionID) - if err != nil { - return err - } - } - return nil -} - // GetAllCollections gets all the currently defined collections. 
func (db *implicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collection, error) { txn, err := db.NewTxn(ctx, true) @@ -332,82 +268,6 @@ func (db *explicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig return db.lensRegistry.SetMigration(ctx, cfg) } -// SetReplicator adds a new replicator to the database. -func (db *implicitTxnDB) SetReplicator(ctx context.Context, rep client.Replicator) error { - txn, err := db.NewTxn(ctx, false) - if err != nil { - return err - } - defer txn.Discard(ctx) - - err = db.setReplicator(ctx, txn, rep) - if err != nil { - return err - } - - return txn.Commit(ctx) -} - -// SetReplicator adds a new replicator to the database. -func (db *explicitTxnDB) SetReplicator(ctx context.Context, rep client.Replicator) error { - return db.setReplicator(ctx, db.txn, rep) -} - -// DeleteReplicator removes a replicator from the database. -func (db *implicitTxnDB) DeleteReplicator(ctx context.Context, rep client.Replicator) error { - txn, err := db.NewTxn(ctx, false) - if err != nil { - return err - } - defer txn.Discard(ctx) - - err = db.deleteReplicator(ctx, txn, rep) - if err != nil { - return err - } - - return txn.Commit(ctx) -} - -// DeleteReplicator removes a replicator from the database. -func (db *explicitTxnDB) DeleteReplicator(ctx context.Context, rep client.Replicator) error { - return db.deleteReplicator(ctx, db.txn, rep) -} - -// GetAllReplicators returns all replicators of the database. -func (db *implicitTxnDB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - txn, err := db.NewTxn(ctx, true) - if err != nil { - return nil, err - } - defer txn.Discard(ctx) - - return db.getAllReplicators(ctx, txn) -} - -// GetAllReplicators returns all replicators of the database. -func (db *explicitTxnDB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - return db.getAllReplicators(ctx, db.txn) -} - -// GetAllP2PCollections returns the list of persisted collection IDs that -// the P2P system subscribes to. -func (db *implicitTxnDB) GetAllP2PCollections(ctx context.Context) ([]string, error) { - txn, err := db.NewTxn(ctx, true) - if err != nil { - return nil, err - } - defer txn.Discard(ctx) - - return db.getAllP2PCollections(ctx, txn) -} - -// GetAllP2PCollections returns the list of persisted collection IDs that -// the P2P system subscribes to. -func (db *explicitTxnDB) GetAllP2PCollections(ctx context.Context) ([]string, error) { - return db.getAllP2PCollections(ctx, db.txn) -} - // BasicImport imports a json dataset. // filepath must be accessible to the node. 
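Nearly every method pair in `txn_db.go` has this shape: the `implicitTxnDB` variant opens its own transaction, defers `Discard`, and commits only on success, while the `explicitTxnDB` variant runs the same inner call against `db.txn`. A self-contained sketch of the implicit half, assuming a `Txn` interface that stands in for the real defradb transaction type:

```go
package main

import (
	"context"
	"fmt"
)

// Txn is an assumed stand-in for the transaction interface used above.
type Txn interface {
	Commit(ctx context.Context) error
	Discard(ctx context.Context)
}

type noopTxn struct{}

func (noopTxn) Commit(ctx context.Context) error { return nil }
func (noopTxn) Discard(ctx context.Context)      {}

// runInTxn captures the implicit-transaction shape: Discard is deferred,
// so any early error return rolls the transaction back, and Commit only
// runs when the body succeeds.
func runInTxn(ctx context.Context, body func(txn Txn) error) error {
	txn := Txn(noopTxn{}) // the real code obtains this from db.NewTxn(ctx, false)
	defer txn.Discard(ctx)

	if err := body(txn); err != nil {
		return err
	}
	return txn.Commit(ctx)
}

func main() {
	err := runInTxn(context.Background(), func(Txn) error { return nil })
	fmt.Println("committed:", err == nil)
}
```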
func (db *implicitTxnDB) BasicImport(ctx context.Context, filepath string) error { diff --git a/http/client.go b/http/client.go index 21006f2194..d74e4f404d 100644 --- a/http/client.go +++ b/http/client.go @@ -87,94 +87,6 @@ func (c *Client) WithTxn(tx datastore.Txn) client.Store { return &Client{client} } -func (c *Client) SetReplicator(ctx context.Context, rep client.Replicator) error { - methodURL := c.http.baseURL.JoinPath("p2p", "replicators") - - body, err := json.Marshal(rep) - if err != nil { - return err - } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body)) - if err != nil { - return err - } - _, err = c.http.request(req) - return err -} - -func (c *Client) DeleteReplicator(ctx context.Context, rep client.Replicator) error { - methodURL := c.http.baseURL.JoinPath("p2p", "replicators") - - body, err := json.Marshal(rep) - if err != nil { - return err - } - req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body)) - if err != nil { - return err - } - _, err = c.http.request(req) - return err -} - -func (c *Client) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - methodURL := c.http.baseURL.JoinPath("p2p", "replicators") - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) - if err != nil { - return nil, err - } - var reps []client.Replicator - if err := c.http.requestJson(req, &reps); err != nil { - return nil, err - } - return reps, nil -} - -func (c *Client) AddP2PCollections(ctx context.Context, collectionIDs []string) error { - methodURL := c.http.baseURL.JoinPath("p2p", "collections") - - body, err := json.Marshal(collectionIDs) - if err != nil { - return err - } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body)) - if err != nil { - return err - } - _, err = c.http.request(req) - return err -} - -func (c *Client) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { - methodURL := c.http.baseURL.JoinPath("p2p", "collections") - - body, err := json.Marshal(collectionIDs) - if err != nil { - return err - } - req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body)) - if err != nil { - return err - } - _, err = c.http.request(req) - return err -} - -func (c *Client) GetAllP2PCollections(ctx context.Context) ([]string, error) { - methodURL := c.http.baseURL.JoinPath("p2p", "collections") - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) - if err != nil { - return nil, err - } - var cols []string - if err := c.http.requestJson(req, &cols); err != nil { - return nil, err - } - return cols, nil -} - func (c *Client) BasicImport(ctx context.Context, filepath string) error { methodURL := c.http.baseURL.JoinPath("backup", "import") @@ -425,21 +337,7 @@ func (c *Client) PrintDump(ctx context.Context) error { return err } -func (c *Client) PeerInfo(ctx context.Context) (*PeerInfoResponse, error) { - methodURL := c.http.baseURL.JoinPath("p2p", "info") - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) - if err != nil { - return nil, err - } - var res PeerInfoResponse - if err := c.http.requestJson(req, &res); err != nil { - return nil, err - } - return &res, nil -} - -func (c *Client) Close(ctx context.Context) { +func (c *Client) Close() { // do nothing } diff --git a/http/client_p2p.go b/http/client_p2p.go new file mode 100644 index 
0000000000..8d5f470f99 --- /dev/null +++ b/http/client_p2p.go @@ -0,0 +1,124 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/sourcenetwork/defradb/client" +) + +func (c *Client) PeerInfo() peer.AddrInfo { + methodURL := c.http.baseURL.JoinPath("p2p", "info") + + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, methodURL.String(), nil) + if err != nil { + return peer.AddrInfo{} + } + var res peer.AddrInfo + if err := c.http.requestJson(req, &res); err != nil { + return peer.AddrInfo{} + } + return res +} + +func (c *Client) SetReplicator(ctx context.Context, rep client.Replicator) error { + methodURL := c.http.baseURL.JoinPath("p2p", "replicators") + + body, err := json.Marshal(rep) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body)) + if err != nil { + return err + } + _, err = c.http.request(req) + return err +} + +func (c *Client) DeleteReplicator(ctx context.Context, rep client.Replicator) error { + methodURL := c.http.baseURL.JoinPath("p2p", "replicators") + + body, err := json.Marshal(rep) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body)) + if err != nil { + return err + } + _, err = c.http.request(req) + return err +} + +func (c *Client) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { + methodURL := c.http.baseURL.JoinPath("p2p", "replicators") + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) + if err != nil { + return nil, err + } + var reps []client.Replicator + if err := c.http.requestJson(req, &reps); err != nil { + return nil, err + } + return reps, nil +} + +func (c *Client) AddP2PCollections(ctx context.Context, collectionIDs []string) error { + methodURL := c.http.baseURL.JoinPath("p2p", "collections") + + body, err := json.Marshal(collectionIDs) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body)) + if err != nil { + return err + } + _, err = c.http.request(req) + return err +} + +func (c *Client) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { + methodURL := c.http.baseURL.JoinPath("p2p", "collections") + + body, err := json.Marshal(collectionIDs) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body)) + if err != nil { + return err + } + _, err = c.http.request(req) + return err +} + +func (c *Client) GetAllP2PCollections(ctx context.Context) ([]string, error) { + methodURL := c.http.baseURL.JoinPath("p2p", "collections") + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) + if err != nil { + return nil, err + } + var cols []string + if err := c.http.requestJson(req, &cols); err != nil { + return nil, err + } + return cols, nil +} diff --git a/http/errors.go b/http/errors.go index 
848d293a91..7e07053df5 100644 --- a/http/errors.go +++ b/http/errors.go @@ -37,6 +37,7 @@ var ( ErrMigrationNotFound = errors.New("migration not found") ErrMissingRequest = errors.New("missing request") ErrInvalidTransactionId = errors.New("invalid transaction id") + ErrP2PDisabled = errors.New("p2p network is disabled") ) type errorResponse struct { diff --git a/http/handler.go b/http/handler.go index d8cd33c444..b9b5754419 100644 --- a/http/handler.go +++ b/http/handler.go @@ -40,6 +40,7 @@ func NewHandler(db client.DB, opts ServerOptions) *Handler { tx_handler := &txHandler{} store_handler := &storeHandler{} collection_handler := &collectionHandler{} + p2p_handler := &p2pHandler{} lens_handler := &lensHandler{} ccip_handler := &ccipHandler{} @@ -99,16 +100,16 @@ func NewHandler(db client.DB, opts ServerOptions) *Handler { ccip.Post("/", ccip_handler.ExecCCIP) }) api.Route("/p2p", func(p2p chi.Router) { - p2p.Get("/info", store_handler.PeerInfo) + p2p.Get("/info", p2p_handler.PeerInfo) p2p.Route("/replicators", func(p2p_replicators chi.Router) { - p2p_replicators.Get("/", store_handler.GetAllReplicators) - p2p_replicators.Post("/", store_handler.SetReplicator) - p2p_replicators.Delete("/", store_handler.DeleteReplicator) + p2p_replicators.Get("/", p2p_handler.GetAllReplicators) + p2p_replicators.Post("/", p2p_handler.SetReplicator) + p2p_replicators.Delete("/", p2p_handler.DeleteReplicator) }) p2p.Route("/collections", func(p2p_collections chi.Router) { - p2p_collections.Get("/", store_handler.GetAllP2PCollections) - p2p_collections.Post("/", store_handler.AddP2PCollection) - p2p_collections.Delete("/", store_handler.RemoveP2PCollection) + p2p_collections.Get("/", p2p_handler.GetAllP2PCollections) + p2p_collections.Post("/", p2p_handler.AddP2PCollection) + p2p_collections.Delete("/", p2p_handler.RemoveP2PCollection) }) }) api.Route("/debug", func(debug chi.Router) { diff --git a/http/handler_p2p.go b/http/handler_p2p.go new file mode 100644 index 0000000000..cec11b8325 --- /dev/null +++ b/http/handler_p2p.go @@ -0,0 +1,138 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
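The routing hunk above only repoints the `/p2p` routes from `store_handler` to the new `p2p_handler`; the chi nesting itself is unchanged. A minimal sketch of that layout with a placeholder handler (the base path and port are illustrative):

```go
package main

import (
	"net/http"

	"github.com/go-chi/chi/v5"
)

func main() {
	// Placeholder; the real routes dispatch to p2pHandler methods.
	h := func(rw http.ResponseWriter, req *http.Request) {
		rw.WriteHeader(http.StatusOK)
	}

	r := chi.NewRouter()
	r.Route("/api/v0/p2p", func(p2p chi.Router) {
		p2p.Get("/info", h)
		p2p.Route("/replicators", func(rep chi.Router) {
			rep.Get("/", h)
			rep.Post("/", h)
			rep.Delete("/", h)
		})
		p2p.Route("/collections", func(col chi.Router) {
			col.Get("/", h)
			col.Post("/", h)
			col.Delete("/", h)
		})
	})

	_ = http.ListenAndServe(":8080", r)
}
```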
+ +package http + +import ( + "net/http" + + "github.com/sourcenetwork/defradb/client" +) + +type p2pHandler struct{} + +func (s *p2pHandler) PeerInfo(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + responseJSON(rw, http.StatusOK, p2p.PeerInfo()) +} + +func (s *p2pHandler) SetReplicator(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + var rep client.Replicator + if err := requestJSON(req, &rep); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err := p2p.SetReplicator(req.Context(), rep) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + +func (s *p2pHandler) DeleteReplicator(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + var rep client.Replicator + if err := requestJSON(req, &rep); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err := p2p.DeleteReplicator(req.Context(), rep) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + +func (s *p2pHandler) GetAllReplicators(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + reps, err := p2p.GetAllReplicators(req.Context()) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, reps) +} + +func (s *p2pHandler) AddP2PCollection(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + var collectionIDs []string + if err := requestJSON(req, &collectionIDs); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err := p2p.AddP2PCollections(req.Context(), collectionIDs) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + +func (s *p2pHandler) RemoveP2PCollection(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + var collectionIDs []string + if err := requestJSON(req, &collectionIDs); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err := p2p.RemoveP2PCollections(req.Context(), collectionIDs) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + +func (s *p2pHandler) GetAllP2PCollections(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + cols, err := p2p.GetAllP2PCollections(req.Context()) + if err != nil { + responseJSON(rw, http.StatusBadRequest, 
errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, cols) +} diff --git a/http/handler_store.go b/http/handler_store.go index 6361a7b900..ce58383548 100644 --- a/http/handler_store.go +++ b/http/handler_store.go @@ -22,92 +22,6 @@ import ( type storeHandler struct{} -func (s *storeHandler) SetReplicator(rw http.ResponseWriter, req *http.Request) { - store := req.Context().Value(storeContextKey).(client.Store) - - var rep client.Replicator - if err := requestJSON(req, &rep); err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - err := store.SetReplicator(req.Context(), rep) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - rw.WriteHeader(http.StatusOK) -} - -func (s *storeHandler) DeleteReplicator(rw http.ResponseWriter, req *http.Request) { - store := req.Context().Value(storeContextKey).(client.Store) - - var rep client.Replicator - if err := requestJSON(req, &rep); err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - err := store.DeleteReplicator(req.Context(), rep) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - rw.WriteHeader(http.StatusOK) -} - -func (s *storeHandler) GetAllReplicators(rw http.ResponseWriter, req *http.Request) { - store := req.Context().Value(storeContextKey).(client.Store) - - reps, err := store.GetAllReplicators(req.Context()) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - responseJSON(rw, http.StatusOK, reps) -} - -func (s *storeHandler) AddP2PCollection(rw http.ResponseWriter, req *http.Request) { - store := req.Context().Value(storeContextKey).(client.Store) - - var collectionIDs []string - if err := requestJSON(req, &collectionIDs); err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - err := store.AddP2PCollections(req.Context(), collectionIDs) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - rw.WriteHeader(http.StatusOK) -} - -func (s *storeHandler) RemoveP2PCollection(rw http.ResponseWriter, req *http.Request) { - store := req.Context().Value(storeContextKey).(client.Store) - - var collectionIDs []string - if err := requestJSON(req, &collectionIDs); err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - err := store.RemoveP2PCollections(req.Context(), collectionIDs) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - rw.WriteHeader(http.StatusOK) -} - -func (s *storeHandler) GetAllP2PCollections(rw http.ResponseWriter, req *http.Request) { - store := req.Context().Value(storeContextKey).(client.Store) - - cols, err := store.GetAllP2PCollections(req.Context()) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - responseJSON(rw, http.StatusOK, cols) -} - func (s *storeHandler) BasicImport(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) @@ -250,18 +164,6 @@ func (s *storeHandler) PrintDump(rw http.ResponseWriter, req *http.Request) { rw.WriteHeader(http.StatusOK) } -type PeerInfoResponse struct { - PeerID string `json:"peerID"` -} - -func (s *storeHandler) PeerInfo(rw http.ResponseWriter, req *http.Request) { - var res PeerInfoResponse - if value, ok := req.Context().Value(peerIdContextKey).(string); ok { - res.PeerID = value - } - responseJSON(rw, http.StatusOK, &res) -} - type 
GraphQLRequest struct { Query string `json:"query"` } diff --git a/http/middleware.go b/http/middleware.go index 932797ff2c..d33cbfb5ff 100644 --- a/http/middleware.go +++ b/http/middleware.go @@ -53,8 +53,6 @@ var ( // If a transaction exists, all operations will be executed // in the current transaction context. colContextKey = contextKey("col") - // peerIdContextKey contains the peerId of the DefraDB node. - peerIdContextKey = contextKey("peerId") ) // CorsMiddleware handles cross origin request @@ -83,7 +81,6 @@ func ApiMiddleware(db client.DB, txs *sync.Map, opts ServerOptions) func(http.Ha ctx := req.Context() ctx = context.WithValue(ctx, dbContextKey, db) ctx = context.WithValue(ctx, txsContextKey, txs) - ctx = context.WithValue(ctx, peerIdContextKey, opts.PeerID) next.ServeHTTP(rw, req.WithContext(ctx)) }) } diff --git a/http/server.go b/http/server.go index ccfefb08b1..854a73f506 100644 --- a/http/server.go +++ b/http/server.go @@ -61,8 +61,6 @@ type Server struct { type ServerOptions struct { // AllowedOrigins is the list of allowed origins for CORS. AllowedOrigins []string - // PeerID is the p2p id of the server node. - PeerID string // TLS enables https when the value is present. TLS immutable.Option[TLSOptions] // RootDirectory is the directory for the node config. @@ -162,13 +160,6 @@ func WithCAEmail(email string) func(*Server) { } } -// WithPeerID returns an option to set the identifier of the server node. -func WithPeerID(id string) func(*Server) { - return func(s *Server) { - s.options.PeerID = id - } -} - // WithRootDir returns an option to set the root directory for the node config. func WithRootDir(rootDir string) func(*Server) { return func(s *Server) { diff --git a/http/server_test.go b/http/server_test.go index 790f710249..33db303454 100644 --- a/http/server_test.go +++ b/http/server_test.go @@ -221,11 +221,6 @@ func TestNewServerWithCAEmail(t *testing.T) { assert.Equal(t, "me@example.com", s.options.TLS.Value().Email) } -func TestNewServerWithPeerID(t *testing.T) { - s := NewServer(nil, WithPeerID("12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR")) - assert.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", s.options.PeerID) -} - func TestNewServerWithRootDir(t *testing.T) { dir := t.TempDir() s := NewServer(nil, WithRootDir(dir)) diff --git a/net/client_test.go b/net/client_test.go index e28c543175..f9fc495d40 100644 --- a/net/client_test.go +++ b/net/client_test.go @@ -15,7 +15,6 @@ import ( "testing" "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" "google.golang.org/grpc" @@ -76,12 +75,7 @@ func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { _, n2 := newTestNode(ctx, t) n2.Start() - err := n1.host.Connect(ctx, peer.AddrInfo{ - ID: n2.PeerID(), - Addrs: []ma.Multiaddr{ - n2.host.Addrs()[0], - }, - }) + err := n1.host.Connect(ctx, n2.PeerInfo()) require.NoError(t, err) _, err = n1.db.AddSchema(ctx, `type User { @@ -116,6 +110,6 @@ func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { SchemaID: col.SchemaID(), Block: &EmptyNode{}, Priority: 1, - }, n2.PeerID()) + }, n2.PeerInfo().ID) require.NoError(t, err) } diff --git a/net/dag_test.go b/net/dag_test.go index 7373967a76..124c464db4 100644 --- a/net/dag_test.go +++ b/net/dag_test.go @@ -39,8 +39,7 @@ func TestSendJobWorker_ExitOnContextClose_NoError(t *testing.T) { n.sendJobWorker() close(done) }() - err := n.Close() - require.NoError(t, err) + n.Close() select { case <-done: case <-time.After(timeout): @@ -83,8 
+82,7 @@ func TestSendJobWorker_WithNewJobWithClosePriorToProcessing_NoError(t *testing.T txn: txn, } - err = n.Close() - require.NoError(t, err) + n.Close() select { case <-done: case <-time.After(timeout): @@ -128,8 +126,7 @@ func TestSendJobWorker_WithNewJob_NoError(t *testing.T) { } // Give the jobworker time to process the job. time.Sleep(100 * time.Microsecond) - err = n.Close() - require.NoError(t, err) + n.Close() select { case <-done: case <-time.After(timeout): @@ -174,8 +171,7 @@ func TestSendJobWorker_WithCloseJob_NoError(t *testing.T) { n.closeJob <- dsKey.DocKey - err = n.Close() - require.NoError(t, err) + n.Close() select { case <-done: case <-time.After(timeout): @@ -250,10 +246,8 @@ func TestSendJobWorker_WithPeerAndNoChildren_NoError(t *testing.T) { } // Give the jobworker time to process the job. time.Sleep(100 * time.Microsecond) - err = n1.Close() - require.NoError(t, err) - err = n2.Close() - require.NoError(t, err) + n1.Close() + n2.Close() select { case <-done: case <-time.After(timeout): @@ -347,10 +341,8 @@ func TestSendJobWorker_WithPeerAndChildren_NoError(t *testing.T) { } // Give the jobworker time to process the job. time.Sleep(100 * time.Microsecond) - err = n1.Close() - require.NoError(t, err) - err = n2.Close() - require.NoError(t, err) + n1.Close() + n2.Close() select { case <-done: case <-time.After(timeout): diff --git a/net/errors.go b/net/errors.go index 3f8d4926c5..e9ac8fc748 100644 --- a/net/errors.go +++ b/net/errors.go @@ -13,6 +13,8 @@ package net import ( "fmt" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/defradb/errors" ) @@ -21,6 +23,9 @@ const ( errFailedToGetDockey = "failed to get DocKey from broadcast message" errPublishingToDockeyTopic = "can't publish log %s for dockey %s" errPublishingToSchemaTopic = "can't publish log %s for schema %s" + errReplicatorExists = "replicator already exists for %s with peerID %s" + errReplicatorDocKey = "failed to get dockey for replicator %s with peerID %s" + errReplicatorCollections = "failed to get collections for replicator" ) var ( @@ -47,3 +52,15 @@ func NewErrPublishingToDockeyTopic(inner error, cid, key string, kv ...errors.KV func NewErrPublishingToSchemaTopic(inner error, cid, key string, kv ...errors.KV) error { return errors.Wrap(fmt.Sprintf(errPublishingToSchemaTopic, cid, key), inner, kv...) } + +func NewErrReplicatorExists(collection string, peerID peer.ID, kv ...errors.KV) error { + return errors.New(fmt.Sprintf(errReplicatorExists, collection, peerID), kv...) +} + +func NewErrReplicatorDocKey(inner error, collection string, peerID peer.ID, kv ...errors.KV) error { + return errors.Wrap(fmt.Sprintf(errReplicatorDocKey, collection, peerID), inner, kv...) +} + +func NewErrReplicatorCollections(inner error, kv ...errors.KV) error { + return errors.Wrap(errReplicatorCollections, inner, kv...) +} diff --git a/net/node.go b/net/node.go index 392267fefa..b2a9f08ed8 100644 --- a/net/node.go +++ b/net/node.go @@ -51,10 +51,12 @@ import ( var evtWaitTimeout = 10 * time.Second +var _ client.P2P = (*Node)(nil) + // Node is a networked peer instance of DefraDB. type Node struct { // embed the DB interface into the node - DB client.DB + client.DB *Peer @@ -124,8 +126,8 @@ func NewNode( return ddht, err }), } - if options.EnableRelay { - libp2pOpts = append(libp2pOpts, libp2p.EnableRelay()) + if !options.EnableRelay { + libp2pOpts = append(libp2pOpts, libp2p.DisableRelay()) } h, err := libp2p.New(libp2pOpts...) 
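Context for the relay hunk above: recent go-libp2p versions enable relay transport by default, so the node now expresses the option as an explicit opt-out via `DisableRelay()` rather than an opt-in via `EnableRelay()`. A runnable sketch of constructing a host that way:

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
)

func main() {
	enableRelay := false

	// Relay is on by default, so only the opt-out needs an option.
	var opts []libp2p.Option
	if !enableRelay {
		opts = append(opts, libp2p.DisableRelay())
	}

	h, err := libp2p.New(opts...)
	if err != nil {
		panic(err)
	}
	defer h.Close()

	fmt.Println("host id:", h.ID())
}
```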
@@ -223,21 +225,18 @@ func (n *Node) Bootstrap(addrs []peer.AddrInfo) { } } -// ListenAddrs returns the Multiaddr list of the hosts' listening addresses. -func (n *Node) ListenAddrs() []multiaddr.Multiaddr { - return n.host.Addrs() -} - -// PeerID returns the node's peer ID. func (n *Node) PeerID() peer.ID { return n.host.ID() } -// PeerInfo returns the node's peer id and listening addresses. +func (n *Node) ListenAddrs() []multiaddr.Multiaddr { + return n.host.Network().ListenAddresses() +} + func (n *Node) PeerInfo() peer.AddrInfo { return peer.AddrInfo{ ID: n.host.ID(), - Addrs: n.host.Addrs(), + Addrs: n.host.Network().ListenAddresses(), } } @@ -405,7 +404,12 @@ func newDHT(ctx context.Context, h host.Host, dsb ds.Batching) (*dualdht.DHT, er } // Close closes the node and all its services. -func (n Node) Close() error { - n.cancel() - return n.Peer.Close() +func (n Node) Close() { + if n.cancel != nil { + n.cancel() + } + if n.Peer != nil { + n.Peer.Close() + } + n.DB.Close() } diff --git a/net/node_test.go b/net/node_test.go index 941d171726..3e50dfa797 100644 --- a/net/node_test.go +++ b/net/node_test.go @@ -60,9 +60,11 @@ func TestNewNode_WithEnableRelay_NoError(t *testing.T) { func TestNewNode_WithDBClosed_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) require.NoError(t, err) - db.Close(ctx) + db.Close() + _, err = NewNode( context.Background(), db, @@ -111,8 +113,7 @@ func TestNodeClose_NoError(t *testing.T) { db, ) require.NoError(t, err) - err = n.Close() - require.NoError(t, err) + n.Close() } func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { @@ -223,7 +224,6 @@ func TestNodeConfig_NoError(t *testing.T) { for k, v := range options.ListenAddrs { require.Equal(t, expectedOptions.ListenAddrs[k], v) } - require.Equal(t, expectedOptions.EnablePubSub, options.EnablePubSub) require.Equal(t, expectedOptions.EnableRelay, options.EnableRelay) } @@ -448,9 +448,10 @@ func TestWaitForPubSubEvent_WithDifferentPeerAndContextClosed_NoError(t *testing } func TestWaitForPushLogByPeerEvent_WithSamePeer_NoError(t *testing.T) { + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, ) require.NoError(t, err) @@ -472,9 +473,10 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) defer func() { evtWaitTimeout = 10 * time.Second }() + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, ) require.NoError(t, err) @@ -490,9 +492,10 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) } func TestWaitForPushLogByPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, ) require.NoError(t, err) @@ -510,9 +513,10 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeerAndContextClosed_NoError(t * } func TestWaitForPushLogFromPeerEvent_WithSamePeer_NoError(t *testing.T) { + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, ) require.NoError(t, err) @@ -534,9 +538,10 @@ func TestWaitForPushLogFromPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T defer func() { evtWaitTimeout = 10 * time.Second }() + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - 
context.Background(), + ctx, db, ) require.NoError(t, err) @@ -552,9 +557,10 @@ func TestWaitForPushLogFromPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T } func TestWaitForPushLogFromPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, ) require.NoError(t, err) diff --git a/net/peer.go b/net/peer.go index e24d124210..305df7caa9 100644 --- a/net/peer.go +++ b/net/peer.go @@ -14,7 +14,6 @@ package net import ( "context" - "fmt" "sync" "time" @@ -194,7 +193,7 @@ func (p *Peer) Start() error { } // Close the peer node and all its internal workers/goroutines/loops. -func (p *Peer) Close() error { +func (p *Peer) Close() { // close topics if err := p.server.removeAllPubsubTopics(); err != nil { log.ErrorE(p.ctx, "Error closing pubsub topics", err) @@ -234,7 +233,6 @@ func (p *Peer) Close() error { } p.cancel() - return nil } // handleBroadcast loop manages the transition of messages @@ -307,124 +305,6 @@ func (p *Peer) RegisterNewDocument( return p.server.publishLog(p.ctx, schemaID, req) } -// SetReplicator adds a target peer node as a replication destination for documents in our DB. -func (p *Peer) SetReplicator( - ctx context.Context, - rep client.Replicator, -) error { - txn, err := p.db.NewTxn(ctx, true) - if err != nil { - return err - } - store := p.db.WithTxn(txn) - - err = p.setReplicator(ctx, store, rep.Info, rep.Schemas...) - if err != nil { - txn.Discard(ctx) - return err - } - - return txn.Commit(ctx) -} - -// setReplicator adds a target peer node as a replication destination for documents in our DB. -func (p *Peer) setReplicator( - ctx context.Context, - store client.Store, - info peer.AddrInfo, - collectionNames ...string, -) error { - // verify collections - collections := []client.Collection{} - schemas := []string{} - if len(collectionNames) == 0 { - var err error - collections, err = store.GetAllCollections(ctx) - if err != nil { - return errors.Wrap("failed to get all collections for replicator", err) - } - for _, col := range collections { - schemas = append(schemas, col.SchemaID()) - } - } else { - for _, cName := range collectionNames { - col, err := store.GetCollectionByName(ctx, cName) - if err != nil { - return errors.Wrap("failed to get collection for replicator", err) - } - collections = append(collections, col) - schemas = append(schemas, col.SchemaID()) - } - } - - // make sure it's not ourselves - if info.ID == p.host.ID() { - return errors.New("can't target ourselves as a replicator") - } - if err := info.ID.Validate(); err != nil { - return err - } - - // Add the destination's peer multiaddress in the peerstore. - // This will be used during connection and stream creation by libp2p. 
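Both this removed code and its replacement in `net/peer_replicator.go` lean on the libp2p peerstore to remember a replicator's dial addresses. A hedged sketch of that call sequence against a default host (the multiaddr is a sample value):

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/peerstore"
)

func main() {
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	defer h.Close()

	// A replicator target, as parsed from a /p2p multiaddr elsewhere.
	info, err := peer.AddrInfoFromString("/ip4/127.0.0.1/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B")
	if err != nil {
		panic(err)
	}

	// Remember the peer's addresses permanently so later connection and
	// stream creation can dial it; ClearAddrs undoes this on delete.
	h.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.PermanentAddrTTL)
	fmt.Println(h.Peerstore().Addrs(info.ID))
}
```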
- p.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.PermanentAddrTTL) - - // make sure we're not duplicating things - p.mu.Lock() - for _, col := range collections { - if reps, exists := p.replicators[col.SchemaID()]; exists { - if _, exists := reps[info.ID]; exists { - p.mu.Unlock() - return errors.New(fmt.Sprintf( - "Replicator already exists for %s with PeerID %s", - col.Name(), - info.ID, - )) - } - } else { - p.replicators[col.SchemaID()] = make(map[peer.ID]struct{}) - } - // add to replicators list for the collection - p.replicators[col.SchemaID()][info.ID] = struct{}{} - } - p.mu.Unlock() - - // Persist peer in datastore - err := p.db.SetReplicator(ctx, client.Replicator{ - Info: info, - Schemas: schemas, - }) - if err != nil { - return errors.Wrap("failed to persist replicator", err) - } - - for _, col := range collections { - // create read only txn and assign to col - txn, err := p.db.NewTxn(ctx, true) - if err != nil { - return errors.Wrap("failed to get txn", err) - } - col = col.WithTxn(txn) - - // get dockeys (all) - keysCh, err := col.GetAllDocKeys(ctx) - if err != nil { - txn.Discard(ctx) - return errors.Wrap( - fmt.Sprintf( - "Failed to get dockey for replicator %s on %s", - info.ID, - col.Name(), - ), - err, - ) - } - - p.pushToReplicator(ctx, txn, col, keysCh, info.ID) - } - return nil -} - func (p *Peer) pushToReplicator( ctx context.Context, txn datastore.Txn, @@ -491,102 +371,8 @@ func (p *Peer) pushToReplicator( } } -// DeleteReplicator removes a peer node from the replicators. -func (p *Peer) DeleteReplicator( - ctx context.Context, - rep client.Replicator, -) error { - log.Debug(ctx, "Received DeleteReplicator request") - - txn, err := p.db.NewTxn(ctx, true) - if err != nil { - return err - } - store := p.db.WithTxn(txn) - - err = p.deleteReplicator(ctx, store, rep.Info, rep.Schemas...) - if err != nil { - txn.Discard(ctx) - return err - } - - return txn.Commit(ctx) -} - -func (p *Peer) deleteReplicator( - ctx context.Context, - store client.Store, - info peer.AddrInfo, - collectionNames ...string, -) error { - // make sure it's not ourselves - if info.ID == p.host.ID() { - return ErrSelfTargetForReplicator - } - if err := info.ID.Validate(); err != nil { - return err - } - - // verify collections - schemas := []string{} - schemaMap := make(map[string]struct{}) - if len(collectionNames) == 0 { - var err error - collections, err := store.GetAllCollections(ctx) - if err != nil { - return errors.Wrap("failed to get all collections for replicator", err) - } - for _, col := range collections { - schemas = append(schemas, col.SchemaID()) - schemaMap[col.SchemaID()] = struct{}{} - } - } else { - for _, cName := range collectionNames { - col, err := store.GetCollectionByName(ctx, cName) - if err != nil { - return errors.Wrap("failed to get collection for replicator", err) - } - schemas = append(schemas, col.SchemaID()) - schemaMap[col.SchemaID()] = struct{}{} - } - } - - // make sure we're not duplicating things - p.mu.Lock() - defer p.mu.Unlock() - - totalSchemas := 0 // Lets keep track of how many schemas are left for the replicator. - for schema, rep := range p.replicators { - if _, exists := rep[info.ID]; exists { - if _, toDelete := schemaMap[schema]; toDelete { - delete(p.replicators[schema], info.ID) - } else { - totalSchemas++ - } - } - } - - if totalSchemas == 0 { - // Remove the destination's peer multiaddress in the peerstore. 
- p.host.Peerstore().ClearAddrs(info.ID) - } - - // Delete peer in datastore - return p.db.DeleteReplicator(ctx, client.Replicator{ - Info: peer.AddrInfo{ID: info.ID}, - Schemas: schemas, - }) -} - -// GetAllReplicators returns all replicators and the schemas that are replicated to them. -func (p *Peer) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - log.Debug(ctx, "Received GetAllReplicators request") - - return p.db.GetAllReplicators(ctx) -} - func (p *Peer) loadReplicators(ctx context.Context) error { - reps, err := p.db.GetAllReplicators(ctx) + reps, err := p.GetAllReplicators(ctx) if err != nil { return errors.Wrap("failed to get replicators", err) } @@ -617,7 +403,7 @@ func (p *Peer) loadReplicators(ctx context.Context) error { } func (p *Peer) loadP2PCollections(ctx context.Context) (map[string]struct{}, error) { - collections, err := p.db.GetAllP2PCollections(ctx) + collections, err := p.GetAllP2PCollections(ctx) if err != nil && !errors.Is(err, ds.ErrNotFound) { return nil, err } @@ -794,164 +580,3 @@ func (p *Peer) rollbackRemovePubSubTopics(topics []string, cause error) error { } return cause } - -// AddP2PCollection adds the given collectionID to the pubsup topics. -// -// It will error if the given collectionID is invalid, in such a case some of the -// changes to the server may still be applied. -// -// WARNING: Calling this on collections with a large number of documents may take a long time to process. -func (p *Peer) AddP2PCollections( - ctx context.Context, - collectionIDs []string, -) error { - log.Debug(ctx, "Received AddP2PCollections request") - - txn, err := p.db.NewTxn(p.ctx, false) - if err != nil { - return err - } - defer txn.Discard(p.ctx) - store := p.db.WithTxn(txn) - - // first let's make sure the collections actually exists - storeCollections := []client.Collection{} - for _, col := range collectionIDs { - storeCol, err := store.GetCollectionBySchemaID(p.ctx, col) - if err != nil { - return err - } - storeCollections = append(storeCollections, storeCol) - } - - // Ensure we can add all the collections to the store on the transaction - // before adding to topics. - err = store.AddP2PCollections(p.ctx, collectionIDs) - if err != nil { - return err - } - - // Add pubsub topics and remove them if we get an error. - addedTopics := []string{} - for _, col := range collectionIDs { - err = p.server.addPubSubTopic(col, true) - if err != nil { - return p.rollbackAddPubSubTopics(addedTopics, err) - } - addedTopics = append(addedTopics, col) - } - - // After adding the collection topics, we remove the collections' documents - // from the pubsub topics to avoid receiving duplicate events. - removedTopics := []string{} - for _, col := range storeCollections { - keyChan, err := col.GetAllDocKeys(p.ctx) - if err != nil { - return err - } - for key := range keyChan { - err := p.server.removePubSubTopic(key.Key.String()) - if err != nil { - return p.rollbackRemovePubSubTopics(removedTopics, err) - } - removedTopics = append(removedTopics, key.Key.String()) - } - } - - if err = txn.Commit(p.ctx); err != nil { - err = p.rollbackRemovePubSubTopics(removedTopics, err) - return p.rollbackAddPubSubTopics(addedTopics, err) - } - - return nil -} - -// RemoveP2PCollection removes the given collectionID from the pubsup topics. -// -// It will error if the given collectionID is invalid, in such a case some of the -// changes to the server may still be applied. 
-// -// WARNING: Calling this on collections with a large number of documents may take a long time to process. -func (p *Peer) RemoveP2PCollections( - ctx context.Context, - collectionIDs []string, -) error { - log.Debug(ctx, "Received RemoveP2PCollections request") - - txn, err := p.db.NewTxn(p.ctx, false) - if err != nil { - return err - } - defer txn.Discard(p.ctx) - store := p.db.WithTxn(txn) - - // first let's make sure the collections actually exists - storeCollections := []client.Collection{} - for _, col := range collectionIDs { - storeCol, err := store.GetCollectionBySchemaID(p.ctx, col) - if err != nil { - return err - } - storeCollections = append(storeCollections, storeCol) - } - - // Ensure we can remove all the collections to the store on the transaction - // before adding to topics. - err = store.RemoveP2PCollections(p.ctx, collectionIDs) - if err != nil { - return err - } - - // Remove pubsub topics and add them back if we get an error. - removedTopics := []string{} - for _, col := range collectionIDs { - err = p.server.removePubSubTopic(col) - if err != nil { - return p.rollbackRemovePubSubTopics(removedTopics, err) - } - removedTopics = append(removedTopics, col) - } - - // After removing the collection topics, we add back the collections' documents - // to the pubsub topics. - addedTopics := []string{} - for _, col := range storeCollections { - keyChan, err := col.GetAllDocKeys(p.ctx) - if err != nil { - return err - } - for key := range keyChan { - err := p.server.addPubSubTopic(key.Key.String(), true) - if err != nil { - return p.rollbackAddPubSubTopics(addedTopics, err) - } - addedTopics = append(addedTopics, key.Key.String()) - } - } - - if err = txn.Commit(p.ctx); err != nil { - err = p.rollbackAddPubSubTopics(addedTopics, err) - return p.rollbackRemovePubSubTopics(removedTopics, err) - } - - return nil -} - -// GetAllP2PCollections gets all the collectionIDs from the pubsup topics -func (p *Peer) GetAllP2PCollections(ctx context.Context) ([]string, error) { - log.Debug(ctx, "Received GetAllP2PCollections request") - - txn, err := p.db.NewTxn(p.ctx, false) - if err != nil { - return nil, err - } - store := p.db.WithTxn(txn) - - collections, err := store.GetAllP2PCollections(p.ctx) - if err != nil { - txn.Discard(p.ctx) - return nil, err - } - - return collections, txn.Commit(p.ctx) -} diff --git a/net/peer_collection.go b/net/peer_collection.go new file mode 100644 index 0000000000..91e3f66154 --- /dev/null +++ b/net/peer_collection.go @@ -0,0 +1,173 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
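The new `peer_collection.go` below persists each P2P collection subscription as a single marker byte under a per-collection system key, and lists subscriptions back with a prefix query. A standalone sketch of that storage pattern against an in-memory go-datastore (the `/p2p/collection` prefix is illustrative, not the real key layout):

```go
package main

import (
	"context"
	"fmt"

	ds "github.com/ipfs/go-datastore"
	dsq "github.com/ipfs/go-datastore/query"
)

func main() {
	ctx := context.Background()
	store := ds.NewMapDatastore()
	defer store.Close()

	// Subscribing writes one marker byte under the collection's key.
	marker := []byte{0xff}
	for _, id := range []string{"schemaA", "schemaB"} {
		key := ds.NewKey("/p2p/collection").ChildString(id)
		if err := store.Put(ctx, key, marker); err != nil {
			panic(err)
		}
	}

	// Listing subscriptions is a prefix query; only the keys matter.
	results, err := store.Query(ctx, dsq.Query{Prefix: "/p2p/collection"})
	if err != nil {
		panic(err)
	}
	for result := range results.Next() {
		if result.Error != nil {
			panic(result.Error)
		}
		fmt.Println("subscribed:", result.Key)
	}
}
```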
+ +package net + +import ( + "context" + + dsq "github.com/ipfs/go-datastore/query" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" +) + +const marker = byte(0xff) + +func (p *Peer) AddP2PCollections(ctx context.Context, collectionIDs []string) error { + txn, err := p.db.NewTxn(p.ctx, false) + if err != nil { + return err + } + defer txn.Discard(p.ctx) + + // first let's make sure the collections actually exist + storeCollections := []client.Collection{} + for _, col := range collectionIDs { + storeCol, err := p.db.WithTxn(txn).GetCollectionBySchemaID(p.ctx, col) + if err != nil { + return err + } + storeCollections = append(storeCollections, storeCol) + } + + // Ensure we can add all the collections to the store on the transaction + // before adding the pubsub topics. + for _, col := range storeCollections { + key := core.NewP2PCollectionKey(col.SchemaID()) + err = txn.Systemstore().Put(ctx, key.ToDS(), []byte{marker}) + if err != nil { + return err + } + } + + // Add pubsub topics and remove them if we get an error. + addedTopics := []string{} + for _, col := range collectionIDs { + err = p.server.addPubSubTopic(col, true) + if err != nil { + return p.rollbackAddPubSubTopics(addedTopics, err) + } + addedTopics = append(addedTopics, col) + } + + // After adding the collection topics, we remove the collections' documents + // from the pubsub topics to avoid receiving duplicate events. + removedTopics := []string{} + for _, col := range storeCollections { + keyChan, err := col.GetAllDocKeys(p.ctx) + if err != nil { + return err + } + for key := range keyChan { + err := p.server.removePubSubTopic(key.Key.String()) + if err != nil { + return p.rollbackRemovePubSubTopics(removedTopics, err) + } + removedTopics = append(removedTopics, key.Key.String()) + } + } + + if err = txn.Commit(p.ctx); err != nil { + err = p.rollbackRemovePubSubTopics(removedTopics, err) + return p.rollbackAddPubSubTopics(addedTopics, err) + } + + return nil +} + +func (p *Peer) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { + txn, err := p.db.NewTxn(p.ctx, false) + if err != nil { + return err + } + defer txn.Discard(p.ctx) + + // first let's make sure the collections actually exist + storeCollections := []client.Collection{} + for _, col := range collectionIDs { + storeCol, err := p.db.WithTxn(txn).GetCollectionBySchemaID(p.ctx, col) + if err != nil { + return err + } + storeCollections = append(storeCollections, storeCol) + } + + // Ensure we can remove all the collections from the store on the transaction + // before removing the pubsub topics. + for _, col := range storeCollections { + key := core.NewP2PCollectionKey(col.SchemaID()) + err = txn.Systemstore().Delete(ctx, key.ToDS()) + if err != nil { + return err + } + } + + // Remove pubsub topics and add them back if we get an error. + removedTopics := []string{} + for _, col := range collectionIDs { + err = p.server.removePubSubTopic(col) + if err != nil { + return p.rollbackRemovePubSubTopics(removedTopics, err) + } + removedTopics = append(removedTopics, col) + } + + // After removing the collection topics, we add back the collections' documents + // to the pubsub topics.
+ addedTopics := []string{} + for _, col := range storeCollections { + keyChan, err := col.GetAllDocKeys(p.ctx) + if err != nil { + return err + } + for key := range keyChan { + err := p.server.addPubSubTopic(key.Key.String(), true) + if err != nil { + return p.rollbackAddPubSubTopics(addedTopics, err) + } + addedTopics = append(addedTopics, key.Key.String()) + } + } + + if err = txn.Commit(p.ctx); err != nil { + err = p.rollbackAddPubSubTopics(addedTopics, err) + return p.rollbackRemovePubSubTopics(removedTopics, err) + } + + return nil +} + +func (p *Peer) GetAllP2PCollections(ctx context.Context) ([]string, error) { + txn, err := p.db.NewTxn(p.ctx, true) + if err != nil { + return nil, err + } + defer txn.Discard(p.ctx) + + query := dsq.Query{ + Prefix: core.NewP2PCollectionKey("").ToString(), + } + results, err := txn.Systemstore().Query(ctx, query) + if err != nil { + return nil, err + } + + collectionIDs := []string{} + for result := range results.Next() { + key, err := core.NewP2PCollectionKeyFromString(result.Key) + if err != nil { + return nil, err + } + collectionIDs = append(collectionIDs, key.CollectionID) + } + + return collectionIDs, nil +} diff --git a/net/peer_replicator.go b/net/peer_replicator.go new file mode 100644 index 0000000000..ab3293625e --- /dev/null +++ b/net/peer_replicator.go @@ -0,0 +1,207 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package net + +import ( + "context" + "encoding/json" + + dsq "github.com/ipfs/go-datastore/query" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" +) + +func (p *Peer) SetReplicator(ctx context.Context, rep client.Replicator) error { + p.mu.Lock() + defer p.mu.Unlock() + + txn, err := p.db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + if rep.Info.ID == p.host.ID() { + return ErrSelfTargetForReplicator + } + if err := rep.Info.ID.Validate(); err != nil { + return err + } + + var collections []client.Collection + switch { + case len(rep.Schemas) > 0: + // if specific collections are chosen get them by name + for _, name := range rep.Schemas { + col, err := p.db.WithTxn(txn).GetCollectionByName(ctx, name) + if err != nil { + return NewErrReplicatorCollections(err) + } + collections = append(collections, col) + } + + default: + // default to all collections + collections, err = p.db.WithTxn(txn).GetAllCollections(ctx) + if err != nil { + return NewErrReplicatorCollections(err) + } + } + rep.Schemas = nil + + // Add the destination's peer multiaddress in the peerstore. + // This will be used during connection and stream creation by libp2p. + p.host.Peerstore().AddAddrs(rep.Info.ID, rep.Info.Addrs, peerstore.PermanentAddrTTL) + + var added []client.Collection + for _, col := range collections { + reps, exists := p.replicators[col.SchemaID()] + if !exists { + p.replicators[col.SchemaID()] = make(map[peer.ID]struct{}) + } + if _, exists := reps[rep.Info.ID]; !exists { + // keep track of newly added collections so we don't + // push logs to a replicator peer multiple times. 
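The duplicate check above works because `p.replicators` is a nested map used as a set of peers per schema. A tiny self-contained illustration of that shape (plain Go; strings stand in for `peer.ID`):

```go
package main

import "fmt"

func main() {
	// replicators maps schema ID -> set of peer IDs; empty structs are
	// zero-byte set members.
	replicators := map[string]map[string]struct{}{}

	add := func(schemaID, peerID string) bool {
		peers, ok := replicators[schemaID]
		if !ok {
			peers = map[string]struct{}{}
			replicators[schemaID] = peers
		}
		if _, exists := peers[peerID]; exists {
			return false // already tracked; don't push logs again
		}
		peers[peerID] = struct{}{}
		return true
	}

	fmt.Println(add("schemaA", "peer1")) // true: newly added
	fmt.Println(add("schemaA", "peer1")) // false: duplicate, skipped
}
```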
+ p.replicators[col.SchemaID()][rep.Info.ID] = struct{}{} + added = append(added, col) + } + rep.Schemas = append(rep.Schemas, col.SchemaID()) + } + + // persist replicator to the datastore + repBytes, err := json.Marshal(rep) + if err != nil { + return err + } + key := core.NewReplicatorKey(rep.Info.ID.String()) + err = txn.Systemstore().Put(ctx, key.ToDS(), repBytes) + if err != nil { + return err + } + + // push all collection documents to the replicator peer + for _, col := range added { + keysCh, err := col.WithTxn(txn).GetAllDocKeys(ctx) + if err != nil { + return NewErrReplicatorDocKey(err, col.Name(), rep.Info.ID) + } + p.pushToReplicator(ctx, txn, col, keysCh, rep.Info.ID) + } + + return txn.Commit(ctx) +} + +func (p *Peer) DeleteReplicator(ctx context.Context, rep client.Replicator) error { + p.mu.Lock() + defer p.mu.Unlock() + + txn, err := p.db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + if rep.Info.ID == p.host.ID() { + return ErrSelfTargetForReplicator + } + if err := rep.Info.ID.Validate(); err != nil { + return err + } + + var collections []client.Collection + switch { + case len(rep.Schemas) > 0: + // if specific collections are chosen get them by name + for _, name := range rep.Schemas { + col, err := p.db.WithTxn(txn).GetCollectionByName(ctx, name) + if err != nil { + return NewErrReplicatorCollections(err) + } + collections = append(collections, col) + } + // make sure the replicator exists in the datastore + key := core.NewReplicatorKey(rep.Info.ID.String()) + _, err = txn.Systemstore().Get(ctx, key.ToDS()) + if err != nil { + return err + } + + default: + // default to all collections + collections, err = p.db.WithTxn(txn).GetAllCollections(ctx) + if err != nil { + return NewErrReplicatorCollections(err) + } + } + rep.Schemas = nil + + schemaMap := make(map[string]struct{}) + for _, col := range collections { + schemaMap[col.SchemaID()] = struct{}{} + } + + // update replicators and add remaining schemas to rep + for key, val := range p.replicators { + if _, exists := val[rep.Info.ID]; exists { + if _, toDelete := schemaMap[key]; toDelete { + delete(p.replicators[key], rep.Info.ID) + } else { + rep.Schemas = append(rep.Schemas, key) + } + } + } + + if len(rep.Schemas) == 0 { + // Remove the destination's peer multiaddress in the peerstore. 
+ p.host.Peerstore().ClearAddrs(rep.Info.ID) + } + + // persist the replicator to the store, deleting it if no schemas remain + key := core.NewReplicatorKey(rep.Info.ID.String()) + if len(rep.Schemas) == 0 { + return txn.Systemstore().Delete(ctx, key.ToDS()) + } + repBytes, err := json.Marshal(rep) + if err != nil { + return err + } + return txn.Systemstore().Put(ctx, key.ToDS(), repBytes) +} + +func (p *Peer) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { + txn, err := p.db.NewTxn(ctx, true) + if err != nil { + return nil, err + } + defer txn.Discard(ctx) + + // create collection system prefix query + query := dsq.Query{ + Prefix: core.NewReplicatorKey("").ToString(), + } + results, err := txn.Systemstore().Query(ctx, query) + if err != nil { + return nil, err + } + + var reps []client.Replicator + for result := range results.Next() { + var rep client.Replicator + if err = json.Unmarshal(result.Value, &rep); err != nil { + return nil, err + } + reps = append(reps, rep) + } + return reps, nil +} diff --git a/net/peer_test.go b/net/peer_test.go index 15a4a2e55a..92b7424e9f 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -201,7 +201,7 @@ func TestStartAndClose_NoError(t *testing.T) { err := n.Start() require.NoError(t, err) - db.Close(ctx) + db.Close() } func TestStart_WithKnownPeer_NoError(t *testing.T) { @@ -236,8 +236,8 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { err = n2.Start() require.NoError(t, err) - db1.Close(ctx) - db2.Close(ctx) + db1.Close() + db2.Close() } func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { @@ -268,9 +268,7 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { t.Fatal(err) } n2.Bootstrap(addrs) - - err = n1.Close() - require.NoError(t, err) + n1.Close() // give time for n1 to close time.Sleep(100 * time.Millisecond) @@ -278,8 +276,8 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { err = n2.Start() require.NoError(t, err) - db1.Close(ctx) - db2.Close(ctx) + db1.Close() + db2.Close() } func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { @@ -298,7 +296,7 @@ func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { err = n.Start() require.ErrorIs(t, err, ErrNilUpdateChannel) - db.Close(ctx) + db.Close() } func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { @@ -319,7 +317,7 @@ func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { err = n.Start() require.ErrorContains(t, err, "cannot subscribe to a closed channel") - db.Close(ctx) + db.Close() } func TestRegisterNewDocument_NoError(t *testing.T) { @@ -412,7 +410,7 @@ func TestSetReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) - db.Close(ctx) + db.Close() info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") require.NoError(t, err) @@ -435,7 +433,7 @@ func TestSetReplicator_WithUndefinedCollection_KeyNotFoundError(t *testing.T) { Info: *info, Schemas: []string{"User"}, }) - require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found") + require.ErrorContains(t, err, "failed to get collections for replicator: datastore: key not found") } func TestSetReplicator_ForAllCollections_NoError(t *testing.T) { @@ -488,10 +486,15 @@ func TestDeleteReplicator_WithDBClosed_DataStoreClosedError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) - db.Close(ctx) + info := peer.AddrInfo{ + ID: n.PeerID(), + 
Addrs: n.ListenAddrs(), + } + + db.Close() err := n.Peer.DeleteReplicator(ctx, client.Replicator{ - Info: n.PeerInfo(), + Info: info, Schemas: []string{"User"}, }) require.ErrorContains(t, err, "datastore closed") @@ -518,7 +521,7 @@ func TestDeleteReplicator_WithInvalidCollection_KeyNotFoundError(t *testing.T) { Info: n2.PeerInfo(), Schemas: []string{"User"}, }) - require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found") + require.ErrorContains(t, err, "failed to get collections for replicator: datastore: key not found") } func TestDeleteReplicator_WithCollectionAndPreviouslySetReplicator_NoError(t *testing.T) { @@ -603,7 +606,7 @@ func TestGetAllReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) - db.Close(ctx) + db.Close() _, err := n.Peer.GetAllReplicators(ctx) require.ErrorContains(t, err, "datastore closed") @@ -613,7 +616,7 @@ func TestLoadReplicators_WithDBClosed_DatastoreClosedError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) - db.Close(ctx) + db.Close() err := n.Peer.loadReplicators(ctx) require.ErrorContains(t, err, "datastore closed") diff --git a/net/server_test.go b/net/server_test.go index 937b4c34b4..86ef798029 100644 --- a/net/server_test.go +++ b/net/server_test.go @@ -37,7 +37,8 @@ func TestNewServerSimple(t *testing.T) { func TestNewServerWithDBClosed(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) - db.Close(ctx) + db.Close() + _, err := newServer(n.Peer, db) require.ErrorIs(t, err, memory.ErrClosed) } diff --git a/tests/bench/collection/utils.go b/tests/bench/collection/utils.go index 68df9531ed..dfb63fc86b 100644 --- a/tests/bench/collection/utils.go +++ b/tests/bench/collection/utils.go @@ -38,7 +38,7 @@ func runCollectionBenchGet( if err != nil { return err } - defer db.Close(ctx) + defer db.Close() dockeys, err := benchutils.BackfillBenchmarkDB( b, @@ -123,7 +123,7 @@ func runCollectionBenchCreate( if err != nil { return err } - defer db.Close(ctx) + defer db.Close() _, err = benchutils.BackfillBenchmarkDB(b, ctx, collections, fixture, docCount, opCount, doSync) if err != nil { @@ -149,7 +149,7 @@ func runCollectionBenchCreateMany( if err != nil { return err } - defer db.Close(ctx) + defer db.Close() _, err = benchutils.BackfillBenchmarkDB(b, ctx, collections, fixture, docCount, opCount, doSync) if err != nil { diff --git a/tests/bench/query/planner/utils.go b/tests/bench/query/planner/utils.go index 2f70245b23..273cae0e0b 100644 --- a/tests/bench/query/planner/utils.go +++ b/tests/bench/query/planner/utils.go @@ -59,7 +59,7 @@ func runMakePlanBench( if err != nil { return err } - defer db.Close(ctx) + defer db.Close() parser, err := buildParser(ctx, fixture) if err != nil { diff --git a/tests/bench/query/simple/utils.go b/tests/bench/query/simple/utils.go index e7f374dc40..8c6f82579b 100644 --- a/tests/bench/query/simple/utils.go +++ b/tests/bench/query/simple/utils.go @@ -39,7 +39,7 @@ func RunQueryBenchGet( if err != nil { return err } - defer db.Close(ctx) + defer db.Close() dockeys, err := benchutils.BackfillBenchmarkDB( b, diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go index 6176273b02..8db991063e 100644 --- a/tests/clients/cli/wrapper.go +++ b/tests/clients/cli/wrapper.go @@ -27,41 +27,54 @@ import ( "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/net" ) -var 
_ client.DB = (*Wrapper)(nil) +var _ client.P2P = (*Wrapper)(nil) type Wrapper struct { - db client.DB - store client.Store + node *net.Node cmd *cliWrapper handler *http.Handler httpServer *httptest.Server } -func NewWrapper(db client.DB) *Wrapper { - handler := http.NewHandler(db, http.ServerOptions{}) +func NewWrapper(node *net.Node) *Wrapper { + handler := http.NewHandler(node, http.ServerOptions{}) httpServer := httptest.NewServer(handler) cmd := newCliWrapper(httpServer.URL) return &Wrapper{ - db: db, - store: db, + node: node, cmd: cmd, httpServer: httpServer, handler: handler, } } +func (w *Wrapper) PeerInfo() peer.AddrInfo { + args := []string{"client", "p2p", "info"} + + data, err := w.cmd.execute(context.Background(), args) + if err != nil { + panic(fmt.Sprintf("failed to get peer info: %v", err)) + } + var info peer.AddrInfo + if err := json.Unmarshal(data, &info); err != nil { + panic(fmt.Sprintf("failed to get peer info: %v", err)) + } + return info +} + func (w *Wrapper) SetReplicator(ctx context.Context, rep client.Replicator) error { args := []string{"client", "p2p", "replicator", "set"} args = append(args, "--collection", strings.Join(rep.Schemas, ",")) - addrs, err := peer.AddrInfoToP2pAddrs(&rep.Info) + info, err := json.Marshal(rep.Info) if err != nil { return err } - args = append(args, addrs[0].String()) + args = append(args, string(info)) _, err = w.cmd.execute(ctx, args) return err @@ -69,12 +82,13 @@ func (w *Wrapper) SetReplicator(ctx context.Context, rep client.Replicator) erro func (w *Wrapper) DeleteReplicator(ctx context.Context, rep client.Replicator) error { args := []string{"client", "p2p", "replicator", "delete"} + args = append(args, "--collection", strings.Join(rep.Schemas, ",")) - addrs, err := peer.AddrInfoToP2pAddrs(&rep.Info) + info, err := json.Marshal(rep.Info) if err != nil { return err } - args = append(args, addrs[0].String()) + args = append(args, string(info)) _, err = w.cmd.execute(ctx, args) return err @@ -386,34 +400,45 @@ func (w *Wrapper) NewConcurrentTxn(ctx context.Context, readOnly bool) (datastor func (w *Wrapper) WithTxn(tx datastore.Txn) client.Store { return &Wrapper{ - db: w.db, - store: w.db.WithTxn(tx), - cmd: w.cmd.withTxn(tx), + node: w.node, + cmd: w.cmd.withTxn(tx), } } func (w *Wrapper) Root() datastore.RootStore { - return w.db.Root() + return w.node.Root() } func (w *Wrapper) Blockstore() blockstore.Blockstore { - return w.db.Blockstore() + return w.node.Blockstore() } -func (w *Wrapper) Close(ctx context.Context) { +func (w *Wrapper) Close() { w.httpServer.CloseClientConnections() w.httpServer.Close() - w.db.Close(ctx) + w.node.Close() } func (w *Wrapper) Events() events.Events { - return w.db.Events() + return w.node.Events() } func (w *Wrapper) MaxTxnRetries() int { - return w.db.MaxTxnRetries() + return w.node.MaxTxnRetries() } func (w *Wrapper) PrintDump(ctx context.Context) error { - return w.db.PrintDump(ctx) + return w.node.PrintDump(ctx) +} + +func (w *Wrapper) Bootstrap(addrs []peer.AddrInfo) { + w.node.Bootstrap(addrs) +} + +func (w *Wrapper) WaitForPushLogByPeerEvent(id peer.ID) error { + return w.node.WaitForPushLogByPeerEvent(id) +} + +func (w *Wrapper) WaitForPushLogFromPeerEvent(id peer.ID) error { + return w.node.WaitForPushLogFromPeerEvent(id) } diff --git a/tests/clients/clients.go b/tests/clients/clients.go new file mode 100644 index 0000000000..10df14212f --- /dev/null +++ b/tests/clients/clients.go @@ -0,0 +1,26 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is 
governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package clients + +import ( + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/sourcenetwork/defradb/client" +) + +// Client implements the P2P interface along with a few other methods +// required for testing. +type Client interface { + client.P2P + Bootstrap([]peer.AddrInfo) + WaitForPushLogByPeerEvent(peer.ID) error + WaitForPushLogFromPeerEvent(peer.ID) error +} diff --git a/tests/clients/http/wrapper.go b/tests/clients/http/wrapper.go index b5ef61c037..35cd55f466 100644 --- a/tests/clients/http/wrapper.go +++ b/tests/clients/http/wrapper.go @@ -15,26 +15,28 @@ import ( "net/http/httptest" blockstore "github.com/ipfs/boxo/blockstore" + "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/net" ) -var _ client.DB = (*Wrapper)(nil) +var _ client.P2P = (*Wrapper)(nil) // Wrapper combines an HTTP client and server into a // single struct that implements the client.DB interface. type Wrapper struct { - db client.DB + node *net.Node handler *http.Handler client *http.Client httpServer *httptest.Server } -func NewWrapper(db client.DB) (*Wrapper, error) { - handler := http.NewHandler(db, http.ServerOptions{}) +func NewWrapper(node *net.Node) (*Wrapper, error) { + handler := http.NewHandler(node, http.ServerOptions{}) httpServer := httptest.NewServer(handler) client, err := http.NewClient(httpServer.URL) @@ -43,13 +45,17 @@ func NewWrapper(db client.DB) (*Wrapper, error) { } return &Wrapper{ - db, + node, handler, client, httpServer, }, nil } +func (w *Wrapper) PeerInfo() peer.AddrInfo { + return w.client.PeerInfo() +} + func (w *Wrapper) SetReplicator(ctx context.Context, rep client.Replicator) error { return w.client.SetReplicator(ctx, rep) } @@ -155,27 +161,39 @@ func (w *Wrapper) WithTxn(tx datastore.Txn) client.Store { } func (w *Wrapper) Root() datastore.RootStore { - return w.db.Root() + return w.node.Root() } func (w *Wrapper) Blockstore() blockstore.Blockstore { - return w.db.Blockstore() + return w.node.Blockstore() } -func (w *Wrapper) Close(ctx context.Context) { +func (w *Wrapper) Close() { w.httpServer.CloseClientConnections() w.httpServer.Close() - w.db.Close(ctx) + w.node.Close() } func (w *Wrapper) Events() events.Events { - return w.db.Events() + return w.node.Events() } func (w *Wrapper) MaxTxnRetries() int { - return w.db.MaxTxnRetries() + return w.node.MaxTxnRetries() } func (w *Wrapper) PrintDump(ctx context.Context) error { - return w.db.PrintDump(ctx) + return w.node.PrintDump(ctx) +} + +func (w *Wrapper) Bootstrap(addrs []peer.AddrInfo) { + w.node.Bootstrap(addrs) +} + +func (w *Wrapper) WaitForPushLogByPeerEvent(id peer.ID) error { + return w.node.WaitForPushLogByPeerEvent(id) +} + +func (w *Wrapper) WaitForPushLogFromPeerEvent(id peer.ID) error { + return w.node.WaitForPushLogFromPeerEvent(id) } diff --git a/tests/integration/client.go b/tests/integration/client.go new file mode 100644 index 0000000000..a6159900cc --- /dev/null +++ b/tests/integration/client.go @@ -0,0 +1,85 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed 
by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tests + +import ( + "fmt" + "os" + "strconv" + + "github.com/sourcenetwork/defradb/net" + "github.com/sourcenetwork/defradb/tests/clients" + "github.com/sourcenetwork/defradb/tests/clients/cli" + "github.com/sourcenetwork/defradb/tests/clients/http" +) + +const ( + clientGoEnvName = "DEFRA_CLIENT_GO" + clientHttpEnvName = "DEFRA_CLIENT_HTTP" + clientCliEnvName = "DEFRA_CLIENT_CLI" +) + +type ClientType string + +const ( + // goClientType enables running the test suite using + // the go implementation of the client.DB interface. + GoClientType ClientType = "go" + // httpClientType enables running the test suite using + // the http implementation of the client.DB interface. + HTTPClientType ClientType = "http" + // cliClientType enables running the test suite using + // the cli implementation of the client.DB interface. + CLIClientType ClientType = "cli" +) + +var ( + httpClient bool + goClient bool + cliClient bool +) + +func init() { + // We use environment variables instead of flags `go test ./...` throws for all packages + // that don't have the flag defined + httpClient, _ = strconv.ParseBool(os.Getenv(clientHttpEnvName)) + goClient, _ = strconv.ParseBool(os.Getenv(clientGoEnvName)) + cliClient, _ = strconv.ParseBool(os.Getenv(clientCliEnvName)) + + if !goClient && !httpClient && !cliClient { + // Default is to test go client type. + goClient = true + } +} + +// setupClient returns the client implementation for the current +// testing state. The client type on the test state is used to +// select the client implementation to use. +func setupClient(s *state, node *net.Node) (impl clients.Client, err error) { + switch s.clientType { + case HTTPClientType: + impl, err = http.NewWrapper(node) + + case CLIClientType: + impl = cli.NewWrapper(node) + + case GoClientType: + impl = node + + default: + err = fmt.Errorf("invalid client type: %v", s.dbt) + } + + if err != nil { + return nil, err + } + return +} diff --git a/tests/integration/db.go b/tests/integration/db.go new file mode 100644 index 0000000000..561546cfef --- /dev/null +++ b/tests/integration/db.go @@ -0,0 +1,151 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
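Both new files read their configuration from environment variables rather than flags: tests/integration/client.go above selects the client implementation, while tests/integration/db.go below selects the datastore. A minimal sketch of combining the two, assuming the variable names defined in these files:

	DEFRA_CLIENT_HTTP=true DEFRA_BADGER_FILE=true go test ./tests/integration/...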
+ +package tests + +import ( + "context" + "fmt" + "os" + "strconv" + "testing" + + badger "github.com/dgraph-io/badger/v4" + + "github.com/sourcenetwork/defradb/client" + badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" + "github.com/sourcenetwork/defradb/datastore/memory" + "github.com/sourcenetwork/defradb/db" + changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" +) + +type DatabaseType string + +const ( + memoryBadgerEnvName = "DEFRA_BADGER_MEMORY" + fileBadgerEnvName = "DEFRA_BADGER_FILE" + fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH" + inMemoryEnvName = "DEFRA_IN_MEMORY" +) + +const ( + badgerIMType DatabaseType = "badger-in-memory" + defraIMType DatabaseType = "defra-memory-datastore" + badgerFileType DatabaseType = "badger-file-system" +) + +var ( + badgerInMemory bool + badgerFile bool + inMemoryStore bool + databaseDir string +) + +func init() { + // We use environment variables instead of flags `go test ./...` throws for all packages + // that don't have the flag defined + badgerFile, _ = strconv.ParseBool(os.Getenv(fileBadgerEnvName)) + badgerInMemory, _ = strconv.ParseBool(os.Getenv(memoryBadgerEnvName)) + inMemoryStore, _ = strconv.ParseBool(os.Getenv(inMemoryEnvName)) + + if changeDetector.Enabled { + // Change detector only uses badger file db type. + badgerFile = true + badgerInMemory = false + inMemoryStore = false + } else if !badgerInMemory && !badgerFile && !inMemoryStore { + // Default is to test all but filesystem db types. + badgerFile = false + badgerInMemory = true + inMemoryStore = true + } +} + +func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { + opts := badgerds.Options{ + Options: badger.DefaultOptions("").WithInMemory(true), + } + rootstore, err := badgerds.NewDatastore("", &opts) + if err != nil { + return nil, err + } + db, err := db.NewDB(ctx, rootstore, dbopts...) + if err != nil { + return nil, err + } + return db, nil +} + +func NewInMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { + db, err := db.NewDB(ctx, memory.NewDatastore(ctx), dbopts...) + if err != nil { + return nil, err + } + return db, nil +} + +func NewBadgerFileDB(ctx context.Context, t testing.TB, dbopts ...db.Option) (client.DB, string, error) { + var dbPath string + switch { + case databaseDir != "": + // restarting database + dbPath = databaseDir + + case changeDetector.Enabled: + // change detector + dbPath = changeDetector.DatabaseDir(t) + + default: + // default test case + dbPath = t.TempDir() + } + + opts := &badgerds.Options{ + Options: badger.DefaultOptions(dbPath), + } + rootstore, err := badgerds.NewDatastore(dbPath, opts) + if err != nil { + return nil, "", err + } + db, err := db.NewDB(ctx, rootstore, dbopts...) + if err != nil { + return nil, "", err + } + return db, dbPath, err +} + +// setupDatabase returns the database implementation for the current +// testing state. The database type on the test state is used to +// select the datastore implementation to use. +func setupDatabase(s *state) (impl client.DB, path string, err error) { + dbopts := []db.Option{ + db.WithUpdateEvents(), + db.WithLensPoolSize(lensPoolSize), + } + + switch s.dbt { + case badgerIMType: + impl, err = NewBadgerMemoryDB(s.ctx, dbopts...) + + case badgerFileType: + impl, path, err = NewBadgerFileDB(s.ctx, s.t, dbopts...) + + case defraIMType: + impl, err = NewInMemoryDB(s.ctx, dbopts...) 
+ + default: + err = fmt.Errorf("invalid database type: %v", s.dbt) + } + + if err != nil { + return nil, "", err + } + return +} diff --git a/tests/integration/explain.go b/tests/integration/explain.go index 44c457c0f8..da7a1106e2 100644 --- a/tests/integration/explain.go +++ b/tests/integration/explain.go @@ -125,7 +125,7 @@ func executeExplainRequest( } for _, node := range getNodes(action.NodeID, s.nodes) { - result := node.DB.ExecRequest(s.ctx, action.Request) + result := node.ExecRequest(s.ctx, action.Request) assertExplainRequestResults(s, &result.GQL, action) } } diff --git a/tests/integration/lens.go b/tests/integration/lens.go index 317864ab3e..e69437d87b 100644 --- a/tests/integration/lens.go +++ b/tests/integration/lens.go @@ -57,7 +57,7 @@ func configureMigration( action ConfigureMigration, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - db := getStore(s, node.DB, action.TransactionID, action.ExpectedError) + db := getStore(s, node, action.TransactionID, action.ExpectedError) err := db.SetMigration(s.ctx, action.LensConfig) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) @@ -71,7 +71,7 @@ func getMigrations( action GetMigrations, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - db := getStore(s, node.DB, action.TransactionID, "") + db := getStore(s, node, action.TransactionID, "") configs, err := db.LensRegistry().Config(s.ctx) require.NoError(s.t, err) diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go index 5470d8aee7..e01dd612cd 100644 --- a/tests/integration/net/order/utils.go +++ b/tests/integration/net/order/utils.go @@ -114,10 +114,7 @@ func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*net.Node } if err := n.Start(); err != nil { - closeErr := n.Close() - if closeErr != nil { - return nil, nil, errors.Wrap(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr) - } + n.Close() return nil, nil, errors.Wrap("unable to start P2P listeners", err) } @@ -206,9 +203,10 @@ func executeTestCase(t *testing.T, test P2PTestCase) { log.Info(ctx, "cannot set a peer that hasn't been started. 
Skipping to next peer") continue } + peerInfo := nodes[p].PeerInfo() peerAddresses = append( peerAddresses, - fmt.Sprintf("%s/p2p/%s", test.NodeConfig[p].Net.P2PAddress, nodes[p].PeerID()), + fmt.Sprintf("%s/p2p/%s", peerInfo.Addrs[0], peerInfo.ID), ) } cfg.Net.Peers = strings.Join(peerAddresses, ",") @@ -260,7 +258,7 @@ func executeTestCase(t *testing.T, test P2PTestCase) { continue } log.Info(ctx, fmt.Sprintf("Waiting for node %d to sync with peer %d", n2, n)) - err := p.WaitForPushLogByPeerEvent(nodes[n].PeerID()) + err := p.WaitForPushLogByPeerEvent(nodes[n].PeerInfo().ID) require.NoError(t, err) log.Info(ctx, fmt.Sprintf("Node %d synced", n2)) } @@ -340,15 +338,14 @@ func executeTestCase(t *testing.T, test P2PTestCase) { // clean up for _, n := range nodes { - if err := n.Close(); err != nil { - log.Info(ctx, "node not closing as expected", logging.NewKV("Error", err.Error())) - } - n.DB.Close(ctx) + n.Close() + n.DB.Close() } } func randomNetworkingConfig() *config.Config { cfg := config.DefaultConfig() cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/0" + cfg.Net.RelayEnabled = false return cfg } diff --git a/tests/integration/net/state/simple/replicator/with_create_test.go b/tests/integration/net/state/simple/replicator/with_create_test.go index 65d6cfd6ce..d6285f9bd0 100644 --- a/tests/integration/net/state/simple/replicator/with_create_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_test.go @@ -12,6 +12,7 @@ package replicator import ( "testing" + "time" "github.com/sourcenetwork/immutable" @@ -150,6 +151,55 @@ func TestP2POneToOneReplicatorDoesNotSyncFromTargetToSource(t *testing.T) { testUtils.ExecuteTestCase(t, test) } +func TestP2POneToOneReplicatorDoesNotSyncFromDeletedReplicator(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.RandomNetworkingConfig(), + testUtils.RandomNetworkingConfig(), + testUtils.SchemaUpdate{ + Schema: ` + type Users { + Name: String + Age: Int + } + `, + }, + testUtils.ConfigureReplicator{ + SourceNodeID: 0, + TargetNodeID: 1, + }, + testUtils.DeleteReplicator{ + SourceNodeID: 0, + TargetNodeID: 1, + }, + testUtils.CreateDoc{ + // Create John on the first (source) node only + NodeID: immutable.Some(0), + Doc: `{ + "Name": "John", + "Age": 21 + }`, + }, + testUtils.WaitForSync{ + // No documents should be synced + ExpectedTimeout: 100 * time.Millisecond, + }, + testUtils.Request{ + // Assert that John has not been synced to the second (target) node + NodeID: immutable.Some(1), + Request: `query { + Users { + Age + } + }`, + Results: []map[string]any{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + func TestP2POneToManyReplicator(t *testing.T) { test := testUtils.TestCase{ Actions: []any{ diff --git a/tests/integration/p2p.go b/tests/integration/p2p.go index e04e16bb0f..de02c0806c 100644 --- a/tests/integration/p2p.go +++ b/tests/integration/p2p.go @@ -11,15 +11,14 @@ package tests import ( - "fmt" "time" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/logging" - "github.com/sourcenetwork/defradb/net" - netutils "github.com/sourcenetwork/defradb/net/utils" + "github.com/sourcenetwork/defradb/tests/clients" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -58,11 +57,20 @@ type ConfigureReplicator struct { TargetNodeID int } +// DeleteReplicator deletes a directional replicator relationship between two nodes. 
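+// Documents created after the deletion are no longer pushed to the target node;
+// TestP2POneToOneReplicatorDoesNotSyncFromDeletedReplicator above exercises this.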
+type DeleteReplicator struct { + // SourceNodeID is the node ID (index) of the node from which the replicator should be deleted. + SourceNodeID int + + // TargetNodeID is the node ID (index) of the node to which the replicator should be deleted. + TargetNodeID int +} + const ( // NonExistentCollectionID can be used to represent a non-existent collection ID, it will be substituted // for a non-existent collection ID when used in actions that support this. - NonExistentCollectionID int = -1 - NonExistentCollectionSchemaID = "NonExistentCollectionID" + NonExistentCollectionID int = -1 + NonExistentCollectionSchemaID string = "NonExistentCollectionID" ) // SubscribeToCollection sets up a subscription on the given node to the given collection. @@ -121,7 +129,10 @@ type GetAllP2PCollections struct { // // For example you will likely wish to `WaitForSync` after creating a document in node 0 before querying // node 1 to see if it has been replicated. -type WaitForSync struct{} +type WaitForSync struct { + // ExpectedTimeout is the duration to wait when expecting a timeout to occur. + ExpectedTimeout time.Duration +} // connectPeers connects two existing, started, nodes as peers. It returns a channel // that will receive an empty struct upon sync completion of all expected peer-sync events. @@ -136,13 +147,8 @@ func connectPeers( time.Sleep(100 * time.Millisecond) sourceNode := s.nodes[cfg.SourceNodeID] targetNode := s.nodes[cfg.TargetNodeID] - targetAddress := s.nodeAddresses[cfg.TargetNodeID] - log.Info(s.ctx, "Parsing bootstrap peers", logging.NewKV("Peers", targetAddress)) - addrs, err := netutils.ParsePeers([]string{targetAddress}) - if err != nil { - s.t.Fatal(fmt.Sprintf("failed to parse bootstrap peers %v", targetAddress), err) - } + addrs := []peer.AddrInfo{targetNode.PeerInfo()} log.Info(s.ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) sourceNode.Bootstrap(addrs) @@ -157,12 +163,16 @@ func setupPeerWaitSync( s *state, startIndex int, cfg ConnectPeers, - sourceNode *net.Node, - targetNode *net.Node, + sourceNode clients.Client, + targetNode clients.Client, ) { - nodeCollections := map[int][]int{} sourceToTargetEvents := []int{0} targetToSourceEvents := []int{0} + + sourcePeerInfo := sourceNode.PeerInfo() + targetPeerInfo := targetNode.PeerInfo() + + nodeCollections := map[int][]int{} waitIndex := 0 for i := startIndex; i < len(s.testCase.Actions); i++ { switch action := s.testCase.Actions[i].(type) { @@ -247,11 +257,11 @@ func setupPeerWaitSync( ready <- struct{}{} for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ { for i := 0; i < targetToSourceEvents[waitIndex]; i++ { - err := sourceNode.WaitForPushLogByPeerEvent(targetNode.PeerID()) + err := sourceNode.WaitForPushLogByPeerEvent(targetPeerInfo.ID) require.NoError(s.t, err) } for i := 0; i < sourceToTargetEvents[waitIndex]; i++ { - err := targetNode.WaitForPushLogByPeerEvent(sourceNode.PeerID()) + err := targetNode.WaitForPushLogByPeerEvent(sourcePeerInfo.ID) require.NoError(s.t, err) } nodeSynced <- struct{}{} @@ -294,22 +304,39 @@ func configureReplicator( sourceNode := s.nodes[cfg.SourceNodeID] targetNode := s.nodes[cfg.TargetNodeID] - err := sourceNode.Peer.SetReplicator(s.ctx, client.Replicator{ + err := sourceNode.SetReplicator(s.ctx, client.Replicator{ Info: targetNode.PeerInfo(), }) require.NoError(s.t, err) setupReplicatorWaitSync(s, 0, cfg, sourceNode, targetNode) } +func deleteReplicator( + s *state, + cfg DeleteReplicator, +) { + sourceNode := s.nodes[cfg.SourceNodeID] + targetNode := 
s.nodes[cfg.TargetNodeID] + + err := sourceNode.DeleteReplicator(s.ctx, client.Replicator{ + Info: targetNode.PeerInfo(), + }) + require.NoError(s.t, err) +} + func setupReplicatorWaitSync( s *state, startIndex int, cfg ConfigureReplicator, - sourceNode *net.Node, - targetNode *net.Node, + sourceNode clients.Client, + targetNode clients.Client, ) { sourceToTargetEvents := []int{0} targetToSourceEvents := []int{0} + + sourcePeerInfo := sourceNode.PeerInfo() + targetPeerInfo := targetNode.PeerInfo() + docIDsSyncedToSource := map[int]struct{}{} waitIndex := 0 currentDocID := 0 @@ -361,11 +388,11 @@ func setupReplicatorWaitSync( ready <- struct{}{} for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ { for i := 0; i < targetToSourceEvents[waitIndex]; i++ { - err := sourceNode.WaitForPushLogByPeerEvent(targetNode.PeerID()) + err := sourceNode.WaitForPushLogByPeerEvent(targetPeerInfo.ID) require.NoError(s.t, err) } for i := 0; i < sourceToTargetEvents[waitIndex]; i++ { - err := targetNode.WaitForPushLogByPeerEvent(sourceNode.PeerID()) + err := targetNode.WaitForPushLogByPeerEvent(sourcePeerInfo.ID) require.NoError(s.t, err) } nodeSynced <- struct{}{} @@ -466,14 +493,31 @@ func waitForSync( s *state, action WaitForSync, ) { + var timeout time.Duration + if action.ExpectedTimeout != 0 { + timeout = action.ExpectedTimeout + } else { + timeout = subscriptionTimeout * 10 + } + for _, resultsChan := range s.syncChans { select { case <-resultsChan: - continue + assert.True( + s.t, + action.ExpectedTimeout == 0, + "unexpected document has been synced", + s.testCase.Description, + ) // a safety in case the stream hangs - we don't want the tests to run forever. - case <-time.After(subscriptionTimeout * 10): - assert.Fail(s.t, "timeout occurred while waiting for data stream", s.testCase.Description) + case <-time.After(timeout): + assert.True( + s.t, + action.ExpectedTimeout != 0, + "timeout occurred while waiting for data stream", + s.testCase.Description, + ) } } } @@ -482,6 +526,7 @@ func RandomNetworkingConfig() ConfigureNode { return func() config.Config { cfg := config.DefaultConfig() cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/0" + cfg.Net.RelayEnabled = false return *cfg } } diff --git a/tests/integration/state.go b/tests/integration/state.go index 5e47e0adfe..ca795a2492 100644 --- a/tests/integration/state.go +++ b/tests/integration/state.go @@ -15,11 +15,12 @@ import ( "testing" "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/net" + "github.com/sourcenetwork/defradb/tests/clients" ) type state struct { @@ -56,13 +57,13 @@ type state struct { nodePrivateKeys []crypto.PrivKey // The addresses of any nodes configured. - nodeAddresses []string + nodeAddresses []peer.AddrInfo // The configurations for any nodes nodeConfigs []config.Config // The nodes active in this test. - nodes []*net.Node + nodes []clients.Client // The paths to any file-based databases active in this test. 
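	// Indexed in step with nodes; entries are empty for databases that have no
	// filesystem path.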
dbPaths []string @@ -108,9 +109,9 @@ func newState( subscriptionResultsChans: []chan func(){}, syncChans: []chan struct{}{}, nodePrivateKeys: []crypto.PrivKey{}, - nodeAddresses: []string{}, + nodeAddresses: []peer.AddrInfo{}, nodeConfigs: []config.Config{}, - nodes: []*net.Node{}, + nodes: []clients.Client{}, dbPaths: []string{}, collections: [][]client.Collection{}, collectionNames: collectionNames, diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 9e8c71792e..01c6c7c69f 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -16,12 +16,10 @@ import ( "fmt" "os" "reflect" - "strconv" "strings" "testing" "time" - badger "github.com/dgraph-io/badger/v4" "github.com/libp2p/go-libp2p/core/crypto" "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" @@ -30,48 +28,14 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" - "github.com/sourcenetwork/defradb/tests/clients/cli" - "github.com/sourcenetwork/defradb/tests/clients/http" + "github.com/sourcenetwork/defradb/tests/clients" ) -const ( - clientGoEnvName = "DEFRA_CLIENT_GO" - clientHttpEnvName = "DEFRA_CLIENT_HTTP" - clientCliEnvName = "DEFRA_CLIENT_CLI" - memoryBadgerEnvName = "DEFRA_BADGER_MEMORY" - fileBadgerEnvName = "DEFRA_BADGER_FILE" - fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH" - inMemoryEnvName = "DEFRA_IN_MEMORY" - mutationTypeEnvName = "DEFRA_MUTATION_TYPE" -) - -type DatabaseType string - -const ( - badgerIMType DatabaseType = "badger-in-memory" - defraIMType DatabaseType = "defra-memory-datastore" - badgerFileType DatabaseType = "badger-file-system" -) - -type ClientType string - -const ( - // GoClientType enables running the test suite using - // the go implementation of the client.DB interface. - GoClientType ClientType = "go" - // HTTPClientType enables running the test suite using - // the http implementation of the client.DB interface. - HTTPClientType ClientType = "http" - // CLIClientType enables running the test suite using - // the cli implementation of the client.DB interface. - CLIClientType ClientType = "cli" -) +const mutationTypeEnvName = "DEFRA_MUTATION_TYPE" // The MutationType that tests will run using. 
// @@ -101,15 +65,8 @@ const ( ) var ( - log = logging.MustNewLogger("tests.integration") - badgerInMemory bool - badgerFile bool - inMemoryStore bool - httpClient bool - goClient bool - cliClient bool - mutationType MutationType - databaseDir string + log = logging.MustNewLogger("tests.integration") + mutationType MutationType ) const ( @@ -122,14 +79,7 @@ const ( func init() { // We use environment variables instead of flags `go test ./...` throws for all packages - // that don't have the flag defined - httpClient, _ = strconv.ParseBool(os.Getenv(clientHttpEnvName)) - goClient, _ = strconv.ParseBool(os.Getenv(clientGoEnvName)) - cliClient, _ = strconv.ParseBool(os.Getenv(clientCliEnvName)) - badgerFile, _ = strconv.ParseBool(os.Getenv(fileBadgerEnvName)) - badgerInMemory, _ = strconv.ParseBool(os.Getenv(memoryBadgerEnvName)) - inMemoryStore, _ = strconv.ParseBool(os.Getenv(inMemoryEnvName)) - + // that don't have the flag defined if value, ok := os.LookupEnv(mutationTypeEnvName); ok { mutationType = MutationType(value) } else { @@ -138,23 +88,6 @@ func init() { // mutation type. mutationType = CollectionSaveMutationType } - - if !goClient && !httpClient && !cliClient { - // Default is to test go client type. - goClient = true - } - - if changeDetector.Enabled { - // Change detector only uses badger file db type. - badgerFile = true - badgerInMemory = false - inMemoryStore = false - } else if !badgerInMemory && !badgerFile && !inMemoryStore { - // Default is to test all but filesystem db types. - badgerFile = false - badgerInMemory = true - inMemoryStore = true - } } // AssertPanic asserts that the code inside the specified PanicTestFunc panics. @@ -178,107 +111,6 @@ func AssertPanic(t *testing.T, f assert.PanicTestFunc) bool { return assert.Panics(t, f, "expected a panic, but none found.") } -func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { - opts := badgerds.Options{ - Options: badger.DefaultOptions("").WithInMemory(true), - } - rootstore, err := badgerds.NewDatastore("", &opts) - if err != nil { - return nil, err - } - db, err := db.NewDB(ctx, rootstore, dbopts...) - if err != nil { - return nil, err - } - return db, nil -} - -func NewInMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { - db, err := db.NewDB(ctx, memory.NewDatastore(ctx), dbopts...) - if err != nil { - return nil, err - } - return db, nil -} - -func NewBadgerFileDB(ctx context.Context, t testing.TB, dbopts ...db.Option) (client.DB, string, error) { - var dbPath string - switch { - case databaseDir != "": - // restarting database - dbPath = databaseDir - - case changeDetector.Enabled: - // change detector - dbPath = changeDetector.DatabaseDir(t) - - default: - // default test case - dbPath = t.TempDir() - } - - opts := &badgerds.Options{ - Options: badger.DefaultOptions(dbPath), - } - rootstore, err := badgerds.NewDatastore(dbPath, opts) - if err != nil { - return nil, "", err - } - db, err := db.NewDB(ctx, rootstore, dbopts...) - if err != nil { - return nil, "", err - } - return db, dbPath, err -} - -// GetDatabase returns the database implementation for the current -// testing state. The database type and client type on the test state -// are used to select the datastore and client implementation to use. -func GetDatabase(s *state) (cdb client.DB, path string, err error) { - dbopts := []db.Option{ - db.WithUpdateEvents(), - db.WithLensPoolSize(lensPoolSize), - } - - switch s.dbt { - case badgerIMType: - cdb, err = NewBadgerMemoryDB(s.ctx, dbopts...) 
- - case badgerFileType: - cdb, path, err = NewBadgerFileDB(s.ctx, s.t, dbopts...) - - case defraIMType: - cdb, err = NewInMemoryDB(s.ctx, dbopts...) - - default: - err = fmt.Errorf("invalid database type: %v", s.dbt) - } - - if err != nil { - return nil, "", err - } - - switch s.clientType { - case HTTPClientType: - cdb, err = http.NewWrapper(cdb) - - case CLIClientType: - cdb = cli.NewWrapper(cdb) - - case GoClientType: - return - - default: - err = fmt.Errorf("invalid client type: %v", s.dbt) - } - - if err != nil { - return nil, "", err - } - - return -} - // ExecuteTestCase executes the given TestCase against the configured database // instances. // @@ -404,6 +236,9 @@ func performAction( case ConfigureReplicator: configureReplicator(s, action) + case DeleteReplicator: + deleteReplicator(s, action) + case SubscribeToCollection: subscribeToCollection(s, action) @@ -585,23 +420,19 @@ func closeNodes( s *state, ) { for _, node := range s.nodes { - if node.Peer != nil { - err := node.Close() - require.NoError(s.t, err) - } - node.DB.Close(s.ctx) + node.Close() } } // getNodes gets the set of applicable nodes for the given nodeID. // // If nodeID has a value it will return that node only, otherwise all nodes will be returned. -func getNodes(nodeID immutable.Option[int], nodes []*net.Node) []*net.Node { +func getNodes(nodeID immutable.Option[int], nodes []clients.Client) []clients.Client { if !nodeID.HasValue() { return nodes } - return []*net.Node{nodes[nodeID.Value()]} + return []clients.Client{nodes[nodeID.Value()]} } // getNodeCollections gets the set of applicable collections for the given nodeID. @@ -729,12 +560,13 @@ func setStartingNodes( // If nodes have not been explicitly configured via actions, setup a default one. if !hasExplicitNode { - db, path, err := GetDatabase(s) + db, path, err := setupDatabase(s) require.Nil(s.t, err) - s.nodes = append(s.nodes, &net.Node{ - DB: db, - }) + c, err := setupClient(s, &net.Node{DB: db}) + require.Nil(s.t, err) + + s.nodes = append(s.nodes, c) s.dbPaths = append(s.dbPaths, path) } } @@ -752,16 +584,16 @@ func restartNodes( for i := len(s.nodes) - 1; i >= 0; i-- { originalPath := databaseDir databaseDir = s.dbPaths[i] - db, _, err := GetDatabase(s) + db, _, err := setupDatabase(s) require.Nil(s.t, err) databaseDir = originalPath if len(s.nodeConfigs) == 0 { // If there are no explicit node configuration actions the node will be // basic (i.e. no P2P stuff) and can be yielded now. - s.nodes[i] = &net.Node{ - DB: db, - } + c, err := setupClient(s, &net.Node{DB: db}) + require.NoError(s.t, err) + s.nodes[i] = c continue } @@ -769,7 +601,8 @@ func restartNodes( cfg := s.nodeConfigs[i] // We need to make sure the node is configured with its old address, otherwise // a new one may be selected and reconnnection to it will fail. - cfg.Net.P2PAddress = strings.Split(s.nodeAddresses[i], "/p2p/")[0] + cfg.Net.P2PAddress = s.nodeAddresses[i].Addrs[0].String() + var n *net.Node n, err = net.NewNode( s.ctx, @@ -780,14 +613,13 @@ func restartNodes( require.NoError(s.t, err) if err := n.Start(); err != nil { - closeErr := n.Close() - if closeErr != nil { - s.t.Fatal(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr) - } + n.Close() require.NoError(s.t, err) } - s.nodes[i] = n + c, err := setupClient(s, n) + require.NoError(s.t, err) + s.nodes[i] = c } // The index of the action after the last wait action before the current restart action. 
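The hunks above and below move the harness helpers from *net.Node to the clients.Client interface, so actions no longer reach through node.DB. A minimal sketch of a connection helper written against that interface, assuming the clients package shown earlier (connectAndWait is a hypothetical name, not part of the patch):

	package tests

	import (
		"github.com/libp2p/go-libp2p/core/peer"

		"github.com/sourcenetwork/defradb/tests/clients"
	)

	// connectAndWait bootstraps source against target's address info, then
	// blocks until target observes a push-log event for source's peer ID.
	func connectAndWait(source, target clients.Client) error {
		source.Bootstrap([]peer.AddrInfo{target.PeerInfo()})
		return target.WaitForPushLogByPeerEvent(source.PeerInfo().ID)
	}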
@@ -838,7 +670,7 @@ func refreshCollections( for nodeID, node := range s.nodes { s.collections[nodeID] = make([]client.Collection, len(s.collectionNames)) - allCollections, err := node.DB.GetAllCollections(s.ctx) + allCollections, err := node.GetAllCollections(s.ctx) require.Nil(s.t, err) for i, collectionName := range s.collectionNames { @@ -867,7 +699,7 @@ func configureNode( } cfg := action() - db, path, err := GetDatabase(s) //disable change dector, or allow it? + db, path, err := setupDatabase(s) //disable change dector, or allow it? require.NoError(s.t, err) privateKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) @@ -883,20 +715,20 @@ func configureNode( ) require.NoError(s.t, err) + log.Info(s.ctx, "Starting P2P node", logging.NewKV("P2P address", n.PeerInfo())) if err := n.Start(); err != nil { - closeErr := n.Close() - if closeErr != nil { - s.t.Fatal(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr) - } + n.Close() require.NoError(s.t, err) } - address := fmt.Sprintf("%s/p2p/%s", n.ListenAddrs()[0].String(), n.PeerID()) - s.nodeAddresses = append(s.nodeAddresses, address) + s.nodeAddresses = append(s.nodeAddresses, n.PeerInfo()) s.nodeConfigs = append(s.nodeConfigs, cfg) s.nodePrivateKeys = append(s.nodePrivateKeys, privateKey) - s.nodes = append(s.nodes, n) + c, err := setupClient(s, n) + require.NoError(s.t, err) + + s.nodes = append(s.nodes, c) s.dbPaths = append(s.dbPaths, path) } @@ -1083,7 +915,7 @@ func updateSchema( action SchemaUpdate, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - _, err := node.DB.AddSchema(s.ctx, action.Schema) + _, err := node.AddSchema(s.ctx, action.Schema) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -1106,7 +938,7 @@ func patchSchema( setAsDefaultVersion = true } - err := node.DB.PatchSchema(s.ctx, action.Patch, setAsDefaultVersion) + err := node.PatchSchema(s.ctx, action.Patch, setAsDefaultVersion) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -1122,7 +954,7 @@ func setDefaultSchemaVersion( action SetDefaultSchemaVersion, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - err := node.DB.SetDefaultSchemaVersion(s.ctx, action.SchemaVersionID) + err := node.SetDefaultSchemaVersion(s.ctx, action.SchemaVersionID) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -1138,7 +970,7 @@ func createDoc( s *state, action CreateDoc, ) { - var mutation func(*state, CreateDoc, *net.Node, []client.Collection) (*client.Document, error) + var mutation func(*state, CreateDoc, client.P2P, []client.Collection) (*client.Document, error) switch mutationType { case CollectionSaveMutationType: @@ -1179,7 +1011,7 @@ func createDoc( func createDocViaColSave( s *state, action CreateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) (*client.Document, error) { var err error @@ -1194,7 +1026,7 @@ func createDocViaColSave( func createDocViaColCreate( s *state, action CreateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) (*client.Document, error) { var err error @@ -1209,7 +1041,7 @@ func createDocViaColCreate( func 
createDocViaGQL( s *state, action CreateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) (*client.Document, error) { collection := collections[action.CollectionID] @@ -1227,7 +1059,7 @@ func createDocViaGQL( escapedJson, ) - db := getStore(s, node.DB, immutable.None[int](), action.ExpectedError) + db := getStore(s, node, immutable.None[int](), action.ExpectedError) result := db.ExecRequest(s.ctx, request) if len(result.GQL.Errors) > 0 { @@ -1279,7 +1111,7 @@ func updateDoc( s *state, action UpdateDoc, ) { - var mutation func(*state, UpdateDoc, *net.Node, []client.Collection) error + var mutation func(*state, UpdateDoc, client.P2P, []client.Collection) error switch mutationType { case CollectionSaveMutationType: @@ -1309,7 +1141,7 @@ func updateDoc( func updateDocViaColSave( s *state, action UpdateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) error { doc := s.documents[action.CollectionID][action.DocID] @@ -1325,7 +1157,7 @@ func updateDocViaColSave( func updateDocViaColUpdate( s *state, action UpdateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) error { doc := s.documents[action.CollectionID][action.DocID] @@ -1341,7 +1173,7 @@ func updateDocViaColUpdate( func updateDocViaGQL( s *state, action UpdateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) error { doc := s.documents[action.CollectionID][action.DocID] @@ -1361,7 +1193,7 @@ func updateDocViaGQL( escapedJson, ) - db := getStore(s, node.DB, immutable.None[int](), action.ExpectedError) + db := getStore(s, node, immutable.None[int](), action.ExpectedError) result := db.ExecRequest(s.ctx, request) if len(result.GQL.Errors) > 0 { @@ -1461,7 +1293,7 @@ func backupExport( err := withRetry( actionNodes, nodeID, - func() error { return node.DB.BasicExport(s.ctx, &action.Config) }, + func() error { return node.BasicExport(s.ctx, &action.Config) }, ) expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError) @@ -1491,7 +1323,7 @@ func backupImport( err := withRetry( actionNodes, nodeID, - func() error { return node.DB.BasicImport(s.ctx, action.Filepath) }, + func() error { return node.BasicImport(s.ctx, action.Filepath) }, ) expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError) } @@ -1506,11 +1338,11 @@ func backupImport( // about this in our tests so we just retry a few times until it works (or the // retry limit is breached - important incase this is a different error) func withRetry( - nodes []*net.Node, + nodes []clients.Client, nodeID int, action func() error, ) error { - for i := 0; i < nodes[nodeID].DB.MaxTxnRetries(); i++ { + for i := 0; i < nodes[nodeID].MaxTxnRetries(); i++ { err := action() if err != nil && errors.Is(err, badgerds.ErrTxnConflict) { time.Sleep(100 * time.Millisecond) @@ -1577,7 +1409,7 @@ func executeRequest( ) { var expectedErrorRaised bool for nodeID, node := range getNodes(action.NodeID, s.nodes) { - db := getStore(s, node.DB, action.TransactionID, action.ExpectedError) + db := getStore(s, node, action.TransactionID, action.ExpectedError) result := db.ExecRequest(s.ctx, action.Request) anyOfByFieldKey := map[docFieldKey][]any{} @@ -1610,7 +1442,7 @@ func executeSubscriptionRequest( subscriptionAssert := make(chan func()) for _, node := range getNodes(action.NodeID, s.nodes) { - result := node.DB.ExecRequest(s.ctx, action.Request) + result := node.ExecRequest(s.ctx, action.Request) if AssertErrors(s.t, s.testCase.Description, 
result.GQL.Errors, action.ExpectedError) { return } @@ -1791,7 +1623,7 @@ func assertIntrospectionResults( action IntrospectionRequest, ) bool { for _, node := range getNodes(action.NodeID, s.nodes) { - result := node.DB.ExecRequest(s.ctx, action.Request) + result := node.ExecRequest(s.ctx, action.Request) if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) { return true @@ -1822,7 +1654,7 @@ func assertClientIntrospectionResults( action ClientIntrospectionRequest, ) bool { for _, node := range getNodes(action.NodeID, s.nodes) { - result := node.DB.ExecRequest(s.ctx, action.Request) + result := node.ExecRequest(s.ctx, action.Request) if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) { return true From 20b6329e912b72f27ae24cca89b8d255d24f175a Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Tue, 17 Oct 2023 12:17:46 -0400 Subject: [PATCH 28/55] feat: Remove collection from patch schema (#1957) ## Relevant issue(s) Resolves #1956 ## Description Removes collectionDescription from patch schema. PatchSchema now only uses collectionDescription internally to update it if requested (bool param). This now means that the only place remaining (minus a util func that got missed) that refs collectionDescription.Schema should be the serialization. This will be sorted out in a later PR, before removal of the property completely. --- client/db.go | 2 +- db/collection.go | 78 +------- db/schema.go | 73 +++---- .../simple/peer/with_create_add_field_test.go | 6 +- .../simple/peer/with_update_add_field_test.go | 4 +- .../replicator/with_create_add_field_test.go | 6 +- .../replicator/with_update_add_field_test.go | 4 +- .../schema/migrations/query/simple_test.go | 32 +-- .../migrations/query/with_dockey_test.go | 4 +- .../schema/migrations/query/with_p2p_test.go | 8 +- .../migrations/query/with_restart_test.go | 2 +- .../migrations/query/with_set_default_test.go | 6 +- .../schema/migrations/query/with_txn_test.go | 4 +- .../migrations/query/with_update_test.go | 4 +- .../updates/add/field/crdt/composite_test.go | 2 +- .../updates/add/field/crdt/invalid_test.go | 2 +- .../schema/updates/add/field/crdt/lww_test.go | 2 +- .../updates/add/field/crdt/none_test.go | 4 +- .../add/field/crdt/object_bool_test.go | 2 +- .../schema/updates/add/field/create_test.go | 4 +- .../updates/add/field/create_update_test.go | 4 +- .../updates/add/field/kind/bool_array_test.go | 6 +- .../add/field/kind/bool_nil_array_test.go | 6 +- .../updates/add/field/kind/bool_test.go | 6 +- .../updates/add/field/kind/datetime_test.go | 6 +- .../updates/add/field/kind/dockey_test.go | 6 +- .../add/field/kind/float_array_test.go | 6 +- .../add/field/kind/float_nil_array_test.go | 6 +- .../updates/add/field/kind/float_test.go | 6 +- .../field/kind/foreign_object_array_test.go | 114 +++++------ .../add/field/kind/foreign_object_test.go | 122 ++++++------ .../updates/add/field/kind/int_array_test.go | 6 +- .../add/field/kind/int_nil_array_test.go | 6 +- .../schema/updates/add/field/kind/int_test.go | 6 +- .../updates/add/field/kind/invalid_test.go | 16 +- .../updates/add/field/kind/none_test.go | 2 +- .../add/field/kind/string_array_test.go | 6 +- .../add/field/kind/string_nil_array_test.go | 6 +- .../updates/add/field/kind/string_test.go | 6 +- .../schema/updates/add/field/simple_test.go | 26 +-- .../updates/add/field/with_filter_test.go | 4 +- .../updates/add/field/with_index_sub_test.go | 10 +- .../add/field/with_introspection_test.go | 6 +- 
 .../schema/updates/add/simple_test.go         |   4 +-
 .../schema/updates/copy/field/simple_test.go  |  24 +--
 .../copy/field/with_introspection_test.go     |   6 +-
 .../schema/updates/copy/simple_test.go        |  10 +-
 .../schema/updates/index/simple_test.go       | 184 +-----------------
 .../schema/updates/move/field/simple_test.go  |   2 +-
 .../updates/remove/fields/simple_test.go      |  18 +-
 .../schema/updates/remove/simple_test.go      |  33 +---
 .../updates/replace/field/simple_test.go      |   4 +-
 .../schema/updates/replace/simple_test.go     |   9 +-
 .../schema/updates/test/add_field_test.go     |   8 +-
 .../schema/updates/test/field/simple_test.go  |  16 +-
 .../schema/with_update_set_default_test.go    |   8 +-
 56 files changed, 343 insertions(+), 620 deletions(-)

diff --git a/client/db.go b/client/db.go
index 5e4873d8dc..0b947ad88a 100644
--- a/client/db.go
+++ b/client/db.go
@@ -92,7 +92,7 @@ type Store interface {
 	// types previously defined.
 	AddSchema(context.Context, string) ([]CollectionDescription, error)
 
-	// PatchSchema takes the given JSON patch string and applies it to the set of CollectionDescriptions
+	// PatchSchema takes the given JSON patch string and applies it to the set of SchemaDescriptions
 	// present in the database. If true is provided, the new schema versions will be made default, otherwise
 	// [SetDefaultSchemaVersion] should be called to set them so.
 	//
diff --git a/db/collection.go b/db/collection.go
index 8fdf9089ed..b86024e16e 100644
--- a/db/collection.go
+++ b/db/collection.go
@@ -190,18 +190,17 @@ func (db *db) createCollection(
 	return db.getCollectionByName(ctx, txn, desc.Name)
 }
 
-// updateCollection updates the persisted collection description matching the name of the given
+// updateSchema updates the persisted schema description matching the name of the given
 // description, to the values in the given description.
 //
-// It will validate the given description using [ValidateUpdateCollectionTxn] before updating it.
+// It will validate the given description using [validateUpdateSchema] before updating it.
 //
-// The collection (including the schema version ID) will only be updated if any changes have actually
+// The schema (including the schema version ID) will only be updated if any changes have actually
 // been made, if the given description matches the current persisted description then no changes will be
 // applied.
-func (db *db) updateCollection(
+func (db *db) updateSchema(
 	ctx context.Context,
 	txn datastore.Txn,
-	existingDescriptionsByName map[string]client.CollectionDescription,
 	existingSchemaByName map[string]client.SchemaDescription,
 	proposedDescriptionsByName map[string]client.SchemaDescription,
 	def client.CollectionDefinition,
@@ -210,12 +209,7 @@
 	schema := def.Schema
 	desc := def.Description
 
-	hasChanged, err := db.validateUpdateCollection(ctx, existingDescriptionsByName, desc)
-	if err != nil {
-		return nil, err
-	}
-
-	hasSchemaChanged, err := db.validateUpdateSchema(
+	hasChanged, err := db.validateUpdateSchema(
 		ctx,
 		txn,
 		existingSchemaByName,
@@ -226,7 +220,6 @@
 		return nil, err
 	}
 
-	hasChanged = hasChanged || hasSchemaChanged
 	if !hasChanged {
 		return db.getCollectionByName(ctx, txn, desc.Name)
 	}
@@ -304,32 +297,6 @@
 	return db.getCollectionByName(ctx, txn, desc.Name)
 }
 
-// validateUpdateCollection validates that the given collection description is a valid update.
-//
-// Will return true if the given description differs from the current persisted state of the
-// collection. Will return an error if it fails validation.
-func (db *db) validateUpdateCollection(
-	ctx context.Context,
-	existingDescriptionsByName map[string]client.CollectionDescription,
-	proposedDesc client.CollectionDescription,
-) (bool, error) {
-	if proposedDesc.Name == "" {
-		return false, ErrCollectionNameEmpty
-	}
-
-	existingDesc, collectionExists := existingDescriptionsByName[proposedDesc.Name]
-	if !collectionExists {
-		return false, NewErrAddCollectionWithPatch(proposedDesc.Name)
-	}
-
-	if proposedDesc.ID != existingDesc.ID {
-		return false, NewErrCollectionIDDoesntMatch(proposedDesc.Name, existingDesc.ID, proposedDesc.ID)
-	}
-
-	hasChangedIndexes, err := validateUpdateCollectionIndexes(existingDesc.Indexes, proposedDesc.Indexes)
-	return hasChangedIndexes, err
-}
-
 // validateUpdateSchema validates that the given schema description is a valid update.
 //
 // Will return true if the given description differs from the current persisted state of the
@@ -341,10 +308,6 @@ func (db *db) validateUpdateSchema(
 	ctx context.Context,
 	txn datastore.Txn,
 	proposedDescriptionsByName map[string]client.SchemaDescription,
 	proposedDesc client.SchemaDescription,
 ) (bool, error) {
-	if proposedDesc.Name == "" {
-		return false, ErrSchemaNameEmpty
-	}
-
 	existingDesc, collectionExists := existingDescriptionsByName[proposedDesc.Name]
 	if !collectionExists {
 		return false, NewErrAddCollectionWithPatch(proposedDesc.Name)
 	}
@@ -564,37 +527,6 @@ func validateUpdateSchemaFields(
 	return hasChanged, nil
 }
 
-func validateUpdateCollectionIndexes(
-	existingIndexes []client.IndexDescription,
-	proposedIndexes []client.IndexDescription,
-) (bool, error) {
-	existingNameToIndex := map[string]client.IndexDescription{}
-	for _, index := range existingIndexes {
-		existingNameToIndex[index.Name] = index
-	}
-	for _, proposedIndex := range proposedIndexes {
-		if existingIndex, exists := existingNameToIndex[proposedIndex.Name]; exists {
-			if len(existingIndex.Fields) != len(proposedIndex.Fields) {
-				return false, ErrCanNotChangeIndexWithPatch
-			}
-			for i := range existingIndex.Fields {
-				if existingIndex.Fields[i] != proposedIndex.Fields[i] {
-					return false, ErrCanNotChangeIndexWithPatch
-				}
-			}
-			delete(existingNameToIndex, proposedIndex.Name)
-		} else {
-			return false, NewErrCannotAddIndexWithPatch(proposedIndex.Name)
-		}
-	}
-	if len(existingNameToIndex) > 0 {
-		for _, index := range existingNameToIndex {
-			return false, NewErrCannotDropIndexWithPatch(index.Name)
-		}
-	}
-	return false, nil
-}
-
 func (db *db) setDefaultSchemaVersion(
 	ctx context.Context,
 	txn datastore.Txn,
diff --git a/db/schema.go b/db/schema.go
index ec14393563..3b3f4b6eb3 100644
--- a/db/schema.go
+++ b/db/schema.go
@@ -27,9 +27,8 @@ import (
 
 const (
 	schemaNamePathIndex int = 0
-	schemaPathIndex     int = 1
-	fieldsPathIndex     int = 2
-	fieldIndexPathIndex int = 3
+	fieldsPathIndex     int = 1
+	fieldIndexPathIndex int = 2
 )
 
 // addSchema takes the provided schema in SDL format, and applies it to the database,
@@ -85,7 +84,7 @@ func (db *db) loadSchema(ctx context.Context, txn datastore.Txn) error {
 	return db.parser.SetSchema(ctx, txn, definitions)
 }
 
-// patchSchema takes the given JSON patch string and applies it to the set of CollectionDescriptions
+// patchSchema takes the given JSON patch string and applies it to the set of SchemaDescriptions
 // present in the database.
 //
 // It will also update the GQL types used by the query system. It will error and not apply any of the
@@ -113,12 +112,12 @@ func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString st
 	}
 
 	// Here we swap out any string representations of enums for their integer values
-	patch, err = substituteSchemaPatch(patch, collectionsByName)
+	patch, err = substituteSchemaPatch(patch, existingSchemaByName)
 	if err != nil {
 		return err
 	}
 
-	existingDescriptionJson, err := json.Marshal(collectionsByName)
+	existingDescriptionJson, err := json.Marshal(existingSchemaByName)
 	if err != nil {
 		return err
 	}
@@ -128,28 +127,33 @@ func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString st
 		return err
 	}
 
-	var newDescriptionsByName map[string]client.CollectionDescription
+	var newSchemaByName map[string]client.SchemaDescription
 	decoder := json.NewDecoder(strings.NewReader(string(newDescriptionJson)))
 	decoder.DisallowUnknownFields()
 
-	err = decoder.Decode(&newDescriptionsByName)
+	err = decoder.Decode(&newSchemaByName)
 	if err != nil {
 		return err
 	}
 
 	newCollections := []client.CollectionDefinition{}
-	newSchemaByName := map[string]client.SchemaDescription{}
-	for _, desc := range newDescriptionsByName {
-		def := client.CollectionDefinition{Description: desc, Schema: desc.Schema}
+	for _, schema := range newSchemaByName {
+		if schema.Name == "" {
+			return ErrSchemaNameEmpty
+		}
+
+		collectionDescription, ok := collectionsByName[schema.Name]
+		if !ok {
+			return NewErrAddCollectionWithPatch(schema.Name)
+		}
+		def := client.CollectionDefinition{Description: collectionDescription, Schema: schema}
 		newCollections = append(newCollections, def)
-		newSchemaByName[def.Schema.Name] = def.Schema
 	}
 
 	for i, col := range newCollections {
-		col, err := db.updateCollection(
+		col, err := db.updateSchema(
 			ctx,
 			txn,
-			collectionsByName,
 			existingSchemaByName,
 			newSchemaByName,
 			col,
@@ -189,13 +193,13 @@ func (db *db) getCollectionsByName(
 // value.
 func substituteSchemaPatch(
 	patch jsonpatch.Patch,
-	collectionsByName map[string]client.CollectionDescription,
+	schemaByName map[string]client.SchemaDescription,
 ) (jsonpatch.Patch, error) {
-	fieldIndexesByCollection := make(map[string]map[string]int, len(collectionsByName))
-	for colName, col := range collectionsByName {
-		fieldIndexesByName := make(map[string]int, len(col.Schema.Fields))
-		fieldIndexesByCollection[colName] = fieldIndexesByName
-		for i, field := range col.Schema.Fields {
+	fieldIndexesBySchema := make(map[string]map[string]int, len(schemaByName))
+	for schemaName, schema := range schemaByName {
+		fieldIndexesByName := make(map[string]int, len(schema.Fields))
+		fieldIndexesBySchema[schemaName] = fieldIndexesByName
+		for i, field := range schema.Fields {
 			fieldIndexesByName[field.Name] = i
 		}
 	}
@@ -238,9 +242,9 @@
 				newPatchValue = immutable.Some[any](field)
 			}
 
-			desc := collectionsByName[splitPath[schemaNamePathIndex]]
+			desc := schemaByName[splitPath[schemaNamePathIndex]]
 			var index string
-			if fieldIndexesByName, ok := fieldIndexesByCollection[desc.Name]; ok {
+			if fieldIndexesByName, ok := fieldIndexesBySchema[desc.Name]; ok {
 				if i, ok := fieldIndexesByName[fieldIndexer]; ok {
 					index = fmt.Sprint(i)
 				}
@@ -249,7 +253,7 @@
 				index = "-"
 				// If this is a new field we need to track its location so that subsequent operations
 				// within the patch may access it by field name.
-				fieldIndexesByCollection[desc.Name][fieldIndexer] = len(fieldIndexesByCollection[desc.Name])
+				fieldIndexesBySchema[desc.Name][fieldIndexer] = len(fieldIndexesBySchema[desc.Name])
 			}
 
 			splitPath[fieldIndexPathIndex] = index
@@ -261,17 +265,17 @@
 		if isField {
 			if kind, isString := field["Kind"].(string); isString {
-				substitute, collectionName, err := getSubstituteFieldKind(kind, collectionsByName)
+				substitute, schemaName, err := getSubstituteFieldKind(kind, schemaByName)
 				if err != nil {
 					return nil, err
 				}
 
 				field["Kind"] = substitute
-				if collectionName != "" {
-					if field["Schema"] != nil && field["Schema"] != collectionName {
+				if schemaName != "" {
+					if field["Schema"] != nil && field["Schema"] != schemaName {
 						return nil, NewErrFieldKindDoesNotMatchFieldSchema(kind, field["Schema"].(string))
 					}
-					field["Schema"] = collectionName
+					field["Schema"] = schemaName
 				}
 
 				newPatchValue = immutable.Some[any](field)
@@ -284,7 +288,7 @@
 			}
 
 			if kind, isString := kind.(string); isString {
-				substitute, _, err := getSubstituteFieldKind(kind, collectionsByName)
+				substitute, _, err := getSubstituteFieldKind(kind, schemaByName)
 				if err != nil {
 					return nil, err
 				}
@@ -314,7 +318,7 @@
 // If the value represents a foreign relation the collection name will also be returned.
 func getSubstituteFieldKind(
 	kind string,
-	collectionsByName map[string]client.CollectionDescription,
+	schemaByName map[string]client.SchemaDescription,
 ) (client.FieldKind, string, error) {
 	substitute, substituteFound := client.FieldKindStringToEnumMapping[kind]
 	if substituteFound {
@@ -330,7 +334,7 @@
 		substitute = client.FieldKind_FOREIGN_OBJECT
 	}
 
-	if _, substituteFound := collectionsByName[collectionName]; substituteFound {
+	if _, substituteFound := schemaByName[collectionName]; substituteFound {
 		return substitute, collectionName, nil
 	}
 
@@ -341,20 +345,19 @@
 // isFieldOrInner returns true if the given path points to a FieldDescription or a property within it.
 func isFieldOrInner(path []string) bool {
 	//nolint:goconst
-	return len(path) >= 4 && path[fieldsPathIndex] == "Fields" && path[schemaPathIndex] == "Schema"
+	return len(path) >= 3 && path[fieldsPathIndex] == "Fields"
 }
 
 // isField returns true if the given path points to a FieldDescription.
 func isField(path []string) bool {
-	return len(path) == 4 && path[fieldsPathIndex] == "Fields" && path[schemaPathIndex] == "Schema"
+	return len(path) == 3 && path[fieldsPathIndex] == "Fields"
 }
 
 // isField returns true if the given path points to a FieldDescription.Kind property.
 func isFieldKind(path []string) bool {
-	return len(path) == 5 &&
-		path[fieldIndexPathIndex+1] == "Kind" &&
-		path[fieldsPathIndex] == "Fields" &&
-		path[schemaPathIndex] == "Schema"
+	return len(path) == 4 &&
+		path[fieldIndexPathIndex+1] == "Kind" &&
+		path[fieldsPathIndex] == "Fields"
 }
 
 // containsLetter returns true if the string contains a single unicode character.
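
The net effect of the `db/schema.go` changes above is that the `Schema` segment disappears from schema patch paths: what was `/Users/Schema/Fields/-` becomes `/Users/Fields/-`, and the path-index constants shift down by one. A minimal standalone sketch (not DefraDB code; only the constant names and example paths are taken from the diff) of how the new three-segment path is indexed:

```go
package main

import (
	"fmt"
	"strings"
)

// Path segment indexes matching the new flattened layout: with the
// "Schema" segment removed, "/<schema name>/Fields/<field index>"
// has exactly three segments.
const (
	schemaNamePathIndex int = 0
	fieldsPathIndex     int = 1
	fieldIndexPathIndex int = 2
)

func main() {
	// Previously: "/Users/Schema/Fields/-"; the "Schema" hop is now gone.
	path := strings.TrimPrefix("/Users/Fields/-", "/")
	splitPath := strings.Split(path, "/")

	fmt.Println(splitPath[schemaNamePathIndex]) // Users
	fmt.Println(splitPath[fieldsPathIndex])     // Fields
	fmt.Println(splitPath[fieldIndexPathIndex]) // "-" (append to end of Fields)
}
```

Every test patch in the hunks that follow is rewritten mechanically along these lines, dropping the `Schema` segment from each `"path"` value.
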
diff --git a/tests/integration/net/state/simple/peer/with_create_add_field_test.go b/tests/integration/net/state/simple/peer/with_create_add_field_test.go index 034340b92e..31861d6498 100644 --- a/tests/integration/net/state/simple/peer/with_create_add_field_test.go +++ b/tests/integration/net/state/simple/peer/with_create_add_field_test.go @@ -35,7 +35,7 @@ func TestP2PPeerCreateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing.T) { NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -108,7 +108,7 @@ func TestP2PPeerCreateWithNewFieldSyncsDocsToNewerSchemaVersion(t *testing.T) { NodeID: immutable.Some(1), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -164,7 +164,7 @@ func TestP2PPeerCreateWithNewFieldSyncsDocsToUpdatedSchemaVersion(t *testing.T) // Patch the schema on all nodes Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, diff --git a/tests/integration/net/state/simple/peer/with_update_add_field_test.go b/tests/integration/net/state/simple/peer/with_update_add_field_test.go index 89ab3a99b0..88e86a75a3 100644 --- a/tests/integration/net/state/simple/peer/with_update_add_field_test.go +++ b/tests/integration/net/state/simple/peer/with_update_add_field_test.go @@ -48,7 +48,7 @@ func TestP2PPeerUpdateWithNewFieldSyncsDocsToOlderSchemaVersionMultistep(t *test NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -133,7 +133,7 @@ func TestP2PPeerUpdateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing.T) { NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, diff --git a/tests/integration/net/state/simple/replicator/with_create_add_field_test.go b/tests/integration/net/state/simple/replicator/with_create_add_field_test.go index 3e36b5c847..f73c731666 100644 --- a/tests/integration/net/state/simple/replicator/with_create_add_field_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_add_field_test.go @@ -35,7 +35,7 @@ func TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToOlderSchemaVersion(t NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -87,7 +87,7 @@ func TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToNewerSchemaVersion(t NodeID: immutable.Some(1), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -137,7 +137,7 @@ func TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToUpdatedSchemaVersion( // Patch the schema on all nodes Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { 
"op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, diff --git a/tests/integration/net/state/simple/replicator/with_update_add_field_test.go b/tests/integration/net/state/simple/replicator/with_update_add_field_test.go index 22786fcaad..52f67d324d 100644 --- a/tests/integration/net/state/simple/replicator/with_update_add_field_test.go +++ b/tests/integration/net/state/simple/replicator/with_update_add_field_test.go @@ -44,7 +44,7 @@ func TestP2PReplicatorUpdateWithNewFieldSyncsDocsToOlderSchemaVersionMultistep(t NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -125,7 +125,7 @@ func TestP2PReplicatorUpdateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, diff --git a/tests/integration/schema/migrations/query/simple_test.go b/tests/integration/schema/migrations/query/simple_test.go index d6075f3496..f7bac725cf 100644 --- a/tests/integration/schema/migrations/query/simple_test.go +++ b/tests/integration/schema/migrations/query/simple_test.go @@ -39,7 +39,7 @@ func TestSchemaMigrationQuery(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -109,7 +109,7 @@ func TestSchemaMigrationQueryMultipleDocs(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -196,7 +196,7 @@ func TestSchemaMigrationQueryWithMigrationRegisteredBeforeSchemaPatch(t *testing testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -239,14 +239,14 @@ func TestSchemaMigrationQueryMigratesToIntermediaryVersion(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, @@ -310,14 +310,14 @@ func TestSchemaMigrationQueryMigratesFromIntermediaryVersion(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, @@ -381,14 +381,14 @@ func 
TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, @@ -473,7 +473,7 @@ func TestSchemaMigrationQueryWithUnknownSchemaMigration(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -533,7 +533,7 @@ func TestSchemaMigrationQueryMigrationMutatesExistingScalarField(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -594,7 +594,7 @@ func TestSchemaMigrationQueryMigrationMutatesExistingInlineArrayField(t *testing testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -657,7 +657,7 @@ func TestSchemaMigrationQueryMigrationRemovesExistingField(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -718,7 +718,7 @@ func TestSchemaMigrationQueryMigrationPreservesExistingFieldWhenFieldNotRequeste testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -792,7 +792,7 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcFieldNotRequeste testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "yearsLived", "Kind": "Int"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "yearsLived", "Kind": "Int"} } ] `, }, @@ -854,7 +854,7 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcAndDstFieldNotRe testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "yearsLived", "Kind": "Int"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "yearsLived", "Kind": "Int"} } ] `, }, diff --git a/tests/integration/schema/migrations/query/with_dockey_test.go b/tests/integration/schema/migrations/query/with_dockey_test.go index b4bc60aab8..c5c5a868f5 100644 --- a/tests/integration/schema/migrations/query/with_dockey_test.go +++ b/tests/integration/schema/migrations/query/with_dockey_test.go @@ -46,7 +46,7 @@ func TestSchemaMigrationQueryByDocKey(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -152,7 +152,7 @@ func 
TestSchemaMigrationQueryMultipleQueriesByDocKey(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, diff --git a/tests/integration/schema/migrations/query/with_p2p_test.go b/tests/integration/schema/migrations/query/with_p2p_test.go index 303ea82598..50fbe12a5d 100644 --- a/tests/integration/schema/migrations/query/with_p2p_test.go +++ b/tests/integration/schema/migrations/query/with_p2p_test.go @@ -39,7 +39,7 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtOlderSchemaVersion(t *testing NodeID: immutable.Some(1), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, @@ -129,7 +129,7 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtNewerSchemaVersion(t *testing NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, @@ -221,7 +221,7 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchNewerSchemaVersionWithSch NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -230,7 +230,7 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchNewerSchemaVersionWithSch NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, diff --git a/tests/integration/schema/migrations/query/with_restart_test.go b/tests/integration/schema/migrations/query/with_restart_test.go index a2ba505cb7..007a92ab1e 100644 --- a/tests/integration/schema/migrations/query/with_restart_test.go +++ b/tests/integration/schema/migrations/query/with_restart_test.go @@ -39,7 +39,7 @@ func TestSchemaMigrationQueryWithRestart(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, diff --git a/tests/integration/schema/migrations/query/with_set_default_test.go b/tests/integration/schema/migrations/query/with_set_default_test.go index 3c1a873c3e..a33ccc9277 100644 --- a/tests/integration/schema/migrations/query/with_set_default_test.go +++ b/tests/integration/schema/migrations/query/with_set_default_test.go @@ -43,7 +43,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t * testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, SetAsDefaultVersion: immutable.Some(false), @@ -106,7 +106,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": 
"/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, SetAsDefaultVersion: immutable.Some(false), @@ -188,7 +188,7 @@ func TestSchemaMigrationQuery_WithSetDefaultToOriginalVersionThatDocWasCreatedAt testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, SetAsDefaultVersion: immutable.Some(true), diff --git a/tests/integration/schema/migrations/query/with_txn_test.go b/tests/integration/schema/migrations/query/with_txn_test.go index 7d73288e01..449d2f4590 100644 --- a/tests/integration/schema/migrations/query/with_txn_test.go +++ b/tests/integration/schema/migrations/query/with_txn_test.go @@ -40,7 +40,7 @@ func TestSchemaMigrationQueryWithTxn(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -102,7 +102,7 @@ func TestSchemaMigrationQueryWithTxnAndCommit(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, diff --git a/tests/integration/schema/migrations/query/with_update_test.go b/tests/integration/schema/migrations/query/with_update_test.go index 6ab0957634..1a54fd264d 100644 --- a/tests/integration/schema/migrations/query/with_update_test.go +++ b/tests/integration/schema/migrations/query/with_update_test.go @@ -39,7 +39,7 @@ func TestSchemaMigrationQueryWithUpdateRequest(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -117,7 +117,7 @@ func TestSchemaMigrationQueryWithMigrationRegisteredAfterUpdate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/crdt/composite_test.go b/tests/integration/schema/updates/add/field/crdt/composite_test.go index 1ad0dc06d2..e1891f95f7 100644 --- a/tests/integration/schema/updates/add/field/crdt/composite_test.go +++ b/tests/integration/schema/updates/add/field/crdt/composite_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldCRDTCompositeErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":3} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":3} } ] `, ExpectedError: "only default or LWW (last writer wins) CRDT types are supported. 
Name: foo, CRDTType: 3", diff --git a/tests/integration/schema/updates/add/field/crdt/invalid_test.go b/tests/integration/schema/updates/add/field/crdt/invalid_test.go index 0c899155fb..dee615dac2 100644 --- a/tests/integration/schema/updates/add/field/crdt/invalid_test.go +++ b/tests/integration/schema/updates/add/field/crdt/invalid_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldCRDTInvalidErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":99} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":99} } ] `, ExpectedError: "only default or LWW (last writer wins) CRDT types are supported. Name: foo, CRDTType: 99", diff --git a/tests/integration/schema/updates/add/field/crdt/lww_test.go b/tests/integration/schema/updates/add/field/crdt/lww_test.go index c8a4b93007..5d75d4db6e 100644 --- a/tests/integration/schema/updates/add/field/crdt/lww_test.go +++ b/tests/integration/schema/updates/add/field/crdt/lww_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldCRDTLWW(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":1} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":1} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/crdt/none_test.go b/tests/integration/schema/updates/add/field/crdt/none_test.go index 2ed83e3898..c49faa3904 100644 --- a/tests/integration/schema/updates/add/field/crdt/none_test.go +++ b/tests/integration/schema/updates/add/field/crdt/none_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldCRDTDefault(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldCRDTNone(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":0} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":0} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/crdt/object_bool_test.go b/tests/integration/schema/updates/add/field/crdt/object_bool_test.go index 5d87c8a57e..d36af59dc1 100644 --- a/tests/integration/schema/updates/add/field/crdt/object_bool_test.go +++ b/tests/integration/schema/updates/add/field/crdt/object_bool_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldCRDTObjectWithBoolFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":2} } ] `, ExpectedError: "only default or LWW (last writer wins) CRDT types are supported. 
Name: foo, CRDTType: 2", diff --git a/tests/integration/schema/updates/add/field/create_test.go b/tests/integration/schema/updates/add/field/create_test.go index a6a14f2142..d59df6c294 100644 --- a/tests/integration/schema/updates/add/field/create_test.go +++ b/tests/integration/schema/updates/add/field/create_test.go @@ -36,7 +36,7 @@ func TestSchemaUpdatesAddFieldWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -84,7 +84,7 @@ func TestSchemaUpdatesAddFieldWithCreateAfterSchemaUpdate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/create_update_test.go b/tests/integration/schema/updates/add/field/create_update_test.go index b9c9fdf7a6..5a61fac610 100644 --- a/tests/integration/schema/updates/add/field/create_update_test.go +++ b/tests/integration/schema/updates/add/field/create_update_test.go @@ -61,7 +61,7 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoi testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -127,7 +127,7 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndCommitQuer testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/bool_array_test.go b/tests/integration/schema/updates/add/field/kind/bool_array_test.go index ee8d53644e..64df51a18c 100644 --- a/tests/integration/schema/updates/add/field/kind/bool_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/bool_array_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindBoolArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 3} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 3} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindBoolArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 3} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 3} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindBoolArraySubstitutionWithCreate(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Boolean!]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Boolean!]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go index e0c664127b..899d0cba36 100644 --- a/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go @@ -32,7 +32,7 @@ func 
TestSchemaUpdatesAddFieldKindNillableBoolArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 18} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 18} } ] `, }, @@ -64,7 +64,7 @@ func TestSchemaUpdatesAddFieldKindNillableBoolArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 18} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 18} } ] `, }, @@ -108,7 +108,7 @@ func TestSchemaUpdatesAddFieldKindNillableBoolArraySubstitutionWithCreate(t *tes testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Boolean]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Boolean]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/bool_test.go b/tests/integration/schema/updates/add/field/kind/bool_test.go index 7be3801bc3..c77d187dbb 100644 --- a/tests/integration/schema/updates/add/field/kind/bool_test.go +++ b/tests/integration/schema/updates/add/field/kind/bool_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindBool(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindBoolWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindBoolSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "Boolean"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/datetime_test.go b/tests/integration/schema/updates/add/field/kind/datetime_test.go index 5363864c47..6ebcc3af6f 100644 --- a/tests/integration/schema/updates/add/field/kind/datetime_test.go +++ b/tests/integration/schema/updates/add/field/kind/datetime_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindDateTime(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 10} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 10} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindDateTimeWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 4} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 4} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindDateTimeSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "DateTime"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "DateTime"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/dockey_test.go b/tests/integration/schema/updates/add/field/kind/dockey_test.go index f4d5d9aabe..6d8aca4736 
100644 --- a/tests/integration/schema/updates/add/field/kind/dockey_test.go +++ b/tests/integration/schema/updates/add/field/kind/dockey_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindDocKey(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 1} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 1} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindDocKeyWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 1} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 1} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindDocKeySubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "ID"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "ID"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/float_array_test.go b/tests/integration/schema/updates/add/field/kind/float_array_test.go index 86e8ddd882..dcf9fd3d42 100644 --- a/tests/integration/schema/updates/add/field/kind/float_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/float_array_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindFloatArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 7} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 7} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindFloatArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 7} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 7} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindFloatArraySubstitutionWithCreate(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Float!]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Float!]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go index 4cb1bb8133..9dd4209a38 100644 --- a/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go @@ -32,7 +32,7 @@ func TestSchemaUpdatesAddFieldKindNillableFloatArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 20} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 20} } ] `, }, @@ -64,7 +64,7 @@ func TestSchemaUpdatesAddFieldKindNillableFloatArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 20} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 20} } ] `, }, @@ -112,7 +112,7 @@ func TestSchemaUpdatesAddFieldKindNillableFloatArraySubstitutionWithCreate(t *te testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Float]"} } + { "op": "add", "path": "/Users/Fields/-", 
"value": {"Name": "foo", "Kind": "[Float]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/float_test.go b/tests/integration/schema/updates/add/field/kind/float_test.go index 9411a4e7d1..b145d4c148 100644 --- a/tests/integration/schema/updates/add/field/kind/float_test.go +++ b/tests/integration/schema/updates/add/field/kind/float_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindFloat(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 6} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 6} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindFloatWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 6} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 6} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindFloatSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "Float"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "Float"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go b/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go index af852c8dd6..a3dc12fb13 100644 --- a/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 17} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 17} } ] `, ExpectedError: "a `Schema` [name] must be provided when adding a new relation field. Field: foo, Kind: 17", @@ -55,10 +55,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_InvalidSchemaJson(t *testin testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 17, "Schema": 123} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 17, "Schema": 123} } ] `, - ExpectedError: "json: cannot unmarshal number into Go struct field FieldDescription.Schema.Fields.Schema of type string", + ExpectedError: "json: cannot unmarshal number into Go struct field FieldDescription.Fields.Schema of type string", }, }, } @@ -79,7 +79,7 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingRelationType(t *test testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 17, "Schema": "Users"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 17, "Schema": "Users"} } ] `, ExpectedError: "invalid RelationType. 
Field: foo, Expected: 10, Actual: 0", @@ -103,7 +103,7 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingRelationName(t *test testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 17, "RelationType": 10, "Schema": "Users" }} ] @@ -129,10 +129,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldMissingKind(t *testi testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id"} } ] `, ExpectedError: "relational id field of invalid kind. Field: foo_id, Expected: ID, Actual: 0", @@ -156,10 +156,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldInvalidKind(t *testi testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 2} } ] `, ExpectedError: "relational id field of invalid kind. Field: foo_id, Expected: ID, Actual: Boolean", @@ -183,10 +183,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldMissingRelationType( testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } ] `, ExpectedError: "invalid RelationType. Field: foo_id, Expected: 64, Actual: 0", @@ -210,10 +210,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldInvalidRelationType( testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 4} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 4} } ] `, ExpectedError: "invalid RelationType. Field: foo_id, Expected: 64, Actual: 4", @@ -237,10 +237,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldMissingRelationName( testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 64} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 64} } ] `, ExpectedError: "missing relation name. 
Field: foo_id", @@ -264,10 +264,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_OnlyHalfRelationDefined(t * testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -293,13 +293,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_NoPrimaryDefined(t *testing testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 17, "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -325,13 +325,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_PrimaryDefinedOnManySide(t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 17, "RelationType": 138, "Schema": "Users", "RelationName": "foo" }} ] @@ -357,13 +357,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_RelatedKindMismatch(t *test testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -389,13 +389,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_RelatedKindAndRelationTypeM testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" 
}} ] @@ -421,13 +421,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_RelatedRelationTypeMismatch testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }} ] @@ -455,13 +455,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_Succeeds(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 17, "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -551,13 +551,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_SinglePrimaryObjectKindSubs testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 17, "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -630,13 +630,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_SingleSecondaryObjectKindSu testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -709,13 +709,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_ObjectKindSubstitution(t *t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": 
"foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -788,13 +788,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_ObjectKindSubstitutionWithA testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "RelationName": "foo" }} ] @@ -872,13 +872,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_PrimaryObjectKindAndSchemaM testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Dog", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -911,13 +911,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_SecondaryObjectKindAndSchem testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Dog", "RelationName": "foo" }} ] @@ -945,10 +945,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingPrimaryIDField(t *te testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "RelationName": "foo" }} ] @@ -1022,10 +1022,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingPrimaryIDField_DoesN testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "RelationName": "foo" }} ] diff 
--git a/tests/integration/schema/updates/add/field/kind/foreign_object_test.go b/tests/integration/schema/updates/add/field/kind/foreign_object_test.go index e09aa4dfac..21afdec279 100644 --- a/tests/integration/schema/updates/add/field/kind/foreign_object_test.go +++ b/tests/integration/schema/updates/add/field/kind/foreign_object_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 16} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 16} } ] `, ExpectedError: "a `Schema` [name] must be provided when adding a new relation field. Field: foo, Kind: 16", @@ -55,10 +55,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_InvalidSchemaJson(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 16, "Schema": 123} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 16, "Schema": 123} } ] `, - ExpectedError: "json: cannot unmarshal number into Go struct field FieldDescription.Schema.Fields.Schema of type string", + ExpectedError: "json: cannot unmarshal number into Go struct field FieldDescription.Fields.Schema of type string", }, }, } @@ -79,7 +79,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingRelationType(t *testing.T testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 16, "Schema": "Users"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 16, "Schema": "Users"} } ] `, ExpectedError: "invalid RelationType. Field: foo, Expected: 1 and 4 or 8, with optionally 128, Actual: 0", @@ -103,7 +103,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject_UnknownSchema(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 5, "Schema": "Unknown" }} ] @@ -129,7 +129,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingRelationName(t *testing.T testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 5, "Schema": "Users" }} ] @@ -155,10 +155,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldMissingKind(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id"} } ] `, ExpectedError: "relational id field of invalid kind. 
Field: foo_id, Expected: ID, Actual: 0", @@ -182,10 +182,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldInvalidKind(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 2} } ] `, ExpectedError: "relational id field of invalid kind. Field: foo_id, Expected: ID, Actual: Boolean", @@ -209,10 +209,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldMissingRelationType(t *te testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } ] `, ExpectedError: "invalid RelationType. Field: foo_id, Expected: 64, Actual: 0", @@ -236,10 +236,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldInvalidRelationType(t *te testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 4} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 4} } ] `, ExpectedError: "invalid RelationType. Field: foo_id, Expected: 64, Actual: 4", @@ -263,10 +263,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldMissingRelationName(t *te testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 64} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 64} } ] `, ExpectedError: "missing relation name. 
Field: foo_id", @@ -290,10 +290,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_OnlyHalfRelationDefined(t *testi testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -319,13 +319,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_NoPrimaryDefined(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }} ] @@ -351,16 +351,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_BothSidesPrimary(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "Schema": "Users", "RelationName": "foo" }} ] @@ -386,13 +386,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_RelatedKindMismatch(t *testing.T testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 17, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }} ] @@ -418,13 +418,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_RelatedRelationTypeMismatch(t *t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": 
"foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" }} ] @@ -452,16 +452,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_Succeeds(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -549,16 +549,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_SinglePrimaryObjectKindSubstitut testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -629,16 +629,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_SingleSecondaryObjectKindSubstit testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -709,16 +709,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_ObjectKindSubstitution(t *testin testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", 
"Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -789,16 +789,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_ObjectKindSubstitutionWithAutoSc testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -874,16 +874,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_ObjectKindAndSchemaMismatch(t *t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "Schema": "Dog", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -911,13 +911,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingPrimaryIDField(t *testing testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -989,13 +989,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingSecondaryIDField(t *testi testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, 
"RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" }} ] diff --git a/tests/integration/schema/updates/add/field/kind/int_array_test.go b/tests/integration/schema/updates/add/field/kind/int_array_test.go index 4e7c732ec1..9a6d9e69af 100644 --- a/tests/integration/schema/updates/add/field/kind/int_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/int_array_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindIntArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 5} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 5} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindIntArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 5} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 5} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindIntArraySubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Int!]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Int!]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go index 0642ffa894..0de26958bb 100644 --- a/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go @@ -32,7 +32,7 @@ func TestSchemaUpdatesAddFieldKindNillableIntArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 19} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 19} } ] `, }, @@ -64,7 +64,7 @@ func TestSchemaUpdatesAddFieldKindNillableIntArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 19} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 19} } ] `, }, @@ -112,7 +112,7 @@ func TestSchemaUpdatesAddFieldKindNillableIntArraySubstitutionWithCreate(t *test testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Int]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Int]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/int_test.go b/tests/integration/schema/updates/add/field/kind/int_test.go index 3e12ed9106..390a6c049d 100644 --- a/tests/integration/schema/updates/add/field/kind/int_test.go +++ b/tests/integration/schema/updates/add/field/kind/int_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindInt(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 4} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 4} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindIntWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 4} } + { "op": 
"add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 4} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindIntSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "Int"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "Int"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/invalid_test.go b/tests/integration/schema/updates/add/field/kind/invalid_test.go index fa7556a86b..5e578e5307 100644 --- a/tests/integration/schema/updates/add/field/kind/invalid_test.go +++ b/tests/integration/schema/updates/add/field/kind/invalid_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKind8(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 8} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 8} } ] `, ExpectedError: "no type found for given name. Type: 8", @@ -54,7 +54,7 @@ func TestSchemaUpdatesAddFieldKind9(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 9} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 9} } ] `, ExpectedError: "no type found for given name. Type: 9", @@ -78,7 +78,7 @@ func TestSchemaUpdatesAddFieldKind13(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 13} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 13} } ] `, ExpectedError: "no type found for given name. Type: 13", @@ -102,7 +102,7 @@ func TestSchemaUpdatesAddFieldKind14(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 14} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 14} } ] `, ExpectedError: "no type found for given name. Type: 14", @@ -126,7 +126,7 @@ func TestSchemaUpdatesAddFieldKind15(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 15} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 15} } ] `, ExpectedError: "no type found for given name. Type: 15", @@ -152,7 +152,7 @@ func TestSchemaUpdatesAddFieldKind22(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 22} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 22} } ] `, ExpectedError: "no type found for given name. Type: 22", @@ -178,7 +178,7 @@ func TestSchemaUpdatesAddFieldKind198(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 198} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 198} } ] `, ExpectedError: "no type found for given name. Type: 198", @@ -202,7 +202,7 @@ func TestSchemaUpdatesAddFieldKindInvalidSubstitution(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "InvalidKind"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "InvalidKind"} } ] `, ExpectedError: "no type found for given name. 
Kind: InvalidKind", diff --git a/tests/integration/schema/updates/add/field/kind/none_test.go b/tests/integration/schema/updates/add/field/kind/none_test.go index 7e8c44dc73..ab6111dadb 100644 --- a/tests/integration/schema/updates/add/field/kind/none_test.go +++ b/tests/integration/schema/updates/add/field/kind/none_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindNone(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 0} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 0} } ] `, ExpectedError: "no type found for given name. Type: 0", diff --git a/tests/integration/schema/updates/add/field/kind/string_array_test.go b/tests/integration/schema/updates/add/field/kind/string_array_test.go index d3e03c8b35..b035162aed 100644 --- a/tests/integration/schema/updates/add/field/kind/string_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/string_array_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindStringArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 12} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 12} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindStringArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 12} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 12} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindStringArraySubstitutionWithCreate(t *testing.T testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[String!]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[String!]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go index c34fe22aba..9fc750cc80 100644 --- a/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go @@ -32,7 +32,7 @@ func TestSchemaUpdatesAddFieldKindNillableStringArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 21} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 21} } ] `, }, @@ -64,7 +64,7 @@ func TestSchemaUpdatesAddFieldKindNillableStringArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 21} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 21} } ] `, }, @@ -112,7 +112,7 @@ func TestSchemaUpdatesAddFieldKindNillableStringArraySubstitutionWithCreate(t *t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[String]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[String]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/string_test.go b/tests/integration/schema/updates/add/field/kind/string_test.go index f32f9409c4..336c2fe6de 100644 --- a/tests/integration/schema/updates/add/field/kind/string_test.go +++ 
b/tests/integration/schema/updates/add/field/kind/string_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindString(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 11} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindStringWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 11} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindStringSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "String"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/simple_test.go b/tests/integration/schema/updates/add/field/simple_test.go index 1fe6980a62..56931567d4 100644 --- a/tests/integration/schema/updates/add/field/simple_test.go +++ b/tests/integration/schema/updates/add/field/simple_test.go @@ -32,7 +32,7 @@ func TestSchemaUpdatesAddFieldSimple(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -64,7 +64,7 @@ func TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_Errors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, SetAsDefaultVersion: immutable.Some(false), @@ -129,8 +129,8 @@ func TestSchemaUpdatesAddFieldMultipleInPatch(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "city", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} }, + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "city", "Kind": 11} } ] `, }, @@ -163,14 +163,14 @@ func TestSchemaUpdatesAddFieldMultiplePatches(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "city", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "city", "Kind": 11} } ] `, }, @@ -203,7 +203,7 @@ func TestSchemaUpdatesAddFieldSimpleWithoutName(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Kind": 11} } ] `, ExpectedError: "Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but \"\" does not.", @@ -228,8 +228,8 @@ func TestSchemaUpdatesAddFieldMultipleInPatchPartialSuccess(t *testing.T) { // Email field is valid, City field has invalid kind Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} }, - { "op": "add", "path": 
"/Users/Schema/Fields/-", "value": {"Name": "city", "Kind": 111} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} }, + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "city", "Kind": 111} } ] `, ExpectedError: "no type found for given name. Type: 111", @@ -272,7 +272,7 @@ func TestSchemaUpdatesAddFieldSimpleDuplicateOfExistingField(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "name", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "name", "Kind": 11} } ] `, ExpectedError: "duplicate field. Name: name", @@ -296,8 +296,8 @@ func TestSchemaUpdatesAddFieldSimpleDuplicateField(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} }, + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, ExpectedError: "duplicate field. Name: email", @@ -321,7 +321,7 @@ func TestSchemaUpdatesAddFieldWithExplicitIDErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"ID": 2, "Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"ID": 2, "Name": "email", "Kind": 11} } ] `, ExpectedError: "explicitly setting a field ID value is not supported. Field: email, ID: 2", diff --git a/tests/integration/schema/updates/add/field/with_filter_test.go b/tests/integration/schema/updates/add/field/with_filter_test.go index decdb7b997..6161d9a237 100644 --- a/tests/integration/schema/updates/add/field/with_filter_test.go +++ b/tests/integration/schema/updates/add/field/with_filter_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldSimpleWithFilter(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -70,7 +70,7 @@ func TestSchemaUpdatesAddFieldSimpleWithFilterOnPopulatedDatabase(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/with_index_sub_test.go b/tests/integration/schema/updates/add/field/with_index_sub_test.go index eb4dc3d9c0..274e3aac2b 100644 --- a/tests/integration/schema/updates/add/field/with_index_sub_test.go +++ b/tests/integration/schema/updates/add/field/with_index_sub_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldSimple_FieldIndexedByName(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/email", "value": {"Kind": 11} } + { "op": "add", "path": "/Users/Fields/email", "value": {"Kind": 11} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldSimple_FieldIndexedByNameWithSameNameDefinedInValu testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/email", "value": {"Name": "email","Kind": 11} } + { "op": "add", "path": "/Users/Fields/email", "value": {"Name": "email","Kind": 11} } ] `, }, @@ -94,7 +94,7 @@ func 
TestSchemaUpdatesAddFieldSimple_FieldIndexedByNameWithDifferentNameDefinedI testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/email", "value": {"Name": "different field name","Kind": 11} } + { "op": "add", "path": "/Users/Fields/email", "value": {"Name": "different field name","Kind": 11} } ] `, ExpectedError: "the index used does not match the given name", @@ -118,8 +118,8 @@ func TestSchemaUpdatesAddFieldSimple_FieldIndexedByNameMultipleTimes(t *testing. testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/email", "value": {"Kind": 11} }, - { "op": "test", "path": "/Users/Schema/Fields/email/Kind", "value": 11 } + { "op": "add", "path": "/Users/Fields/email", "value": {"Kind": 11} }, + { "op": "test", "path": "/Users/Fields/email/Kind", "value": 11 } ] `, }, diff --git a/tests/integration/schema/updates/add/field/with_introspection_test.go b/tests/integration/schema/updates/add/field/with_introspection_test.go index df75ac43c3..ea9885674c 100644 --- a/tests/integration/schema/updates/add/field/with_introspection_test.go +++ b/tests/integration/schema/updates/add/field/with_introspection_test.go @@ -29,7 +29,7 @@ func TestSchemaUpdatesAddFieldIntrospection(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "name", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "name", "Kind": 11} } ] `, }, @@ -82,8 +82,8 @@ func TestSchemaUpdatesAddFieldIntrospectionDoesNotAmendGQLTypesGivenBadPatch(t * // [Name] should not be added to the GQL types. Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "name", "Kind": 11} }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 111} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "name", "Kind": 11} }, + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 111} } ] `, ExpectedError: "no type found for given name. 
Type: 111", diff --git a/tests/integration/schema/updates/add/simple_test.go b/tests/integration/schema/updates/add/simple_test.go index b8e4ce3a5f..0eac29b49a 100644 --- a/tests/integration/schema/updates/add/simple_test.go +++ b/tests/integration/schema/updates/add/simple_test.go @@ -86,7 +86,7 @@ func TestSchemaUpdatesAddSimpleErrorsAddingSchemaProp(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/-", "value": {"Foo": "Bar"} } + { "op": "add", "path": "/Users/-", "value": {"Foo": "Bar"} } ] `, ExpectedError: `json: unknown field "-"`, @@ -142,7 +142,7 @@ func TestSchemaUpdatesAddSimpleErrorsAddingUnsupportedSchemaProp(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Foo/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Foo/-", "value": {"Name": "email", "Kind": 11} } ] `, ExpectedError: "add operation does not apply: doc is missing path", diff --git a/tests/integration/schema/updates/copy/field/simple_test.go b/tests/integration/schema/updates/copy/field/simple_test.go index ff0680f55d..219169e103 100644 --- a/tests/integration/schema/updates/copy/field/simple_test.go +++ b/tests/integration/schema/updates/copy/field/simple_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesCopyFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/2" } + { "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/2" } ] `, ExpectedError: "duplicate field. Name: email", @@ -67,9 +67,9 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceName(t *testing.T) { // clone. Patch: ` [ - { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/3" }, - { "op": "remove", "path": "/Users/Schema/Fields/3/ID" }, - { "op": "replace", "path": "/Users/Schema/Fields/3/Name", "value": "fax" } + { "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/3" }, + { "op": "remove", "path": "/Users/Fields/3/ID" }, + { "op": "replace", "path": "/Users/Fields/3/Name", "value": "fax" } ] `, }, @@ -105,10 +105,10 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndKindSubstitution(t * // re-typing the clone. Patch: ` [ - { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/2" }, - { "op": "remove", "path": "/Users/Schema/Fields/2/ID" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Name", "value": "age" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Kind", "value": "Int" } + { "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/2" }, + { "op": "remove", "path": "/Users/Fields/2/ID" }, + { "op": "replace", "path": "/Users/Fields/2/Name", "value": "age" }, + { "op": "replace", "path": "/Users/Fields/2/Kind", "value": "Int" } ] `, }, @@ -156,10 +156,10 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndInvalidKindSubstitut // re-typing the clone. 
Patch: ` [ - { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/2" }, - { "op": "remove", "path": "/Users/Schema/Fields/2/ID" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Name", "value": "Age" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Kind", "value": "NotAValidKind" } + { "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/2" }, + { "op": "remove", "path": "/Users/Fields/2/ID" }, + { "op": "replace", "path": "/Users/Fields/2/Name", "value": "Age" }, + { "op": "replace", "path": "/Users/Fields/2/Kind", "value": "NotAValidKind" } ] `, ExpectedError: "no type found for given name. Kind: NotAValidKind", diff --git a/tests/integration/schema/updates/copy/field/with_introspection_test.go b/tests/integration/schema/updates/copy/field/with_introspection_test.go index 566b18db7c..2106d22b1b 100644 --- a/tests/integration/schema/updates/copy/field/with_introspection_test.go +++ b/tests/integration/schema/updates/copy/field/with_introspection_test.go @@ -31,9 +31,9 @@ func TestSchemaUpdatesCopyFieldIntrospectionWithRemoveIDAndReplaceName(t *testin testUtils.SchemaPatch{ Patch: ` [ - { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/2" }, - { "op": "remove", "path": "/Users/Schema/Fields/2/ID" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Name", "value": "fax" } + { "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/2" }, + { "op": "remove", "path": "/Users/Fields/2/ID" }, + { "op": "replace", "path": "/Users/Fields/2/Name", "value": "fax" } ] `, }, diff --git a/tests/integration/schema/updates/copy/simple_test.go b/tests/integration/schema/updates/copy/simple_test.go index 5b4c19ed22..37d0f63fa3 100644 --- a/tests/integration/schema/updates/copy/simple_test.go +++ b/tests/integration/schema/updates/copy/simple_test.go @@ -33,12 +33,10 @@ func TestSchemaUpdatesCopyCollectionWithRemoveIDAndReplaceName(t *testing.T) { Patch: ` [ { "op": "copy", "from": "/Users", "path": "/Book" }, - { "op": "remove", "path": "/Book/ID" }, - { "op": "remove", "path": "/Book/Schema/SchemaID" }, - { "op": "remove", "path": "/Book/Schema/VersionID" }, - { "op": "remove", "path": "/Book/Schema/Fields/1/ID" }, - { "op": "replace", "path": "/Book/Name", "value": "Book" }, - { "op": "replace", "path": "/Book/Schema/Name", "value": "Book" } + { "op": "remove", "path": "/Book/SchemaID" }, + { "op": "remove", "path": "/Book/VersionID" }, + { "op": "remove", "path": "/Book/Fields/1/ID" }, + { "op": "replace", "path": "/Book/Name", "value": "Book" } ] `, ExpectedError: "unknown collection, adding collections via patch is not supported.
Name: Book", diff --git a/tests/integration/schema/updates/index/simple_test.go b/tests/integration/schema/updates/index/simple_test.go index 970ef2bb86..fb506ec623 100644 --- a/tests/integration/schema/updates/index/simple_test.go +++ b/tests/integration/schema/updates/index/simple_test.go @@ -31,7 +31,7 @@ func TestPatching_ForCollectionWithIndex_StillWorks(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -49,185 +49,3 @@ func TestPatching_ForCollectionWithIndex_StillWorks(t *testing.T) { } testUtils.ExecuteTestCase(t, test) } - -func TestPatching_IfAttemptToAddIndex_ReturnError(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test adding index to collection via patch fails", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String @index - age: Int - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Indexes/-", "value": { - "Name": "some_index", - "ID": 0, - "Fields": [ - { - "Name": "age", - "Direction": "ASC" - } - ] - } - } - ] - `, - ExpectedError: "adding indexes via patch is not supported. ProposedName: some_index", - }, - testUtils.Request{ - Request: `query { - Users { - name - } - }`, - Results: []map[string]any{}, - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestPatching_IfAttemptToDropIndex_ReturnError(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test dropping index from collection via patch fails", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String @index - age: Int @index(name: "users_age_index") - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "remove", "path": "/Users/Indexes/1" } - ] - `, - ExpectedError: "dropping indexes via patch is not supported. Name: users_age_index", - }, - testUtils.Request{ - Request: `query { - Users { - name - } - }`, - Results: []map[string]any{}, - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestPatching_IfAttemptToChangeIndexName_ReturnError(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test changing index's name via patch fails", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String @index - age: Int - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "replace", "path": "/Users/Indexes/0/Name", "value": "new_index_name" } - ] - `, - ExpectedError: "adding indexes via patch is not supported. 
ProposedName: new_index_name", - }, - testUtils.Request{ - Request: `query { - Users { - name - } - }`, - Results: []map[string]any{}, - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestPatching_IfAttemptToChangeIndexField_ReturnError(t *testing.T) { - testCases := []struct { - description string - patch string - }{ - { - description: "Test adding a field to an index via patch fails", - patch: ` - [ - { "op": "add", "path": "/Users/Indexes/0/Fields/-", "value": { - "Name": "age", - "Direction": "ASC" - } - } - ] - `, - }, - { - description: "Test removing a field from an index via patch fails", - patch: ` - [ - { "op": "remove", "path": "/Users/Indexes/0/Fields/0" } - ] - `, - }, - { - description: "Test changing index's field name via patch fails", - patch: ` - [ - { "op": "replace", "path": "/Users/Indexes/0/Fields/0/Name", "value": "new_field_name" } - ] - `, - }, - { - description: "Test changing index's field direction via patch fails", - patch: ` - [ - { "op": "replace", "path": "/Users/Indexes/0/Fields/0/Direction", "value": "DESC" } - ] - `, - }, - } - - for _, testCase := range testCases { - test := testUtils.TestCase{ - Description: testCase.description, - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String @index - age: Int - } - `, - }, - testUtils.SchemaPatch{ - Patch: testCase.patch, - ExpectedError: "changing indexes via patch is not supported", - }, - testUtils.Request{ - Request: `query { - Users { - name - } - }`, - Results: []map[string]any{}, - }, - }, - } - testUtils.ExecuteTestCase(t, test) - } -} diff --git a/tests/integration/schema/updates/move/field/simple_test.go b/tests/integration/schema/updates/move/field/simple_test.go index 197b9410b7..9a678e8ab5 100644 --- a/tests/integration/schema/updates/move/field/simple_test.go +++ b/tests/integration/schema/updates/move/field/simple_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesMoveFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "move", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/-" } + { "op": "move", "from": "/Users/Fields/1", "path": "/Users/Fields/-" } ] `, ExpectedError: "moving fields is not currently supported. Name: name, ProposedIndex: 1, ExistingIndex: 2", diff --git a/tests/integration/schema/updates/remove/fields/simple_test.go b/tests/integration/schema/updates/remove/fields/simple_test.go index f4fa6c2482..515a8736e5 100644 --- a/tests/integration/schema/updates/remove/fields/simple_test.go +++ b/tests/integration/schema/updates/remove/fields/simple_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesRemoveFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields/2" } + { "op": "remove", "path": "/Users/Fields/2" } ] `, ExpectedError: "deleting an existing field is not supported. Name: name", @@ -56,7 +56,7 @@ func TestSchemaUpdatesRemoveAllFieldsErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields" } + { "op": "remove", "path": "/Users/Fields" } ] `, ExpectedError: "deleting an existing field is not supported", @@ -81,7 +81,7 @@ func TestSchemaUpdatesRemoveFieldNameErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields/2/Name" } + { "op": "remove", "path": "/Users/Fields/2/Name" } ] `, ExpectedError: "mutating an existing field is not supported. 
ID: 2, ProposedName: ", @@ -106,7 +106,7 @@ func TestSchemaUpdatesRemoveFieldIDErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields/2/ID" } + { "op": "remove", "path": "/Users/Fields/2/ID" } ] `, ExpectedError: "deleting an existing field is not supported. Name: name, ID: 2", @@ -131,7 +131,7 @@ func TestSchemaUpdatesRemoveFieldKindErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields/2/Kind" } + { "op": "remove", "path": "/Users/Fields/2/Kind" } ] `, ExpectedError: "mutating an existing field is not supported. ID: 2, ProposedName: ", @@ -156,7 +156,7 @@ func TestSchemaUpdatesRemoveFieldTypErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields/2/Typ" } + { "op": "remove", "path": "/Users/Fields/2/Typ" } ] `, ExpectedError: "mutating an existing field is not supported. ID: 2, ProposedName: ", @@ -185,7 +185,7 @@ func TestSchemaUpdatesRemoveFieldSchemaErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Author/Schema/Fields/1/Schema" } + { "op": "remove", "path": "/Author/Fields/1/Schema" } ] `, ExpectedError: "mutating an existing field is not supported. ID: 1, ProposedName: book", @@ -214,7 +214,7 @@ func TestSchemaUpdatesRemoveFieldRelationNameErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Author/Schema/Fields/1/RelationName" } + { "op": "remove", "path": "/Author/Fields/1/RelationName" } ] `, ExpectedError: "mutating an existing field is not supported. ID: 1, ProposedName: book", @@ -243,7 +243,7 @@ func TestSchemaUpdatesRemoveFieldRelationTypeErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Author/Schema/Fields/1/RelationType" } + { "op": "remove", "path": "/Author/Fields/1/RelationType" } ] `, ExpectedError: "mutating an existing field is not supported. ID: 1, ProposedName: book", diff --git a/tests/integration/schema/updates/remove/simple_test.go b/tests/integration/schema/updates/remove/simple_test.go index 19f9ea1836..6944855ed5 100644 --- a/tests/integration/schema/updates/remove/simple_test.go +++ b/tests/integration/schema/updates/remove/simple_test.go @@ -34,32 +34,7 @@ func TestSchemaUpdatesRemoveCollectionNameErrors(t *testing.T) { { "op": "remove", "path": "/Users/Name" } ] `, - ExpectedError: "collection name can't be empty", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesRemoveCollectionIDErrors(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, remove collection id", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - email: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "remove", "path": "/Users/ID" } - ] - `, - ExpectedError: "CollectionID does not match existing. 
Name: Users, ExistingID: 1, ProposedID: 0", + ExpectedError: "schema name can't be empty", }, }, } @@ -81,7 +56,7 @@ func TestSchemaUpdatesRemoveSchemaIDErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/SchemaID" } + { "op": "remove", "path": "/Users/SchemaID" } ] `, ExpectedError: "SchemaID does not match existing", @@ -107,7 +82,7 @@ func TestSchemaUpdatesRemoveSchemaVersionIDErrors(t *testing.T) { // This should do nothing Patch: ` [ - { "op": "remove", "path": "/Users/Schema/VersionID" } + { "op": "remove", "path": "/Users/VersionID" } ] `, }, @@ -140,7 +115,7 @@ func TestSchemaUpdatesRemoveSchemaNameErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Name" } + { "op": "remove", "path": "/Users/Name" } ] `, ExpectedError: "schema name can't be empty", diff --git a/tests/integration/schema/updates/replace/field/simple_test.go b/tests/integration/schema/updates/replace/field/simple_test.go index e56f708f99..057b8fe9b7 100644 --- a/tests/integration/schema/updates/replace/field/simple_test.go +++ b/tests/integration/schema/updates/replace/field/simple_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesReplaceFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "replace", "path": "/Users/Schema/Fields/2", "value": {"Name": "Fax", "Kind": 11} } + { "op": "replace", "path": "/Users/Fields/2", "value": {"Name": "Fax", "Kind": 11} } ] `, ExpectedError: "deleting an existing field is not supported. Name: name, ID: 2", @@ -56,7 +56,7 @@ func TestSchemaUpdatesReplaceFieldWithIDErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "replace", "path": "/Users/Schema/Fields/2", "value": {"ID":2, "Name": "fax", "Kind": 11} } + { "op": "replace", "path": "/Users/Fields/2", "value": {"ID":2, "Name": "fax", "Kind": 11} } ] `, ExpectedError: "mutating an existing field is not supported. 
ID: 2, ProposedName: fax", diff --git a/tests/integration/schema/updates/replace/simple_test.go b/tests/integration/schema/updates/replace/simple_test.go index 600a12b69c..7729a274c9 100644 --- a/tests/integration/schema/updates/replace/simple_test.go +++ b/tests/integration/schema/updates/replace/simple_test.go @@ -34,12 +34,9 @@ func TestSchemaUpdatesReplaceCollectionErrors(t *testing.T) { { "op": "replace", "path": "/Users", "value": { "Name": "Book", - "Schema": { - "Name": "Book", - "Fields": [ - {"Name": "name", "Kind": 11} - ] - } + "Fields": [ + {"Name": "name", "Kind": 11} + ] } } ] diff --git a/tests/integration/schema/updates/test/add_field_test.go b/tests/integration/schema/updates/test/add_field_test.go index 179dddbc43..31eec344f2 100644 --- a/tests/integration/schema/updates/test/add_field_test.go +++ b/tests/integration/schema/updates/test/add_field_test.go @@ -30,8 +30,8 @@ func TestSchemaUpdatesTestAddField(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Name", "value": "Users" }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "test", "path": "/Users/Name", "value": "Users" }, + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -63,8 +63,8 @@ func TestSchemaUpdatesTestAddFieldBlockedByTest(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Name", "value": "Author" }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"name": "Email", "Kind": 11} } + { "op": "test", "path": "/Users/Name", "value": "Author" }, + { "op": "add", "path": "/Users/Fields/-", "value": {"name": "Email", "Kind": 11} } ] `, ExpectedError: "test failed", diff --git a/tests/integration/schema/updates/test/field/simple_test.go b/tests/integration/schema/updates/test/field/simple_test.go index 24532a8718..414a472149 100644 --- a/tests/integration/schema/updates/test/field/simple_test.go +++ b/tests/integration/schema/updates/test/field/simple_test.go @@ -30,10 +30,10 @@ func TestSchemaUpdatesTestFieldNameErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/1/name", "value": "Email" } + { "op": "test", "path": "/Users/Fields/1/name", "value": "Email" } ] `, - ExpectedError: "testing value /Users/Schema/Fields/1/name failed: test failed", + ExpectedError: "testing value /Users/Fields/1/name failed: test failed", }, }, } @@ -54,7 +54,7 @@ func TestSchemaUpdatesTestFieldNamePasses(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/1/Name", "value": "name" } + { "op": "test", "path": "/Users/Fields/1/Name", "value": "name" } ] `, }, @@ -77,10 +77,10 @@ func TestSchemaUpdatesTestFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/1", "value": {"Name": "name", "Kind": 11} } + { "op": "test", "path": "/Users/Fields/1", "value": {"Name": "name", "Kind": 11} } ] `, - ExpectedError: "testing value /Users/Schema/Fields/1 failed: test failed", + ExpectedError: "testing value /Users/Fields/1 failed: test failed", }, }, } @@ -101,7 +101,7 @@ func TestSchemaUpdatesTestFieldPasses(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/1", "value": { + { "op": "test", "path": "/Users/Fields/1", "value": { "ID":1, "Name": "name", "Kind": 11, "Schema":"","RelationName":"","Typ":1,"RelationType":0 } } ] @@ -126,7 +126,7 @@ func 
TestSchemaUpdatesTestFieldPasses_UsingFieldNameAsIndex(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/name", "value": { + { "op": "test", "path": "/Users/Fields/name", "value": { "ID":1, "Kind": 11, "Schema":"","RelationName":"","Typ":1,"RelationType":0 } } ] @@ -151,7 +151,7 @@ func TestSchemaUpdatesTestFieldPasses_TargettingKindUsingFieldNameAsIndex(t *tes testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/name/Kind", "value": 11 } + { "op": "test", "path": "/Users/Fields/name/Kind", "value": 11 } ] `, }, diff --git a/tests/integration/schema/with_update_set_default_test.go b/tests/integration/schema/with_update_set_default_test.go index 1551aff972..c3704deb52 100644 --- a/tests/integration/schema/with_update_set_default_test.go +++ b/tests/integration/schema/with_update_set_default_test.go @@ -32,7 +32,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToEmptyString_Errors(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -59,7 +59,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToUnknownVersion_Errors(t *testing testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -86,7 +86,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToOriginal_NewFieldIsNotQueriable( testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, SetAsDefaultVersion: immutable.Some(false), @@ -123,7 +123,7 @@ func TestSchema_WithUpdateAndSetDefaultVersionToNew_AllowsQueryingOfNewField(t * testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, SetAsDefaultVersion: immutable.Some(false), From 3d0c7fb35e6b9685a2e93cfee1c1f54180f168a7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 17 Oct 2023 13:05:17 -0400 Subject: [PATCH 29/55] bot: Update dependencies (bulk dependabot PRs) 16-10-2023 (#1976) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ✅ This PR was created by the Combine PRs action by combining the following PRs: #1973 bot: Bump @types/react-dom from 18.2.12 to 18.2.13 in /playground #1971 bot: Bump @typescript-eslint/parser from 6.7.4 to 6.7.5 in /playground #1970 bot: Bump github.com/spf13/viper from 1.16.0 to 1.17.0 #1969 bot: Bump go.uber.org/zap from 1.25.0 to 1.26.0 #1968 bot: Bump github.com/multiformats/go-multiaddr from 0.11.0 to 0.12.0 #1967 bot: Bump github.com/bits-and-blooms/bitset from 1.9.0 to 1.10.0 #1966 bot: Bump google.golang.org/grpc from 1.58.2 to 1.58.3 ⚠️ The following PRs were left out due to merge conflicts: #1972 bot: Bump @types/react from 18.2.25 to 18.2.28 in /playground --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 32 +++--- go.sum | 63 +++++----- playground/package-lock.json | 216 +++-------------------------------- 
playground/package.json | 4 +- 4 files changed, 72 insertions(+), 243 deletions(-) diff --git a/go.mod b/go.mod index 8fb5ae48ee..708d1878ad 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/sourcenetwork/defradb go 1.20 require ( - github.com/bits-and-blooms/bitset v1.9.0 + github.com/bits-and-blooms/bitset v1.10.0 github.com/bxcodec/faker v2.0.1+incompatible github.com/dgraph-io/badger/v4 v4.1.0 github.com/evanphx/json-patch/v5 v5.7.0 @@ -29,13 +29,13 @@ require ( github.com/libp2p/go-libp2p-pubsub v0.9.3 github.com/libp2p/go-libp2p-record v0.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiformats/go-multiaddr v0.11.0 + github.com/multiformats/go-multiaddr v0.12.0 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 github.com/sourcenetwork/immutable v0.3.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.16.0 + github.com/spf13/viper v1.17.0 github.com/stretchr/testify v1.8.4 github.com/textileio/go-libp2p-pubsub-rpc v0.0.9 github.com/tidwall/btree v1.7.0 @@ -44,11 +44,11 @@ require ( github.com/vito/go-sse v1.0.0 go.opentelemetry.io/otel/metric v1.19.0 go.opentelemetry.io/otel/sdk/metric v1.19.0 - go.uber.org/zap v1.25.0 + go.uber.org/zap v1.26.0 golang.org/x/crypto v0.14.0 - golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 golang.org/x/net v0.17.0 - google.golang.org/grpc v1.58.2 + google.golang.org/grpc v1.58.3 google.golang.org/protobuf v1.31.0 ) @@ -61,7 +61,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/cskr/pubsub v1.0.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect @@ -119,7 +119,7 @@ require ( github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.17.0 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect @@ -155,9 +155,9 @@ require ( github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_golang v1.16.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect @@ -169,12 +169,14 @@ require ( github.com/quic-go/webtransport-go v0.5.3 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.9.5 // 
indirect + github.com/spf13/afero v1.10.0 // indirect github.com/spf13/cast v1.5.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/tetratelabs/wazero v1.3.1 // indirect github.com/textileio/go-log/v2 v2.1.3-gke-2 // indirect github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect @@ -191,10 +193,10 @@ require ( golang.org/x/sync v0.3.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect - golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect + golang.org/x/tools v0.13.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.13.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect diff --git a/go.sum b/go.sum index 1b1c80f4bf..b6dab22da8 100644 --- a/go.sum +++ b/go.sum @@ -86,8 +86,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.9.0 h1:g1YivPG8jOtrN013Fe8OBXubkiTwvm7/vG2vXz03ANU= -github.com/bits-and-blooms/bitset v1.9.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= +github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -156,8 +156,9 @@ github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= @@ -611,8 +612,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod 
h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -977,8 +978,8 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.11.0 h1:XqGyJ8ufbCE0HmTDwx2kPdsrQ36AGPZNZX6s6xfJH10= -github.com/multiformats/go-multiaddr v0.11.0/go.mod h1:gWUm0QLR4thQ6+ZF6SXUw8YjtwQSPapICM+NmCkxHSM= +github.com/multiformats/go-multiaddr v0.12.0 h1:1QlibTFkoXJuDjjYsMHhE73TnzJQl8FSWatk/0gxGzE= +github.com/multiformats/go-multiaddr v0.12.0/go.mod h1:WmZXgObOQOYp9r3cslLlppkrz1FYSHmE834dfz/lWu8= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1077,8 +1078,8 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2D github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1088,8 +1089,9 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= @@ -1149,6 +1151,10 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= +github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -1192,6 +1198,8 @@ github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qt github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= github.com/sourcenetwork/badger/v4 v4.0.0-20230801145501-d3a57bd4c2ec h1:br39/Te7XrQmirI+QtT6YblhD9T6B2dzDNI9eoI26Pg= github.com/sourcenetwork/badger/v4 v4.0.0-20230801145501-d3a57bd4c2ec/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= @@ -1207,8 +1215,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -1217,15 +1225,13 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= -github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= +github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= +github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1244,11 +1250,10 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= @@ -1355,8 +1360,8 @@ go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod 
h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1400,8 +1405,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ= -golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1675,8 +1680,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E= -golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1758,8 +1763,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 
h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1784,8 +1789,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.58.2 h1:SXUpjxeVF3FKrTYQI4f4KvbGD5u2xccdYdurwowix5I= -google.golang.org/grpc v1.58.2/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/playground/package-lock.json b/playground/package-lock.json index edf9e8c1cc..77ca69d25a 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -18,9 +18,9 @@ }, "devDependencies": { "@types/react": "^18.2.25", - "@types/react-dom": "^18.2.12", + "@types/react-dom": "^18.2.13", "@typescript-eslint/eslint-plugin": "^6.7.5", - "@typescript-eslint/parser": "^6.7.4", + "@typescript-eslint/parser": "^6.7.5", "@vitejs/plugin-react-swc": "^3.4.0", "eslint": "^8.51.0", "eslint-plugin-react-hooks": "^4.6.0", @@ -1659,9 +1659,9 @@ } }, "node_modules/@types/react-dom": { - "version": "18.2.12", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.12.tgz", - "integrity": "sha512-QWZuiA/7J/hPIGocXreCRbx7wyoeet9ooxfbSA+zbIWqyQEE7GMtRn4A37BdYyksnN+/NDnWgfxZH9UVGDw1hg==", + "version": "18.2.13", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.13.tgz", + "integrity": "sha512-eJIUv7rPP+EC45uNYp/ThhSpE16k22VJUknt5OLoH9tbXoi8bMhwLf5xRuWMywamNbWzhrSmU7IBJfPup1+3fw==", "devOptional": true, "dependencies": { "@types/react": "*" @@ -1722,63 +1722,16 @@ } } }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/scope-manager": { - "version": "6.7.5", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.5.tgz", - "integrity": "sha512-GAlk3eQIwWOJeb9F7MKQ6Jbah/vx1zETSDw8likab/eFcqkjSD7BI75SDAeC5N2L0MmConMoPvTsmkrg71+B1A==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.7.5", - "@typescript-eslint/visitor-keys": "6.7.5" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/types": { - "version": "6.7.5", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.5.tgz", - "integrity": 
"sha512-WboQBlOXtdj1tDFPyIthpKrUb+kZf2VroLZhxKa/VlwLlLyqv/PwUNgL30BlTVZV1Wu4Asu2mMYPqarSO4L5ZQ==", - "dev": true, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/eslint-plugin/node_modules/@typescript-eslint/visitor-keys": { + "node_modules/@typescript-eslint/parser": { "version": "6.7.5", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.5.tgz", - "integrity": "sha512-3MaWdDZtLlsexZzDSdQWsFQ9l9nL8B80Z4fImSpyllFC/KLqWQRdEcB+gGGO+N3Q2uL40EsG66wZLsohPxNXvg==", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.7.5.tgz", + "integrity": "sha512-bIZVSGx2UME/lmhLcjdVc7ePBwn7CLqKarUBL4me1C5feOd663liTGjMBGVcGr+BhnSLeP4SgwdvNnnkbIdkCw==", "dev": true, "dependencies": { + "@typescript-eslint/scope-manager": "6.7.5", "@typescript-eslint/types": "6.7.5", - "eslint-visitor-keys": "^3.4.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/parser": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.7.4.tgz", - "integrity": "sha512-I5zVZFY+cw4IMZUeNCU7Sh2PO5O57F7Lr0uyhgCJmhN/BuTlnc55KxPonR4+EM3GBdfiCyGZye6DgMjtubQkmA==", - "dev": true, - "dependencies": { - "@typescript-eslint/scope-manager": "6.7.4", - "@typescript-eslint/types": "6.7.4", - "@typescript-eslint/typescript-estree": "6.7.4", - "@typescript-eslint/visitor-keys": "6.7.4", + "@typescript-eslint/typescript-estree": "6.7.5", + "@typescript-eslint/visitor-keys": "6.7.5", "debug": "^4.3.4" }, "engines": { @@ -1798,13 +1751,13 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.4.tgz", - "integrity": "sha512-SdGqSLUPTXAXi7c3Ob7peAGVnmMoGzZ361VswK2Mqf8UOYcODiYvs8rs5ILqEdfvX1lE7wEZbLyELCW+Yrql1A==", + "version": "6.7.5", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.5.tgz", + "integrity": "sha512-GAlk3eQIwWOJeb9F7MKQ6Jbah/vx1zETSDw8likab/eFcqkjSD7BI75SDAeC5N2L0MmConMoPvTsmkrg71+B1A==", "dev": true, "dependencies": { - "@typescript-eslint/types": "6.7.4", - "@typescript-eslint/visitor-keys": "6.7.4" + "@typescript-eslint/types": "6.7.5", + "@typescript-eslint/visitor-keys": "6.7.5" }, "engines": { "node": "^16.0.0 || >=18.0.0" @@ -1841,7 +1794,7 @@ } } }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/types": { + "node_modules/@typescript-eslint/types": { "version": "6.7.5", "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.5.tgz", "integrity": "sha512-WboQBlOXtdj1tDFPyIthpKrUb+kZf2VroLZhxKa/VlwLlLyqv/PwUNgL30BlTVZV1Wu4Asu2mMYPqarSO4L5ZQ==", @@ -1854,7 +1807,7 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/typescript-estree": { + "node_modules/@typescript-eslint/typescript-estree": { "version": "6.7.5", "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.5.tgz", "integrity": "sha512-NhJiJ4KdtwBIxrKl0BqG1Ur+uw7FiOnOThcYx9DpOGJ/Abc9z2xNzLeirCG02Ig3vkvrc2qFLmYSSsaITbKjlg==", @@ -1881,63 +1834,6 @@ } } }, - 
"node_modules/@typescript-eslint/type-utils/node_modules/@typescript-eslint/visitor-keys": { - "version": "6.7.5", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.5.tgz", - "integrity": "sha512-3MaWdDZtLlsexZzDSdQWsFQ9l9nL8B80Z4fImSpyllFC/KLqWQRdEcB+gGGO+N3Q2uL40EsG66wZLsohPxNXvg==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.7.5", - "eslint-visitor-keys": "^3.4.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/types": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.4.tgz", - "integrity": "sha512-o9XWK2FLW6eSS/0r/tgjAGsYasLAnOWg7hvZ/dGYSSNjCh+49k5ocPN8OmG5aZcSJ8pclSOyVKP2x03Sj+RrCA==", - "dev": true, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/typescript-estree": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.4.tgz", - "integrity": "sha512-ty8b5qHKatlNYd9vmpHooQz3Vki3gG+3PchmtsA4TgrZBKWHNjWfkQid7K7xQogBqqc7/BhGazxMD5vr6Ha+iQ==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.7.4", - "@typescript-eslint/visitor-keys": "6.7.4", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, "node_modules/@typescript-eslint/utils": { "version": "6.7.5", "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.7.5.tgz", @@ -1963,64 +1859,7 @@ "eslint": "^7.0.0 || ^8.0.0" } }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/scope-manager": { - "version": "6.7.5", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.5.tgz", - "integrity": "sha512-GAlk3eQIwWOJeb9F7MKQ6Jbah/vx1zETSDw8likab/eFcqkjSD7BI75SDAeC5N2L0MmConMoPvTsmkrg71+B1A==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.7.5", - "@typescript-eslint/visitor-keys": "6.7.5" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/types": { - "version": "6.7.5", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.5.tgz", - "integrity": "sha512-WboQBlOXtdj1tDFPyIthpKrUb+kZf2VroLZhxKa/VlwLlLyqv/PwUNgL30BlTVZV1Wu4Asu2mMYPqarSO4L5ZQ==", - "dev": true, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/typescript-estree": { - "version": "6.7.5", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.5.tgz", - "integrity": "sha512-NhJiJ4KdtwBIxrKl0BqG1Ur+uw7FiOnOThcYx9DpOGJ/Abc9z2xNzLeirCG02Ig3vkvrc2qFLmYSSsaITbKjlg==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.7.5", - 
"@typescript-eslint/visitor-keys": "6.7.5", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } - } - }, - "node_modules/@typescript-eslint/utils/node_modules/@typescript-eslint/visitor-keys": { + "node_modules/@typescript-eslint/visitor-keys": { "version": "6.7.5", "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.5.tgz", "integrity": "sha512-3MaWdDZtLlsexZzDSdQWsFQ9l9nL8B80Z4fImSpyllFC/KLqWQRdEcB+gGGO+N3Q2uL40EsG66wZLsohPxNXvg==", @@ -2037,23 +1876,6 @@ "url": "https://opencollective.com/typescript-eslint" } }, - "node_modules/@typescript-eslint/visitor-keys": { - "version": "6.7.4", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.4.tgz", - "integrity": "sha512-pOW37DUhlTZbvph50x5zZCkFn3xzwkGtNoJHzIM3svpiSkJzwOYr/kVBaXmf+RAQiUDs1AHEZVNPg6UJCJpwRA==", - "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.7.4", - "eslint-visitor-keys": "^3.4.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - } - }, "node_modules/@vitejs/plugin-react-swc": { "version": "3.4.0", "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.4.0.tgz", diff --git a/playground/package.json b/playground/package.json index 245101f0cb..a896c3c97e 100644 --- a/playground/package.json +++ b/playground/package.json @@ -20,9 +20,9 @@ }, "devDependencies": { "@types/react": "^18.2.25", - "@types/react-dom": "^18.2.12", + "@types/react-dom": "^18.2.13", "@typescript-eslint/eslint-plugin": "^6.7.5", - "@typescript-eslint/parser": "^6.7.4", + "@typescript-eslint/parser": "^6.7.5", "@vitejs/plugin-react-swc": "^3.4.0", "eslint": "^8.51.0", "eslint-plugin-react-hooks": "^4.6.0", From 0efe835cc986a5f15e113fce37428856d772c4c1 Mon Sep 17 00:00:00 2001 From: AndrewSisley Date: Tue, 17 Oct 2023 18:10:10 -0400 Subject: [PATCH 30/55] feat: Remove CollectionDescription.Schema (#1965) ## Relevant issue(s) Resolves #1958 ## Description Removes CollectionDescription.Schema. Also splits the storage of schema out from within collection. The storage of schema has been broken out to a new sub-package of db, at the moment it is a very simple file, but collection will be moved there in https://github.com/sourcenetwork/defradb/issues/1964. I was planning on doing that in this PR (in part, to provide context for reviewers, as atm it is basically a single-file package), but it proved to be non-trivial due to some existing messiness in that space and was broken out to two more tasks. I also wish for stuff in that directory to eventually follow a repository-like pattern, where stuff is cached (within a context/txn's context) instead of fetching from store on each call. Moving this stuff out to a new directory instead of preserving it in the (already very large) db directory should make both db and the new sub-package a fair bit more cohesive and easier to read. 
--- client/descriptions.go | 6 +- core/key.go | 37 ++++ db/collection.go | 97 +++------- db/description/errors.go | 30 +++ db/description/schema.go | 177 ++++++++++++++++++ db/index_test.go | 120 ------------ db/indexed_docs_test.go | 80 -------- db/schema.go | 42 +---- .../i1958-remove-col-description-schema.md | 3 + planner/sum.go | 4 +- 10 files changed, 287 insertions(+), 309 deletions(-) create mode 100644 db/description/errors.go create mode 100644 db/description/schema.go create mode 100644 docs/data_format_changes/i1958-remove-col-description-schema.md diff --git a/client/descriptions.go b/client/descriptions.go index 8dbe54ddce..664d258753 100644 --- a/client/descriptions.go +++ b/client/descriptions.go @@ -27,10 +27,8 @@ type CollectionDescription struct { // It is immutable. ID uint32 - // Schema contains the data type information that this Collection uses. - // - // This property is deprecated and should not be used. - Schema SchemaDescription + // The ID of the schema version that this collection is at. + SchemaVersionID string // Indexes contains the secondary indexes that this Collection has. Indexes []IndexDescription diff --git a/core/key.go b/core/key.go index a8ec5ece2b..c7c050897c 100644 --- a/core/key.go +++ b/core/key.go @@ -47,6 +47,7 @@ const ( COLLECTION_SCHEMA_VERSION_HISTORY = "/collection/version/h" COLLECTION_INDEX = "/collection/index" SCHEMA_MIGRATION = "/schema/migration" + SCHEMA_VERSION = "/schema/version" SEQ = "/seq" PRIMARY_KEY = "/pk" DATASTORE_DOC_VERSION_FIELD_ID = "v" @@ -132,6 +133,15 @@ type CollectionIndexKey struct { var _ Key = (*CollectionIndexKey)(nil) +// SchemaVersionKey points to the json serialized schema at the specified version. +// +// Its corresponding value is immutable. +type SchemaVersionKey struct { + SchemaVersionID string +} + +var _ Key = (*SchemaVersionKey)(nil) + // SchemaHistoryKey holds the pathway through the schema version history for // any given schema. // @@ -257,6 +267,11 @@ func NewCollectionSchemaVersionKey(schemaVersionId string) CollectionSchemaVersi return CollectionSchemaVersionKey{SchemaVersionId: schemaVersionId} } +func NewCollectionSchemaVersionKeyFromString(key string) CollectionSchemaVersionKey { + elements := strings.Split(key, "/") + return CollectionSchemaVersionKey{SchemaVersionId: elements[len(elements)-1]} +} + // NewCollectionIndexKey creates a new CollectionIndexKey from a collection name and index name.
func NewCollectionIndexKey(colID, indexName string) CollectionIndexKey { return CollectionIndexKey{CollectionName: colID, IndexName: indexName} @@ -307,6 +322,10 @@ func (k CollectionIndexKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } +func NewSchemaVersionKey(schemaVersionID string) SchemaVersionKey { + return SchemaVersionKey{SchemaVersionID: schemaVersionID} +} + func NewSchemaHistoryKey(schemaId string, previousSchemaVersionID string) SchemaHistoryKey { return SchemaHistoryKey{ SchemaID: schemaId, @@ -625,6 +644,24 @@ func (k CollectionSchemaVersionKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } +func (k SchemaVersionKey) ToString() string { + result := SCHEMA_VERSION + + if k.SchemaVersionID != "" { + result = result + "/" + k.SchemaVersionID + } + + return result +} + +func (k SchemaVersionKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k SchemaVersionKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + func (k SchemaHistoryKey) ToString() string { result := COLLECTION_SCHEMA_VERSION_HISTORY diff --git a/db/collection.go b/db/collection.go index b86024e16e..f7da81e59d 100644 --- a/db/collection.go +++ b/db/collection.go @@ -28,14 +28,13 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/core" - ccid "github.com/sourcenetwork/defradb/core/cid" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/db/description" "github.com/sourcenetwork/defradb/db/fetcher" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/lens" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/merkle/crdt" ) @@ -119,35 +118,11 @@ func (db *db) createCollection( } desc.ID = uint32(colID) - for i := range schema.Fields { - schema.Fields[i].ID = client.FieldID(i) - } - - col, err := db.newCollection(desc, schema) - if err != nil { - return nil, err - } - - // Local elements such as secondary indexes should be excluded - // from the (global) schemaId. - schemaBuf, err := json.Marshal(schema) + schema, err = description.CreateSchemaVersion(ctx, txn, schema) if err != nil { return nil, err } - - // add a reference to this DB by desc hash - cid, err := ccid.NewSHA256CidV1(schemaBuf) - if err != nil { - return nil, err - } - schemaID := cid.String() - - // For new schemas the initial version id will match the schema id - schemaVersionID := schemaID - - schema.VersionID = schemaVersionID - schema.SchemaID = schemaID - desc.Schema = schema + desc.SchemaVersionID = schema.VersionID // buffer must include all the ids, as it is saved and loaded from the store later. buf, err := json.Marshal(desc) @@ -155,7 +130,7 @@ func (db *db) createCollection( return nil, err } - collectionSchemaVersionKey := core.NewCollectionSchemaVersionKey(schemaVersionID) + collectionSchemaVersionKey := core.NewCollectionSchemaVersionKey(schema.VersionID) // Whilst the schemaVersionKey is global, the data persisted at the key's location // is local to the node (the global only elements are not useful beyond key generation). 
err = txn.Systemstore().Put(ctx, collectionSchemaVersionKey.ToDS(), buf) @@ -163,23 +138,21 @@ func (db *db) createCollection( return nil, err } - collectionSchemaKey := core.NewCollectionSchemaKey(schemaID) - err = txn.Systemstore().Put(ctx, collectionSchemaKey.ToDS(), []byte(schemaVersionID)) + collectionSchemaKey := core.NewCollectionSchemaKey(schema.SchemaID) + err = txn.Systemstore().Put(ctx, collectionSchemaKey.ToDS(), []byte(schema.VersionID)) if err != nil { return nil, err } - err = txn.Systemstore().Put(ctx, collectionKey.ToDS(), []byte(schemaVersionID)) + err = txn.Systemstore().Put(ctx, collectionKey.ToDS(), []byte(schema.VersionID)) if err != nil { return nil, err } - log.Debug( - ctx, - "Created collection", - logging.NewKV("Name", col.Name()), - logging.NewKV("SchemaID", col.SchemaID()), - ) + col, err := db.newCollection(desc, schema) + if err != nil { + return nil, err + } for _, index := range desc.Indexes { if _, err := col.createIndex(ctx, txn, index); err != nil { @@ -203,12 +176,9 @@ func (db *db) updateSchema( txn datastore.Txn, existingSchemaByName map[string]client.SchemaDescription, proposedDescriptionsByName map[string]client.SchemaDescription, - def client.CollectionDefinition, + schema client.SchemaDescription, setAsDefaultVersion bool, ) (client.Collection, error) { - schema := def.Schema - desc := def.Description - hasChanged, err := db.validateUpdateSchema( ctx, txn, @@ -221,7 +191,7 @@ func (db *db) updateSchema( } if !hasChanged { - return db.getCollectionByName(ctx, txn, desc.Name) + return db.getCollectionByName(ctx, txn, schema.Name) } for _, field := range schema.Fields { @@ -239,14 +209,6 @@ func (db *db) updateSchema( } for i, field := range schema.Fields { - if field.ID == client.FieldID(0) { - // This is not wonderful and will probably break when we add the ability - // to delete fields, however it is good enough for now and matches the - // create behaviour. - field.ID = client.FieldID(i) - schema.Fields[i] = field - } - if field.Typ == client.NONE_CRDT { // If no CRDT Type has been provided, default to LWW_REGISTER. field.Typ = client.LWW_REGISTER @@ -254,26 +216,24 @@ func (db *db) updateSchema( } } - globalSchemaBuf, err := json.Marshal(schema) + schema, err = description.CreateSchemaVersion(ctx, txn, schema) if err != nil { return nil, err } - cid, err := ccid.NewSHA256CidV1(globalSchemaBuf) + col, err := db.getCollectionByName(ctx, txn, schema.Name) if err != nil { return nil, err } - previousSchemaVersionID := schema.VersionID - schemaVersionID := cid.String() - schema.VersionID = schemaVersionID - desc.Schema = schema + desc := col.Description() + desc.SchemaVersionID = schema.VersionID buf, err := json.Marshal(desc) if err != nil { return nil, err } - collectionSchemaVersionKey := core.NewCollectionSchemaVersionKey(schemaVersionID) + collectionSchemaVersionKey := core.NewCollectionSchemaVersionKey(schema.VersionID) // Whilst the schemaVersionKey is global, the data persisted at the key's location // is local to the node (the global only elements are not useful beyond key generation). 
err = txn.Systemstore().Put(ctx, collectionSchemaVersionKey.ToDS(), buf) @@ -281,14 +241,8 @@ func (db *db) updateSchema( return nil, err } - schemaVersionHistoryKey := core.NewSchemaHistoryKey(schema.SchemaID, previousSchemaVersionID) - err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte(schemaVersionID)) - if err != nil { - return nil, err - } - if setAsDefaultVersion { - err = db.setDefaultSchemaVersionExplicit(ctx, txn, desc.Name, schema.SchemaID, schemaVersionID) + err = db.setDefaultSchemaVersionExplicit(ctx, txn, desc.Name, schema.SchemaID, schema.VersionID) if err != nil { return nil, err } @@ -308,6 +262,10 @@ func (db *db) validateUpdateSchema( proposedDescriptionsByName map[string]client.SchemaDescription, proposedDesc client.SchemaDescription, ) (bool, error) { + if proposedDesc.Name == "" { + return false, ErrSchemaNameEmpty + } + existingDesc, collectionExists := existingDescriptionsByName[proposedDesc.Name] if !collectionExists { return false, NewErrAddCollectionWithPatch(proposedDesc.Name) @@ -538,7 +496,7 @@ func (db *db) setDefaultSchemaVersion( } desc := col.Description() - err = db.setDefaultSchemaVersionExplicit(ctx, txn, desc.Name, desc.Schema.SchemaID, schemaVersionID) + err = db.setDefaultSchemaVersionExplicit(ctx, txn, desc.Name, col.Schema().SchemaID, schemaVersionID) if err != nil { return err } @@ -597,11 +555,16 @@ func (db *db) getCollectionByVersionID( return nil, err } + schema, err := description.GetSchemaVersion(ctx, txn, desc.SchemaVersionID) + if err != nil { + return nil, err + } + col := &collection{ db: db, def: client.CollectionDefinition{ Description: desc, - Schema: desc.Schema, + Schema: schema, }, } diff --git a/db/description/errors.go b/db/description/errors.go new file mode 100644 index 0000000000..7ca524e81d --- /dev/null +++ b/db/description/errors.go @@ -0,0 +1,30 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package description + +import "github.com/sourcenetwork/defradb/errors" + +const ( + errFailedToCreateSchemaQuery string = "failed to create schema prefix query" + errFailedToCloseSchemaQuery string = "failed to close schema prefix query" +) + +// NewErrFailedToCreateSchemaQuery returns a new error indicating that the query +// to create a schema failed. +func NewErrFailedToCreateSchemaQuery(inner error) error { + return errors.Wrap(errFailedToCreateSchemaQuery, inner) +} + +// NewErrFailedToCloseSchemaQuery returns a new error indicating that the schema +// prefix query failed to close. +func NewErrFailedToCloseSchemaQuery(inner error) error { + return errors.Wrap(errFailedToCloseSchemaQuery, inner) +} diff --git a/db/description/schema.go b/db/description/schema.go new file mode 100644 index 0000000000..5504c11ccf --- /dev/null +++ b/db/description/schema.go @@ -0,0 +1,177 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
+ +package description + +import ( + "context" + "encoding/json" + + "github.com/ipfs/go-datastore/query" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/core/cid" + "github.com/sourcenetwork/defradb/datastore" +) + +// CreateSchemaVersion creates and saves to the store a new schema version. +// +// If the SchemaID is empty it will be set to the new version ID. +func CreateSchemaVersion( + ctx context.Context, + txn datastore.Txn, + desc client.SchemaDescription, +) (client.SchemaDescription, error) { + for i := range desc.Fields { + // This is not wonderful and will probably break when we add the ability + // to delete fields, however it is good enough for now and matches the + // create behaviour. + desc.Fields[i].ID = client.FieldID(i) + } + + buf, err := json.Marshal(desc) + if err != nil { + return client.SchemaDescription{}, err + } + + scid, err := cid.NewSHA256CidV1(buf) + if err != nil { + return client.SchemaDescription{}, err + } + versionID := scid.String() + previousSchemaVersionID := desc.VersionID + isNew := desc.SchemaID == "" + + desc.VersionID = versionID + if isNew { + // If this is a new schema, the schema ID will match the version ID + desc.SchemaID = versionID + } + + // Rebuild the json buffer to include the newly set ID properties + buf, err = json.Marshal(desc) + if err != nil { + return client.SchemaDescription{}, err + } + + key := core.NewSchemaVersionKey(versionID) + err = txn.Systemstore().Put(ctx, key.ToDS(), buf) + if err != nil { + return client.SchemaDescription{}, err + } + + if !isNew { + // We don't need to add a history key if this is the first version + schemaVersionHistoryKey := core.NewSchemaHistoryKey(desc.SchemaID, previousSchemaVersionID) + err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte(desc.VersionID)) + if err != nil { + return client.SchemaDescription{}, err + } + } + + return desc, nil +} + +// GetSchemaVersion returns the schema description for the schema version of the +// ID provided. +// +// Will return an error if it is not found. +func GetSchemaVersion( + ctx context.Context, + txn datastore.Txn, + versionID string, +) (client.SchemaDescription, error) { + key := core.NewSchemaVersionKey(versionID) + + buf, err := txn.Systemstore().Get(ctx, key.ToDS()) + if err != nil { + return client.SchemaDescription{}, err + } + + var desc client.SchemaDescription + err = json.Unmarshal(buf, &desc) + if err != nil { + return client.SchemaDescription{}, err + } + + return desc, nil +} + +// GetSchemas returns the schema of all the default schema versions in the system. 
+func GetSchemas( + ctx context.Context, + txn datastore.Txn, +) ([]client.SchemaDescription, error) { + collectionSchemaVersionPrefix := core.NewCollectionSchemaVersionKey("") + collectionSchemaVersionQuery, err := txn.Systemstore().Query(ctx, query.Query{ + Prefix: collectionSchemaVersionPrefix.ToString(), + KeysOnly: true, + }) + if err != nil { + return nil, NewErrFailedToCreateSchemaQuery(err) + } + + versionIDs := make([]string, 0) + for res := range collectionSchemaVersionQuery.Next() { + if res.Error != nil { + if err := collectionSchemaVersionQuery.Close(); err != nil { + return nil, NewErrFailedToCloseSchemaQuery(err) + } + return nil, err + } + + versionIDs = append(versionIDs, core.NewCollectionSchemaVersionKeyFromString(string(res.Key)).SchemaVersionId) + } + + if err := collectionSchemaVersionQuery.Close(); err != nil { + return nil, NewErrFailedToCloseSchemaQuery(err) + } + + schemaVersionPrefix := core.NewSchemaVersionKey("") + schemaVersionQuery, err := txn.Systemstore().Query(ctx, query.Query{ + Prefix: schemaVersionPrefix.ToString(), + }) + if err != nil { + return nil, NewErrFailedToCreateSchemaQuery(err) + } + + descriptions := make([]client.SchemaDescription, 0) + for res := range schemaVersionQuery.Next() { + if res.Error != nil { + if err := schemaVersionQuery.Close(); err != nil { + return nil, NewErrFailedToCloseSchemaQuery(err) + } + return nil, err + } + + var desc client.SchemaDescription + err = json.Unmarshal(res.Value, &desc) + if err != nil { + if err := schemaVersionQuery.Close(); err != nil { + return nil, NewErrFailedToCloseSchemaQuery(err) + } + return nil, err + } + + for _, versionID := range versionIDs { + if desc.VersionID == versionID { + descriptions = append(descriptions, desc) + break + } + } + } + + if err := schemaVersionQuery.Close(); err != nil { + return nil, NewErrFailedToCloseSchemaQuery(err) + } + + return descriptions, nil +} diff --git a/db/index_test.go b/db/index_test.go index f8f3d0b8e6..70619d70f6 100644 --- a/db/index_test.go +++ b/db/index_test.go @@ -380,74 +380,6 @@ func TestCreateIndex_ShouldSaveToSystemStorage(t *testing.T) { assert.Equal(t, desc, deserialized) } -func TestCreateIndex_IfStorageFails_ReturnError(t *testing.T) { - testErr := errors.New("test error") - - testCases := []struct { - Name string - ExpectedError error - GetMockSystemstore func(t *testing.T) *mocks.DSReaderWriter - AlterDescription func(desc *client.IndexDescription) - }{ - { - Name: "call Has() for custom index name", - ExpectedError: testErr, - GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { - store := mocks.NewDSReaderWriter(t) - store.EXPECT().Has(mock.Anything, mock.Anything).Unset() - store.EXPECT().Has(mock.Anything, mock.Anything).Return(false, testErr) - return store - }, - AlterDescription: func(desc *client.IndexDescription) {}, - }, - { - Name: "call Has() for generated index name", - ExpectedError: testErr, - GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { - store := mocks.NewDSReaderWriter(t) - store.EXPECT().Has(mock.Anything, mock.Anything).Unset() - store.EXPECT().Has(mock.Anything, mock.Anything).Return(false, testErr) - return store - }, - AlterDescription: func(desc *client.IndexDescription) { - desc.Name = "" - }, - }, - { - Name: "fails to store index description", - ExpectedError: NewErrInvalidStoredIndex(nil), - GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { - store := mocks.NewDSReaderWriter(t) - store.EXPECT().Put(mock.Anything, mock.Anything, mock.Anything).Unset() - key := 
core.NewCollectionIndexKey(usersColName, testUsersColIndexName) - store.EXPECT().Put(mock.Anything, key.ToDS(), mock.Anything).Return(testErr) - return store - }, - AlterDescription: func(desc *client.IndexDescription) {}, - }, - } - - for _, testCase := range testCases { - f := newIndexTestFixture(t) - - mockedTxn := f.mockTxn() - - mockedTxn.MockSystemstore = testCase.GetMockSystemstore(t) - f.stubSystemStore(mockedTxn.MockSystemstore.EXPECT()) - mockedTxn.EXPECT().Systemstore().Unset() - mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() - - desc := client.IndexDescription{ - Name: testUsersColIndexName, - Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, - } - testCase.AlterDescription(&desc) - - _, err := f.createCollectionIndex(desc) - assert.ErrorIs(t, err, testErr, testCase.Name) - } -} - func TestCreateIndex_IfCollectionDoesntExist_ReturnError(t *testing.T) { f := newIndexTestFixture(t) @@ -738,43 +670,6 @@ func TestGetIndexes_IfSystemStoreHasInvalidData_ReturnError(t *testing.T) { assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil)) } -func TestGetIndexes_IfFailsToReadSeqNumber_ReturnError(t *testing.T) { - testErr := errors.New("test error") - - testCases := []struct { - Name string - StubSystemStore func(*mocks.DSReaderWriter_Expecter, core.Key) - }{ - { - Name: "Read Sequence Number", - StubSystemStore: func(onSystemStore *mocks.DSReaderWriter_Expecter, seqKey core.Key) { - onSystemStore.Get(mock.Anything, seqKey.ToDS()).Return(nil, testErr) - }, - }, - { - Name: "Increment Sequence Number", - StubSystemStore: func(onSystemStore *mocks.DSReaderWriter_Expecter, seqKey core.Key) { - onSystemStore.Put(mock.Anything, seqKey.ToDS(), mock.Anything).Return(testErr) - }, - }, - } - - for _, tc := range testCases { - f := newIndexTestFixture(t) - - mockedTxn := f.mockTxn() - onSystemStore := mockedTxn.MockSystemstore.EXPECT() - f.resetSystemStoreStubs(onSystemStore) - - seqKey := core.NewSequenceKey(fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, f.users.ID())) - tc.StubSystemStore(onSystemStore, seqKey) - f.stubSystemStore(onSystemStore) - - _, err := f.createCollectionIndexFor(f.users.Name(), getUsersIndexDescOnName()) - assert.ErrorIs(t, err, testErr) - } -} - func TestGetCollectionIndexes_ShouldReturnListOfCollectionIndexes(t *testing.T) { f := newIndexTestFixture(t) @@ -1166,21 +1061,6 @@ func TestDropIndex_IfCollectionDoesntExist_ReturnError(t *testing.T) { assert.ErrorIs(t, err, NewErrCanNotReadCollection(usersColName, nil)) } -func TestDropIndex_IfFailsToQuerySystemStorage_ReturnError(t *testing.T) { - f := newIndexTestFixture(t) - desc := f.createUserCollectionIndexOnName() - - testErr := errors.New("test error") - - mockTxn := f.mockTxn().ClearSystemStore() - systemStoreOn := mockTxn.MockSystemstore.EXPECT() - systemStoreOn.Query(mock.Anything, mock.Anything).Return(nil, testErr) - f.stubSystemStore(systemStoreOn) - - err := f.dropIndex(usersColName, desc.Name) - require.ErrorIs(t, err, testErr) -} - func TestDropIndex_IfFailsToCreateTxn_ReturnError(t *testing.T) { f := newIndexTestFixture(t) diff --git a/db/indexed_docs_test.go b/db/indexed_docs_test.go index 5b25fab21a..d28e02cd3c 100644 --- a/db/indexed_docs_test.go +++ b/db/indexed_docs_test.go @@ -661,86 +661,6 @@ func TestNonUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { assert.Len(t, f.getPrefixFromDataStore(prodCatKey.ToString()), 1) } -func TestNonUniqueDrop_IfDataStorageFails_ReturnError(t *testing.T) { - testErr := errors.New("test error") - - testCases 
:= []struct { - description string - prepareSystemStorage func(*mocks.DSReaderWriter_Expecter) - }{ - { - description: "Fails to query data storage", - prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { - mockedDS.Query(mock.Anything, mock.Anything).Unset() - mockedDS.Query(mock.Anything, mock.Anything).Return(nil, testErr) - }, - }, - { - description: "Fails to iterate data storage", - prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { - mockedDS.Query(mock.Anything, mock.Anything).Unset() - q := mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}) - mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) - q.EXPECT().Close().Unset() - q.EXPECT().Close().Return(nil) - }, - }, - { - description: "Fails to delete from data storage", - prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { - q := mocks.NewQueryResultsWithResults(t, query.Result{Entry: query.Entry{Key: ""}}) - q.EXPECT().Close().Unset() - q.EXPECT().Close().Return(nil) - mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) - mockedDS.Delete(mock.Anything, mock.Anything).Unset() - mockedDS.Delete(mock.Anything, mock.Anything).Return(testErr) - }, - }, - { - description: "Fails to close data storage query iterator", - prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { - q := mocks.NewQueryResultsWithResults(t, query.Result{Entry: query.Entry{Key: ""}}) - q.EXPECT().Close().Unset() - q.EXPECT().Close().Return(testErr) - mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) - }, - }, - } - - for _, tc := range testCases { - f := newIndexTestFixture(t) - f.createUserCollectionIndexOnName() - - mockedTxn := f.mockTxn() - mockedTxn.MockDatastore = mocks.NewDSReaderWriter(t) - tc.prepareSystemStorage(mockedTxn.MockDatastore.EXPECT()) - mockedTxn.EXPECT().Datastore().Unset() - mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore) - - err := f.dropIndex(usersColName, testUsersColIndexName) - require.ErrorIs(t, err, testErr, tc.description) - } -} - -func TestNonUniqueDrop_ShouldCloseQueryIterator(t *testing.T) { - f := newIndexTestFixture(t) - f.createUserCollectionIndexOnName() - - mockedTxn := f.mockTxn() - - mockedTxn.MockDatastore = mocks.NewDSReaderWriter(f.t) - mockedTxn.EXPECT().Datastore().Unset() - mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore).Maybe() - queryResults := mocks.NewQueryResultsWithValues(f.t) - queryResults.EXPECT().Close().Unset() - queryResults.EXPECT().Close().Return(nil) - mockedTxn.MockDatastore.EXPECT().Query(mock.Anything, mock.Anything). 
- Return(queryResults, nil) - - err := f.dropIndex(usersColName, testUsersColIndexName) - assert.NoError(t, err) -} - func TestNonUniqueUpdate_ShouldDeleteOldValueAndStoreNewOne(t *testing.T) { f := newIndexTestFixture(t) f.createUserCollectionIndexOnName() diff --git a/db/schema.go b/db/schema.go index 3b3f4b6eb3..f5051f1a00 100644 --- a/db/schema.go +++ b/db/schema.go @@ -23,6 +23,7 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/description" ) const ( @@ -101,14 +102,14 @@ func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString st return err } - collectionsByName, err := db.getCollectionsByName(ctx, txn) + schemas, err := description.GetSchemas(ctx, txn) if err != nil { return err } existingSchemaByName := map[string]client.SchemaDescription{} - for _, col := range collectionsByName { - existingSchemaByName[col.Schema.Name] = col.Schema + for _, schema := range schemas { + existingSchemaByName[schema.Name] = schema } // Here we swap out any string representations of enums for their integer values @@ -137,55 +138,24 @@ func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString st return err } newCollections := []client.CollectionDefinition{} for _, schema := range newSchemaByName { - if schema.Name == "" { - return ErrSchemaNameEmpty - } - - collectionDescription, ok := collectionsByName[schema.Name] - if !ok { - return NewErrAddCollectionWithPatch(schema.Name) - } - - def := client.CollectionDefinition{Description: collectionDescription, Schema: schema} - newCollections = append(newCollections, def) - } - - for i, col := range newCollections { col, err := db.updateSchema( ctx, txn, existingSchemaByName, newSchemaByName, - col, + schema, setAsDefaultVersion, ) if err != nil { return err } - newCollections[i] = col.Definition() + newCollections = append(newCollections, col.Definition()) } return db.parser.SetSchema(ctx, txn, newCollections) } -func (db *db) getCollectionsByName( - ctx context.Context, - txn datastore.Txn, -) (map[string]client.CollectionDescription, error) { - collections, err := db.getAllCollections(ctx, txn) - if err != nil { - return nil, err - } - - collectionsByName := map[string]client.CollectionDescription{} - for _, collection := range collections { - collectionsByName[collection.Name()] = collection.Description() - } - - return collectionsByName, nil -} - // substituteSchemaPatch handles any substitution of values that may be required before // the patch can be applied. // diff --git a/docs/data_format_changes/i1958-remove-col-description-schema.md b/docs/data_format_changes/i1958-remove-col-description-schema.md new file mode 100644 index 0000000000..3eadccae71 --- /dev/null +++ b/docs/data_format_changes/i1958-remove-col-description-schema.md @@ -0,0 +1,3 @@ +# Remove CollectionDescription.Schema + +The way schemas are stored has changed. Previously they were stored within a collection description; this PR splits them out to a new independent key.
diff --git a/planner/sum.go b/planner/sum.go index c5ef06a03a..85371e5a30 100644 --- a/planner/sum.go +++ b/planner/sum.go @@ -82,7 +82,7 @@ func (p *Planner) isValueFloat( return false, err } - fieldDescription, fieldDescriptionFound := parentCol.Description().Schema.GetField(source.Name) + fieldDescription, fieldDescriptionFound := parentCol.Schema().GetField(source.Name) if !fieldDescriptionFound { return false, client.NewErrFieldNotExist(source.Name) } @@ -130,7 +130,7 @@ func (p *Planner) isValueFloat( return false, err } - fieldDescription, fieldDescriptionFound := childCol.Description().Schema.GetField(source.ChildTarget.Name) + fieldDescription, fieldDescriptionFound := childCol.Schema().GetField(source.ChildTarget.Name) if !fieldDescriptionFound { return false, client.NewErrFieldNotExist(source.ChildTarget.Name) } From 28f207df7e5646933c7478cd343040e04e02c6bc Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Wed, 18 Oct 2023 10:19:40 -0700 Subject: [PATCH 31/55] feat: Add OpenAPI route (#1960) ## Relevant issue(s) Resolves #510 ## Description This PR adds an HTTP endpoint that returns an OpenAPI specification for DefraDB. The definitions are partly code-generated and partly hand-written. This should work well for adding examples and more documentation in the future. ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested? Viewed the endpoint in a browser at `localhost:9181/api/v0/openapi` and double-checked it with an OpenAPI validator tool. Specify the platform(s) on which this was tested: - MacOS --- cli/start.go | 7 +- go.mod | 8 ++ go.sum | 21 ++++ http/handler.go | 130 +++++++++------- http/handler_ccip.go | 50 +++++++ http/handler_ccip_test.go | 18 ++- http/handler_collection.go | 200 ++++++++++++++++++++++ http/handler_lens.go | 101 +++++++++++++++++ http/handler_p2p.go | 114 +++++++++++++++ http/handler_store.go | 179 ++++++++++++++++++++++ http/handler_tx.go | 64 +++++++++++ http/openapi.go | 149 +++++++++++++++++++ http/router.go | 68 ++++++++++++ http/server.go | 11 +- http/server_test.go | 52 ++++++--- tests/clients/cli/wrapper.go | 10 +- tests/clients/http/wrapper.go | 7 +- tests/integration/client.go | 2 +- 18 files changed, 1074 insertions(+), 117 deletions(-) create mode 100644 http/openapi.go create mode 100644 http/router.go diff --git a/cli/start.go b/cli/start.go index da99ae06ba..c3b86bb73d 100644 --- a/cli/start.go +++ b/cli/start.go @@ -274,9 +274,12 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { var server *httpapi.Server if node != nil { - server = httpapi.NewServer(node, sOpt...) + server, err = httpapi.NewServer(node, sOpt...) } else { - server = httpapi.NewServer(db, sOpt...) + server, err = httpapi.NewServer(db, sOpt...)
+ } + if err != nil { + return nil, errors.Wrap("failed to create http server", err) } if err := server.Listen(ctx); err != nil { return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", server.Addr), err) diff --git a/go.mod b/go.mod index 708d1878ad..7b429d4f9b 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/dgraph-io/badger/v4 v4.1.0 github.com/evanphx/json-patch/v5 v5.7.0 github.com/fxamacker/cbor/v2 v2.5.0 + github.com/getkin/kin-openapi v0.120.0 github.com/go-chi/chi/v5 v5.0.10 github.com/go-chi/cors v1.2.1 github.com/go-errors/errors v1.5.1 @@ -73,6 +74,8 @@ require ( github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -95,6 +98,7 @@ require ( github.com/hsanjuan/ipfs-lite v1.4.1 // indirect github.com/huin/goupnp v1.2.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/yaml v0.2.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/go-bitswap v0.12.0 // indirect github.com/ipfs/go-blockservice v0.5.1 // indirect @@ -119,6 +123,7 @@ require ( github.com/ipld/go-ipld-prime v0.21.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.17.0 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/koron/go-ssdp v0.0.4 // indirect @@ -136,6 +141,7 @@ require ( github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.19 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect @@ -143,6 +149,7 @@ require ( github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect @@ -156,6 +163,7 @@ require ( github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.0 // indirect diff --git a/go.sum b/go.sum index b6dab22da8..bcc7478f86 100644 --- a/go.sum +++ b/go.sum @@ -152,6 +152,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/creack/pty v1.1.7/go.mod 
h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -218,6 +219,8 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/getkin/kin-openapi v0.120.0 h1:MqJcNJFrMDFNc07iwE8iFC5eT2k/NPUFDIpNeiZv8Jg= +github.com/getkin/kin-openapi v0.120.0/go.mod h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= @@ -242,10 +245,16 @@ github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -419,6 +428,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= +github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 
h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= @@ -593,6 +604,8 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -633,6 +646,7 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25 h1:hC67vWtvuDnw8w6u4jLFoj3SOH92/4Lq8SCR++L7njw= github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25/go.mod h1:rDE4oJUIAQoXX9heUg8VOQf5LscRWj0BeE5mbGqOs3E= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= @@ -897,6 +911,8 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= @@ -955,6 +971,8 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ 
-1081,6 +1099,8 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9 github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1834,6 +1854,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= diff --git a/http/handler.go b/http/handler.go index b9b5754419..1df8987964 100644 --- a/http/handler.go +++ b/http/handler.go @@ -11,6 +11,7 @@ package http import ( + "context" "fmt" "net/http" "sync" @@ -29,12 +30,12 @@ var Version string = "v0" var playgroundHandler http.Handler = http.HandlerFunc(http.NotFound) type Handler struct { - db client.DB - router *chi.Mux - txs *sync.Map + db client.DB + mux *chi.Mux + txs *sync.Map } -func NewHandler(db client.DB, opts ServerOptions) *Handler { +func NewHandler(db client.DB, opts ServerOptions) (*Handler, error) { txs := &sync.Map{} tx_handler := &txHandler{} @@ -44,86 +45,53 @@ func NewHandler(db client.DB, opts ServerOptions) *Handler { lens_handler := &lensHandler{} ccip_handler := &ccipHandler{} - router := chi.NewRouter() - router.Use(middleware.RequestLogger(&logFormatter{})) - router.Use(middleware.Recoverer) - router.Use(CorsMiddleware(opts)) - router.Use(ApiMiddleware(db, txs, opts)) - - router.Route("/api/"+Version, func(api chi.Router) { - api.Use(TransactionMiddleware, StoreMiddleware) - api.Route("/tx", func(tx chi.Router) { - tx.Post("/", tx_handler.NewTxn) - tx.Post("/concurrent", tx_handler.NewConcurrentTxn) - tx.Post("/{id}", tx_handler.Commit) - tx.Delete("/{id}", tx_handler.Discard) - }) - api.Route("/backup", func(backup chi.Router) { - backup.Post("/export", store_handler.BasicExport) - backup.Post("/import", store_handler.BasicImport) - }) - api.Route("/schema", func(schema chi.Router) { - schema.Post("/", store_handler.AddSchema) - schema.Patch("/", store_handler.PatchSchema) - schema.Post("/default", store_handler.SetDefaultSchemaVersion) - }) - api.Route("/collections", func(collections chi.Router) { - collections.Get("/", store_handler.GetCollection) - // with collection middleware - collections_tx := collections.With(CollectionMiddleware) - collections_tx.Get("/{name}", collection_handler.GetAllDocKeys) - collections_tx.Post("/{name}", 
collection_handler.Create) - collections_tx.Patch("/{name}", collection_handler.UpdateWith) - collections_tx.Delete("/{name}", collection_handler.DeleteWith) - collections_tx.Post("/{name}/indexes", collection_handler.CreateIndex) - collections_tx.Get("/{name}/indexes", collection_handler.GetIndexes) - collections_tx.Delete("/{name}/indexes/{index}", collection_handler.DropIndex) - collections_tx.Get("/{name}/{key}", collection_handler.Get) - collections_tx.Patch("/{name}/{key}", collection_handler.Update) - collections_tx.Delete("/{name}/{key}", collection_handler.Delete) - }) - api.Route("/lens", func(lens chi.Router) { - lens.Use(LensMiddleware) - lens.Get("/", lens_handler.Config) - lens.Post("/", lens_handler.SetMigration) - lens.Post("/reload", lens_handler.ReloadLenses) - lens.Get("/{version}", lens_handler.HasMigration) - lens.Post("/{version}/up", lens_handler.MigrateUp) - lens.Post("/{version}/down", lens_handler.MigrateDown) - }) - api.Route("/graphql", func(graphQL chi.Router) { - graphQL.Get("/", store_handler.ExecRequest) - graphQL.Post("/", store_handler.ExecRequest) - }) - api.Route("/ccip", func(ccip chi.Router) { - ccip.Get("/{sender}/{data}", ccip_handler.ExecCCIP) - ccip.Post("/", ccip_handler.ExecCCIP) - }) - api.Route("/p2p", func(p2p chi.Router) { - p2p.Get("/info", p2p_handler.PeerInfo) - p2p.Route("/replicators", func(p2p_replicators chi.Router) { - p2p_replicators.Get("/", p2p_handler.GetAllReplicators) - p2p_replicators.Post("/", p2p_handler.SetReplicator) - p2p_replicators.Delete("/", p2p_handler.DeleteReplicator) - }) - p2p.Route("/collections", func(p2p_collections chi.Router) { - p2p_collections.Get("/", p2p_handler.GetAllP2PCollections) - p2p_collections.Post("/", p2p_handler.AddP2PCollection) - p2p_collections.Delete("/", p2p_handler.RemoveP2PCollection) - }) - }) - api.Route("/debug", func(debug chi.Router) { - debug.Get("/dump", store_handler.PrintDump) - }) + router, err := NewRouter() + if err != nil { + return nil, err + } + + router.AddMiddleware( + ApiMiddleware(db, txs, opts), + TransactionMiddleware, + StoreMiddleware, + ) + + tx_handler.bindRoutes(router) + store_handler.bindRoutes(router) + p2p_handler.bindRoutes(router) + ccip_handler.bindRoutes(router) + + router.AddRouteGroup(func(r *Router) { + r.AddMiddleware(CollectionMiddleware) + collection_handler.bindRoutes(r) + }) + + router.AddRouteGroup(func(r *Router) { + r.AddMiddleware(LensMiddleware) + lens_handler.bindRoutes(r) }) - router.Handle("/*", playgroundHandler) + if err := router.Validate(context.Background()); err != nil { + return nil, err + } + + mux := chi.NewMux() + mux.Use( + middleware.RequestLogger(&logFormatter{}), + middleware.Recoverer, + CorsMiddleware(opts), + ) + mux.Mount("/api/"+Version, router) + mux.Get("/openapi.json", func(rw http.ResponseWriter, req *http.Request) { + responseJSON(rw, http.StatusOK, router.OpenAPI()) + }) + mux.Handle("/*", playgroundHandler) return &Handler{ - db: db, - router: router, - txs: txs, - } + db: db, + mux: mux, + txs: txs, + }, nil } func (h *Handler) Transaction(id uint64) (datastore.Txn, error) { @@ -135,5 +103,5 @@ func (h *Handler) Transaction(id uint64) (datastore.Txn, error) { } func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - h.router.ServeHTTP(w, req) + h.mux.ServeHTTP(w, req) } diff --git a/http/handler_ccip.go b/http/handler_ccip.go index a0d1af7823..d2a9ad6783 100644 --- a/http/handler_ccip.go +++ b/http/handler_ccip.go @@ -16,6 +16,7 @@ import ( "net/http" "strings" + 
"github.com/getkin/kin-openapi/openapi3" "github.com/go-chi/chi/v5" "github.com/sourcenetwork/defradb/client" @@ -72,3 +73,52 @@ func (c *ccipHandler) ExecCCIP(rw http.ResponseWriter, req *http.Request) { resultHex := "0x" + hex.EncodeToString(resultJSON) responseJSON(rw, http.StatusOK, CCIPResponse{Data: resultHex}) } + +func (h *ccipHandler) bindRoutes(router *Router) { + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + ccipRequestSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/ccip_request", + } + ccipResponseSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/ccip_response", + } + + ccipRequest := openapi3.NewRequestBody(). + WithContent(openapi3.NewContentWithJSONSchemaRef(ccipRequestSchema)) + + ccipResponse := openapi3.NewResponse(). + WithDescription("GraphQL response"). + WithContent(openapi3.NewContentWithJSONSchemaRef(ccipResponseSchema)) + + ccipPost := openapi3.NewOperation() + ccipPost.Description = "CCIP POST endpoint" + ccipPost.OperationID = "ccip_post" + ccipPost.Tags = []string{"ccip"} + ccipPost.RequestBody = &openapi3.RequestBodyRef{ + Value: ccipRequest, + } + ccipPost.AddResponse(200, ccipResponse) + ccipPost.Responses["400"] = errorResponse + + dataPathParam := openapi3.NewPathParameter("data"). + WithDescription("Hex encoded request data"). + WithSchema(openapi3.NewStringSchema()) + + senderPathParam := openapi3.NewPathParameter("sender"). + WithDescription("Hex encoded sender address"). + WithSchema(openapi3.NewStringSchema()) + + ccipGet := openapi3.NewOperation() + ccipGet.Description = "CCIP GET endpoint" + ccipGet.OperationID = "ccip_get" + ccipGet.Tags = []string{"ccip"} + ccipGet.AddParameter(dataPathParam) + ccipGet.AddParameter(senderPathParam) + ccipGet.AddResponse(200, ccipResponse) + ccipGet.Responses["400"] = errorResponse + + router.AddRoute("/ccip/{sender}/{data}", http.MethodGet, ccipGet, h.ExecCCIP) + router.AddRoute("/ccip", http.MethodPost, ccipPost, h.ExecCCIP) +} diff --git a/http/handler_ccip_test.go b/http/handler_ccip_test.go index 4fb9e5259c..66ac173a54 100644 --- a/http/handler_ccip_test.go +++ b/http/handler_ccip_test.go @@ -49,7 +49,8 @@ func TestCCIPGet_WithValidData(t *testing.T) { req := httptest.NewRequest(http.MethodGet, url, nil) rec := httptest.NewRecorder() - handler := NewHandler(cdb, ServerOptions{}) + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) handler.ServeHTTP(rec, req) res := rec.Result() @@ -87,7 +88,8 @@ func TestCCIPGet_WithSubscription(t *testing.T) { req := httptest.NewRequest(http.MethodGet, url, nil) rec := httptest.NewRecorder() - handler := NewHandler(cdb, ServerOptions{}) + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) handler.ServeHTTP(rec, req) res := rec.Result() @@ -104,7 +106,8 @@ func TestCCIPGet_WithInvalidData(t *testing.T) { req := httptest.NewRequest(http.MethodGet, url, nil) rec := httptest.NewRecorder() - handler := NewHandler(cdb, ServerOptions{}) + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) handler.ServeHTTP(rec, req) res := rec.Result() @@ -132,7 +135,8 @@ func TestCCIPPost_WithValidData(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", bytes.NewBuffer(body)) rec := httptest.NewRecorder() - handler := NewHandler(cdb, ServerOptions{}) + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) handler.ServeHTTP(rec, req) res := rec.Result() @@ -163,7 +167,8 @@ func 
TestCCIPPost_WithInvalidGraphQLRequest(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", bytes.NewBuffer(body)) rec := httptest.NewRecorder() - handler := NewHandler(cdb, ServerOptions{}) + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) handler.ServeHTTP(rec, req) res := rec.Result() @@ -176,7 +181,8 @@ func TestCCIPPost_WithInvalidBody(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", nil) rec := httptest.NewRecorder() - handler := NewHandler(cdb, ServerOptions{}) + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) handler.ServeHTTP(rec, req) res := rec.Result() diff --git a/http/handler_collection.go b/http/handler_collection.go index 607c1f1b21..a5622f1336 100644 --- a/http/handler_collection.go +++ b/http/handler_collection.go @@ -17,6 +17,7 @@ import ( "net/http" "strconv" + "github.com/getkin/kin-openapi/openapi3" "github.com/go-chi/chi/v5" "github.com/sourcenetwork/defradb/client" @@ -331,3 +332,202 @@ func (s *collectionHandler) DropIndex(rw http.ResponseWriter, req *http.Request) } rw.WriteHeader(http.StatusOK) } + +func (h *collectionHandler) bindRoutes(router *Router) { + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + successResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/success", + } + collectionUpdateSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/collection_update", + } + updateResultSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/update_result", + } + collectionDeleteSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/collection_delete", + } + deleteResultSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/delete_result", + } + documentSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/document", + } + indexSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/index", + } + + collectionNamePathParam := openapi3.NewPathParameter("name"). + WithDescription("Collection name"). + WithRequired(true). + WithSchema(openapi3.NewStringSchema()) + + documentArraySchema := openapi3.NewArraySchema() + documentArraySchema.Items = documentSchema + + collectionCreateSchema := openapi3.NewOneOfSchema() + collectionCreateSchema.OneOf = openapi3.SchemaRefs{ + documentSchema, + openapi3.NewSchemaRef("", documentArraySchema), + } + + collectionCreateRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchema(collectionCreateSchema)) + + collectionCreate := openapi3.NewOperation() + collectionCreate.OperationID = "collection_create" + collectionCreate.Description = "Create document(s) in a collection" + collectionCreate.Tags = []string{"collection"} + collectionCreate.AddParameter(collectionNamePathParam) + collectionCreate.RequestBody = &openapi3.RequestBodyRef{ + Value: collectionCreateRequest, + } + collectionCreate.Responses = make(openapi3.Responses) + collectionCreate.Responses["200"] = successResponse + collectionCreate.Responses["400"] = errorResponse + + collectionUpdateWithRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchemaRef(collectionUpdateSchema)) + + collectionUpdateWithResponse := openapi3.NewResponse(). + WithDescription("Update results"). 
+ WithJSONSchemaRef(updateResultSchema) + + collectionUpdateWith := openapi3.NewOperation() + collectionUpdateWith.OperationID = "collection_update_with" + collectionUpdateWith.Description = "Update document(s) in a collection" + collectionUpdateWith.Tags = []string{"collection"} + collectionUpdateWith.AddParameter(collectionNamePathParam) + collectionUpdateWith.RequestBody = &openapi3.RequestBodyRef{ + Value: collectionUpdateWithRequest, + } + collectionUpdateWith.AddResponse(200, collectionUpdateWithResponse) + collectionUpdateWith.Responses["400"] = errorResponse + + collectionDeleteWithRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchemaRef(collectionDeleteSchema)) + + collectionDeleteWithResponse := openapi3.NewResponse(). + WithDescription("Delete results"). + WithJSONSchemaRef(deleteResultSchema) + + collectionDeleteWith := openapi3.NewOperation() + collectionDeleteWith.OperationID = "collections_delete_with" + collectionDeleteWith.Description = "Delete document(s) from a collection" + collectionDeleteWith.Tags = []string{"collection"} + collectionDeleteWith.AddParameter(collectionNamePathParam) + collectionDeleteWith.RequestBody = &openapi3.RequestBodyRef{ + Value: collectionDeleteWithRequest, + } + collectionDeleteWith.AddResponse(200, collectionDeleteWithResponse) + collectionDeleteWith.Responses["400"] = errorResponse + + createIndexRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchemaRef(indexSchema)) + createIndexResponse := openapi3.NewResponse(). + WithDescription("Index description"). + WithJSONSchemaRef(indexSchema) + + createIndex := openapi3.NewOperation() + createIndex.OperationID = "index_create" + createIndex.Description = "Create a secondary index" + createIndex.Tags = []string{"index"} + createIndex.AddParameter(collectionNamePathParam) + createIndex.RequestBody = &openapi3.RequestBodyRef{ + Value: createIndexRequest, + } + createIndex.AddResponse(200, createIndexResponse) + createIndex.Responses["400"] = errorResponse + + indexArraySchema := openapi3.NewArraySchema() + indexArraySchema.Items = indexSchema + + getIndexesResponse := openapi3.NewResponse(). + WithDescription("List of indexes"). + WithJSONSchema(indexArraySchema) + + getIndexes := openapi3.NewOperation() + getIndexes.OperationID = "index_list" + getIndexes.Description = "List secondary indexes" + getIndexes.Tags = []string{"index"} + getIndexes.AddParameter(collectionNamePathParam) + getIndexes.AddResponse(200, getIndexesResponse) + getIndexes.Responses["400"] = errorResponse + + indexPathParam := openapi3.NewPathParameter("index"). + WithRequired(true). + WithSchema(openapi3.NewStringSchema()) + + dropIndex := openapi3.NewOperation() + dropIndex.OperationID = "index_drop" + dropIndex.Description = "Delete a secondary index" + dropIndex.Tags = []string{"index"} + dropIndex.AddParameter(collectionNamePathParam) + dropIndex.AddParameter(indexPathParam) + dropIndex.Responses = make(openapi3.Responses) + dropIndex.Responses["200"] = successResponse + dropIndex.Responses["400"] = errorResponse + + documentKeyPathParam := openapi3.NewPathParameter("key"). + WithRequired(true). + WithSchema(openapi3.NewStringSchema()) + + collectionGetResponse := openapi3.NewResponse(). + WithDescription("Document value"). 
+ WithJSONSchemaRef(documentSchema) + + collectionGet := openapi3.NewOperation() + collectionGet.Description = "Get a document by key" + collectionGet.OperationID = "collection_get" + collectionGet.Tags = []string{"collection"} + collectionGet.AddParameter(collectionNamePathParam) + collectionGet.AddParameter(documentKeyPathParam) + collectionGet.AddResponse(200, collectionGetResponse) + collectionGet.Responses["400"] = errorResponse + + collectionUpdate := openapi3.NewOperation() + collectionUpdate.Description = "Update a document by key" + collectionUpdate.OperationID = "collection_update" + collectionUpdate.Tags = []string{"collection"} + collectionUpdate.AddParameter(collectionNamePathParam) + collectionUpdate.AddParameter(documentKeyPathParam) + collectionUpdate.Responses = make(openapi3.Responses) + collectionUpdate.Responses["200"] = successResponse + collectionUpdate.Responses["400"] = errorResponse + + collectionDelete := openapi3.NewOperation() + collectionDelete.Description = "Delete a document by key" + collectionDelete.OperationID = "collection_delete" + collectionDelete.Tags = []string{"collection"} + collectionDelete.AddParameter(collectionNamePathParam) + collectionDelete.AddParameter(documentKeyPathParam) + collectionDelete.Responses = make(openapi3.Responses) + collectionDelete.Responses["200"] = successResponse + collectionDelete.Responses["400"] = errorResponse + + collectionKeys := openapi3.NewOperation() + collectionKeys.AddParameter(collectionNamePathParam) + collectionKeys.Description = "Get all document keys" + collectionKeys.OperationID = "collection_keys" + collectionKeys.Tags = []string{"collection"} + collectionKeys.Responses = make(openapi3.Responses) + collectionKeys.Responses["200"] = successResponse + collectionKeys.Responses["400"] = errorResponse + + router.AddRoute("/collections/{name}", http.MethodGet, collectionKeys, h.GetAllDocKeys) + router.AddRoute("/collections/{name}", http.MethodPost, collectionCreate, h.Create) + router.AddRoute("/collections/{name}", http.MethodPatch, collectionUpdateWith, h.UpdateWith) + router.AddRoute("/collections/{name}", http.MethodDelete, collectionDeleteWith, h.DeleteWith) + router.AddRoute("/collections/{name}/indexes", http.MethodPost, createIndex, h.CreateIndex) + router.AddRoute("/collections/{name}/indexes", http.MethodGet, getIndexes, h.GetIndexes) + router.AddRoute("/collections/{name}/indexes/{index}", http.MethodDelete, dropIndex, h.DropIndex) + router.AddRoute("/collections/{name}/{key}", http.MethodGet, collectionGet, h.Get) + router.AddRoute("/collections/{name}/{key}", http.MethodPatch, collectionUpdate, h.Update) + router.AddRoute("/collections/{name}/{key}", http.MethodDelete, collectionDelete, h.Delete) +} diff --git a/http/handler_lens.go b/http/handler_lens.go index d5ddb704c8..a06a4d09f1 100644 --- a/http/handler_lens.go +++ b/http/handler_lens.go @@ -13,6 +13,7 @@ package http import ( "net/http" + "github.com/getkin/kin-openapi/openapi3" "github.com/go-chi/chi/v5" "github.com/sourcenetwork/immutable/enumerable" @@ -121,3 +122,103 @@ func (s *lensHandler) HasMigration(rw http.ResponseWriter, req *http.Request) { } rw.WriteHeader(http.StatusOK) } + +func (h *lensHandler) bindRoutes(router *Router) { + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + successResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/success", + } + documentSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/document", + } + + lensConfigSchema := 
openapi3.NewSchemaRef("#/components/schemas/lens_config", nil) + lensConfigArraySchema := openapi3.NewArraySchema() + lensConfigArraySchema.Items = lensConfigSchema + + lensConfigResponse := openapi3.NewResponse(). + WithDescription("Lens configurations"). + WithJSONSchema(lensConfigArraySchema) + + lensConfig := openapi3.NewOperation() + lensConfig.OperationID = "lens_config" + lensConfig.Description = "List lens migrations" + lensConfig.Tags = []string{"lens"} + lensConfig.AddResponse(200, lensConfigResponse) + lensConfig.Responses["400"] = errorResponse + + setMigrationRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithJSONSchemaRef(lensConfigSchema) + + setMigration := openapi3.NewOperation() + setMigration.OperationID = "lens_set_migration" + setMigration.Description = "Add a new lens migration" + setMigration.Tags = []string{"lens"} + setMigration.RequestBody = &openapi3.RequestBodyRef{ + Value: setMigrationRequest, + } + setMigration.Responses = make(openapi3.Responses) + setMigration.Responses["200"] = successResponse + setMigration.Responses["400"] = errorResponse + + reloadLenses := openapi3.NewOperation() + reloadLenses.OperationID = "lens_reload" + reloadLenses.Description = "Reload lens migrations" + reloadLenses.Tags = []string{"lens"} + reloadLenses.Responses = make(openapi3.Responses) + reloadLenses.Responses["200"] = successResponse + reloadLenses.Responses["400"] = errorResponse + + versionPathParam := openapi3.NewPathParameter("version"). + WithRequired(true). + WithSchema(openapi3.NewStringSchema()) + + hasMigration := openapi3.NewOperation() + hasMigration.OperationID = "lens_has_migration" + hasMigration.Description = "Check if a migration exists" + hasMigration.Tags = []string{"lens"} + hasMigration.AddParameter(versionPathParam) + hasMigration.Responses = make(openapi3.Responses) + hasMigration.Responses["200"] = successResponse + hasMigration.Responses["400"] = errorResponse + + migrateSchema := openapi3.NewArraySchema() + migrateSchema.Items = documentSchema + migrateRequest := openapi3.NewRequestBody(). + WithRequired(true). 
+ WithContent(openapi3.NewContentWithJSONSchema(migrateSchema)) + + migrateUp := openapi3.NewOperation() + migrateUp.OperationID = "lens_migrate_up" + migrateUp.Description = "Migrate documents to a schema version" + migrateUp.Tags = []string{"lens"} + migrateUp.RequestBody = &openapi3.RequestBodyRef{ + Value: migrateRequest, + } + migrateUp.AddParameter(versionPathParam) + migrateUp.Responses = make(openapi3.Responses) + migrateUp.Responses["200"] = successResponse + migrateUp.Responses["400"] = errorResponse + + migrateDown := openapi3.NewOperation() + migrateDown.OperationID = "lens_migrate_down" + migrateDown.Description = "Migrate documents from a schema version" + migrateDown.Tags = []string{"lens"} + migrateDown.RequestBody = &openapi3.RequestBodyRef{ + Value: migrateRequest, + } + migrateDown.AddParameter(versionPathParam) + migrateDown.Responses = make(openapi3.Responses) + migrateDown.Responses["200"] = successResponse + migrateDown.Responses["400"] = errorResponse + + router.AddRoute("/lens", http.MethodGet, lensConfig, h.Config) + router.AddRoute("/lens", http.MethodPost, setMigration, h.SetMigration) + router.AddRoute("/lens/reload", http.MethodPost, reloadLenses, h.ReloadLenses) + router.AddRoute("/lens/{version}", http.MethodGet, hasMigration, h.HasMigration) + router.AddRoute("/lens/{version}/up", http.MethodPost, migrateUp, h.MigrateUp) + router.AddRoute("/lens/{version}/down", http.MethodPost, migrateDown, h.MigrateDown) +} diff --git a/http/handler_p2p.go b/http/handler_p2p.go index cec11b8325..73727ec297 100644 --- a/http/handler_p2p.go +++ b/http/handler_p2p.go @@ -13,6 +13,8 @@ package http import ( "net/http" + "github.com/getkin/kin-openapi/openapi3" + "github.com/sourcenetwork/defradb/client" ) @@ -136,3 +138,115 @@ func (s *p2pHandler) GetAllP2PCollections(rw http.ResponseWriter, req *http.Requ } responseJSON(rw, http.StatusOK, cols) } + +func (h *p2pHandler) bindRoutes(router *Router) { + successResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/success", + } + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + peerInfoSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/peer_info", + } + replicatorSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/replicator", + } + + peerInfoResponse := openapi3.NewResponse(). + WithDescription("Peer network info"). + WithContent(openapi3.NewContentWithJSONSchemaRef(peerInfoSchema)) + + peerInfo := openapi3.NewOperation() + peerInfo.OperationID = "peer_info" + peerInfo.Tags = []string{"p2p"} + peerInfo.AddResponse(200, peerInfoResponse) + peerInfo.Responses["400"] = errorResponse + + getReplicatorsSchema := openapi3.NewArraySchema() + getReplicatorsSchema.Items = replicatorSchema + getReplicatorsResponse := openapi3.NewResponse(). + WithDescription("Replicators"). + WithContent(openapi3.NewContentWithJSONSchema(getReplicatorsSchema)) + + getReplicators := openapi3.NewOperation() + getReplicators.Description = "List peer replicators" + getReplicators.OperationID = "peer_replicator_list" + getReplicators.Tags = []string{"p2p"} + getReplicators.AddResponse(200, getReplicatorsResponse) + getReplicators.Responses["400"] = errorResponse + + replicatorRequest := openapi3.NewRequestBody(). + WithRequired(true). 
+ WithContent(openapi3.NewContentWithJSONSchemaRef(replicatorSchema)) + + setReplicator := openapi3.NewOperation() + setReplicator.Description = "Add peer replicators" + setReplicator.OperationID = "peer_replicator_set" + setReplicator.Tags = []string{"p2p"} + setReplicator.RequestBody = &openapi3.RequestBodyRef{ + Value: replicatorRequest, + } + setReplicator.Responses = make(openapi3.Responses) + setReplicator.Responses["200"] = successResponse + setReplicator.Responses["400"] = errorResponse + + deleteReplicator := openapi3.NewOperation() + deleteReplicator.Description = "Delete peer replicators" + deleteReplicator.OperationID = "peer_replicator_delete" + deleteReplicator.Tags = []string{"p2p"} + deleteReplicator.RequestBody = &openapi3.RequestBodyRef{ + Value: replicatorRequest, + } + deleteReplicator.Responses = make(openapi3.Responses) + deleteReplicator.Responses["200"] = successResponse + deleteReplicator.Responses["400"] = errorResponse + + peerCollectionsSchema := openapi3.NewArraySchema(). + WithItems(openapi3.NewStringSchema()) + + peerCollectionRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchema(peerCollectionsSchema)) + + getPeerCollectionsResponse := openapi3.NewResponse(). + WithDescription("Peer collections"). + WithContent(openapi3.NewContentWithJSONSchema(peerCollectionsSchema)) + + getPeerCollections := openapi3.NewOperation() + getPeerCollections.Description = "List peer collections" + getPeerCollections.OperationID = "peer_collection_list" + getPeerCollections.Tags = []string{"p2p"} + getPeerCollections.AddResponse(200, getPeerCollectionsResponse) + getPeerCollections.Responses["400"] = errorResponse + + addPeerCollections := openapi3.NewOperation() + addPeerCollections.Description = "Add peer collections" + addPeerCollections.OperationID = "peer_collection_add" + addPeerCollections.Tags = []string{"p2p"} + addPeerCollections.RequestBody = &openapi3.RequestBodyRef{ + Value: peerCollectionRequest, + } + addPeerCollections.Responses = make(openapi3.Responses) + addPeerCollections.Responses["200"] = successResponse + addPeerCollections.Responses["400"] = errorResponse + + removePeerCollections := openapi3.NewOperation() + removePeerCollections.Description = "Remove peer collections" + removePeerCollections.OperationID = "peer_collection_remove" + removePeerCollections.Tags = []string{"p2p"} + removePeerCollections.RequestBody = &openapi3.RequestBodyRef{ + Value: peerCollectionRequest, + } + removePeerCollections.Responses = make(openapi3.Responses) + removePeerCollections.Responses["200"] = successResponse + removePeerCollections.Responses["400"] = errorResponse + + router.AddRoute("/p2p/info", http.MethodGet, peerInfo, h.PeerInfo) + router.AddRoute("/p2p/replicators", http.MethodGet, getReplicators, h.GetAllReplicators) + router.AddRoute("/p2p/replicators", http.MethodPost, setReplicator, h.SetReplicator) + router.AddRoute("/p2p/replicators", http.MethodDelete, deleteReplicator, h.DeleteReplicator) + router.AddRoute("/p2p/collections", http.MethodGet, getPeerCollections, h.GetAllP2PCollections) + router.AddRoute("/p2p/collections", http.MethodPost, addPeerCollections, h.AddP2PCollection) + router.AddRoute("/p2p/collections", http.MethodDelete, removePeerCollections, h.RemoveP2PCollection) +} diff --git a/http/handler_store.go b/http/handler_store.go index ce58383548..0b47069afc 100644 --- a/http/handler_store.go +++ b/http/handler_store.go @@ -17,6 +17,8 @@ import ( "io" "net/http" + 
"github.com/getkin/kin-openapi/openapi3" + "github.com/sourcenetwork/defradb/client" ) @@ -270,3 +272,180 @@ func (s *storeHandler) ExecRequest(rw http.ResponseWriter, req *http.Request) { } } } + +func (h *storeHandler) bindRoutes(router *Router) { + successResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/success", + } + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + collectionSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/collection", + } + graphQLRequestSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/graphql_request", + } + graphQLResponseSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/graphql_response", + } + backupConfigSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/backup_config", + } + patchSchemaRequestSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/patch_schema_request", + } + + collectionArraySchema := openapi3.NewArraySchema() + collectionArraySchema.Items = collectionSchema + + addSchemaResponse := openapi3.NewResponse(). + WithDescription("Collection(s)"). + WithJSONSchema(collectionArraySchema) + + addSchemaRequest := openapi3.NewRequestBody(). + WithContent(openapi3.NewContentWithSchema(openapi3.NewStringSchema(), []string{"text/plain"})) + + addSchema := openapi3.NewOperation() + addSchema.OperationID = "add_schema" + addSchema.Description = "Add a new schema definition" + addSchema.Tags = []string{"schema"} + addSchema.RequestBody = &openapi3.RequestBodyRef{ + Value: addSchemaRequest, + } + addSchema.AddResponse(200, addSchemaResponse) + addSchema.Responses["400"] = errorResponse + + patchSchemaRequest := openapi3.NewRequestBody(). + WithJSONSchemaRef(patchSchemaRequestSchema) + + patchSchema := openapi3.NewOperation() + patchSchema.OperationID = "patch_schema" + patchSchema.Description = "Update a schema definition" + patchSchema.Tags = []string{"schema"} + patchSchema.RequestBody = &openapi3.RequestBodyRef{ + Value: patchSchemaRequest, + } + patchSchema.Responses = make(openapi3.Responses) + patchSchema.Responses["200"] = successResponse + patchSchema.Responses["400"] = errorResponse + + setDefaultSchemaVersionRequest := openapi3.NewRequestBody(). + WithContent(openapi3.NewContentWithSchema(openapi3.NewStringSchema(), []string{"text/plain"})) + + setDefaultSchemaVersion := openapi3.NewOperation() + setDefaultSchemaVersion.OperationID = "set_default_schema_version" + setDefaultSchemaVersion.Description = "Set the default schema version for a collection" + setDefaultSchemaVersion.Tags = []string{"schema"} + setDefaultSchemaVersion.RequestBody = &openapi3.RequestBodyRef{ + Value: setDefaultSchemaVersionRequest, + } + setDefaultSchemaVersion.Responses = make(openapi3.Responses) + setDefaultSchemaVersion.Responses["200"] = successResponse + setDefaultSchemaVersion.Responses["400"] = errorResponse + + backupRequest := openapi3.NewRequestBody(). + WithRequired(true). 
+ WithJSONSchemaRef(backupConfigSchema) + + backupExport := openapi3.NewOperation() + backupExport.OperationID = "backup_export" + backupExport.Description = "Export a database backup to file" + backupExport.Tags = []string{"backup"} + backupExport.Responses = make(openapi3.Responses) + backupExport.Responses["200"] = successResponse + backupExport.Responses["400"] = errorResponse + backupExport.RequestBody = &openapi3.RequestBodyRef{ + Value: backupRequest, + } + + backupImport := openapi3.NewOperation() + backupImport.OperationID = "backup_import" + backupImport.Description = "Import a database backup from file" + backupImport.Tags = []string{"backup"} + backupImport.Responses = make(openapi3.Responses) + backupImport.Responses["200"] = successResponse + backupImport.Responses["400"] = errorResponse + backupImport.RequestBody = &openapi3.RequestBodyRef{ + Value: backupRequest, + } + + collectionNameQueryParam := openapi3.NewQueryParameter("name"). + WithDescription("Collection name"). + WithSchema(openapi3.NewStringSchema()) + collectionSchemaIdQueryParam := openapi3.NewQueryParameter("schema_id"). + WithDescription("Collection schema id"). + WithSchema(openapi3.NewStringSchema()) + collectionVersionIdQueryParam := openapi3.NewQueryParameter("version_id"). + WithDescription("Collection schema version id"). + WithSchema(openapi3.NewStringSchema()) + + collectionsSchema := openapi3.NewArraySchema() + collectionsSchema.Items = collectionSchema + + collectionResponseSchema := openapi3.NewOneOfSchema() + collectionResponseSchema.OneOf = openapi3.SchemaRefs{ + collectionSchema, + openapi3.NewSchemaRef("", collectionsSchema), + } + + collectionsResponse := openapi3.NewResponse(). + WithDescription("Collection(s) with matching name, schema id, or version id."). + WithJSONSchema(collectionResponseSchema) + + collectionDescribe := openapi3.NewOperation() + collectionDescribe.OperationID = "collection_describe" + collectionDescribe.Description = "Introspect collection(s) by name, schema id, or version id." + collectionDescribe.Tags = []string{"collection"} + collectionDescribe.AddParameter(collectionNameQueryParam) + collectionDescribe.AddParameter(collectionSchemaIdQueryParam) + collectionDescribe.AddParameter(collectionVersionIdQueryParam) + collectionDescribe.AddResponse(200, collectionsResponse) + collectionDescribe.Responses["400"] = errorResponse + + graphQLRequest := openapi3.NewRequestBody(). + WithContent(openapi3.NewContentWithJSONSchemaRef(graphQLRequestSchema)) + + graphQLResponse := openapi3.NewResponse(). + WithDescription("GraphQL response"). + WithContent(openapi3.NewContentWithJSONSchemaRef(graphQLResponseSchema)) + + graphQLPost := openapi3.NewOperation() + graphQLPost.Description = "GraphQL POST endpoint" + graphQLPost.OperationID = "graphql_post" + graphQLPost.Tags = []string{"graphql"} + graphQLPost.RequestBody = &openapi3.RequestBodyRef{ + Value: graphQLRequest, + } + graphQLPost.AddResponse(200, graphQLResponse) + graphQLPost.Responses["400"] = errorResponse + + graphQLQueryParam := openapi3.NewQueryParameter("query"). 
+ WithSchema(openapi3.NewStringSchema()) + + graphQLGet := openapi3.NewOperation() + graphQLGet.Description = "GraphQL GET endpoint" + graphQLGet.OperationID = "graphql_get" + graphQLGet.Tags = []string{"graphql"} + graphQLGet.AddParameter(graphQLQueryParam) + graphQLGet.AddResponse(200, graphQLResponse) + graphQLGet.Responses["400"] = errorResponse + + debugDump := openapi3.NewOperation() + debugDump.Description = "Dump database" + debugDump.OperationID = "debug_dump" + debugDump.Tags = []string{"debug"} + debugDump.Responses = make(openapi3.Responses) + debugDump.Responses["200"] = successResponse + debugDump.Responses["400"] = errorResponse + + router.AddRoute("/backup/export", http.MethodPost, backupExport, h.BasicExport) + router.AddRoute("/backup/import", http.MethodPost, backupImport, h.BasicImport) + router.AddRoute("/collections", http.MethodGet, collectionDescribe, h.GetCollection) + router.AddRoute("/graphql", http.MethodGet, graphQLGet, h.ExecRequest) + router.AddRoute("/graphql", http.MethodPost, graphQLPost, h.ExecRequest) + router.AddRoute("/debug/dump", http.MethodGet, debugDump, h.PrintDump) + router.AddRoute("/schema", http.MethodPost, addSchema, h.AddSchema) + router.AddRoute("/schema", http.MethodPatch, patchSchema, h.PatchSchema) + router.AddRoute("/schema/default", http.MethodPost, setDefaultSchemaVersion, h.SetDefaultSchemaVersion) +} diff --git a/http/handler_tx.go b/http/handler_tx.go index b7f1c82545..6bdb6b2009 100644 --- a/http/handler_tx.go +++ b/http/handler_tx.go @@ -15,6 +15,7 @@ import ( "strconv" "sync" + "github.com/getkin/kin-openapi/openapi3" "github.com/go-chi/chi/v5" "github.com/sourcenetwork/defradb/client" @@ -93,3 +94,66 @@ func (h *txHandler) Discard(rw http.ResponseWriter, req *http.Request) { txVal.(datastore.Txn).Discard(req.Context()) rw.WriteHeader(http.StatusOK) } + +func (h *txHandler) bindRoutes(router *Router) { + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + successResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/success", + } + createTxSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/create_tx", + } + + txnReadOnlyQueryParam := openapi3.NewQueryParameter("read_only"). + WithDescription("Read only transaction"). + WithSchema(openapi3.NewBoolSchema().WithDefault(false)) + + txnCreateResponse := openapi3.NewResponse(). + WithDescription("Transaction info"). + WithJSONSchemaRef(createTxSchema) + + txnCreate := openapi3.NewOperation() + txnCreate.OperationID = "new_transaction" + txnCreate.Description = "Create a new transaction" + txnCreate.Tags = []string{"transaction"} + txnCreate.AddParameter(txnReadOnlyQueryParam) + txnCreate.AddResponse(200, txnCreateResponse) + txnCreate.Responses["400"] = errorResponse + + txnConcurrent := openapi3.NewOperation() + txnConcurrent.OperationID = "new_concurrent_transaction" + txnConcurrent.Description = "Create a new concurrent transaction" + txnConcurrent.Tags = []string{"transaction"} + txnConcurrent.AddParameter(txnReadOnlyQueryParam) + txnConcurrent.AddResponse(200, txnCreateResponse) + txnConcurrent.Responses["400"] = errorResponse + + txnIdPathParam := openapi3.NewPathParameter("id"). + WithRequired(true). 
+ WithSchema(openapi3.NewInt64Schema()) + + txnCommit := openapi3.NewOperation() + txnCommit.OperationID = "transaction_commit" + txnCommit.Description = "Commit a transaction" + txnCommit.Tags = []string{"transaction"} + txnCommit.AddParameter(txnIdPathParam) + txnCommit.Responses = make(openapi3.Responses) + txnCommit.Responses["200"] = successResponse + txnCommit.Responses["400"] = errorResponse + + txnDiscard := openapi3.NewOperation() + txnDiscard.OperationID = "transaction_discard" + txnDiscard.Description = "Discard a transaction" + txnDiscard.Tags = []string{"transaction"} + txnDiscard.AddParameter(txnIdPathParam) + txnDiscard.Responses = make(openapi3.Responses) + txnDiscard.Responses["200"] = successResponse + txnDiscard.Responses["400"] = errorResponse + + router.AddRoute("/tx", http.MethodPost, txnCreate, h.NewTxn) + router.AddRoute("/tx/concurrent", http.MethodPost, txnConcurrent, h.NewConcurrentTxn) + router.AddRoute("/tx/{id}", http.MethodPost, txnCommit, h.Commit) + router.AddRoute("/tx/{id}", http.MethodDelete, txnDiscard, h.Discard) +} diff --git a/http/openapi.go b/http/openapi.go new file mode 100644 index 0000000000..88a8f2097d --- /dev/null +++ b/http/openapi.go @@ -0,0 +1,149 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "github.com/getkin/kin-openapi/openapi3" + "github.com/getkin/kin-openapi/openapi3gen" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/sourcenetwork/defradb/client" +) + +// openApiSchemas is a mapping of types to auto generate schemas for. +var openApiSchemas = map[string]any{ + "error": &errorResponse{}, + "create_tx": &CreateTxResponse{}, + "collection_update": &CollectionUpdateRequest{}, + "collection_delete": &CollectionDeleteRequest{}, + "peer_info": &peer.AddrInfo{}, + "graphql_request": &GraphQLRequest{}, + "graphql_response": &GraphQLResponse{}, + "backup_config": &client.BackupConfig{}, + "collection": &client.CollectionDescription{}, + "index": &client.IndexDescription{}, + "delete_result": &client.DeleteResult{}, + "update_result": &client.UpdateResult{}, + "lens_config": &client.LensConfig{}, + "replicator": &client.Replicator{}, + "ccip_request": &CCIPRequest{}, + "ccip_response": &CCIPResponse{}, + "patch_schema_request": &patchSchemaRequest{}, +} + +func NewOpenAPISpec() (*openapi3.T, error) { + schemas := make(openapi3.Schemas) + responses := make(openapi3.Responses) + parameters := make(openapi3.ParametersMap) + + generator := openapi3gen.NewGenerator(openapi3gen.UseAllExportedFields()) + for key, val := range openApiSchemas { + ref, err := generator.NewSchemaRefForValue(val, schemas) + if err != nil { + return nil, err + } + schemas[key] = ref + } + + errorSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/error", + } + + errorResponse := openapi3.NewResponse(). + WithDescription("error"). + WithContent(openapi3.NewContentWithJSONSchemaRef(errorSchema)) + + successResponse := openapi3.NewResponse(). + WithDescription("ok") + + txnHeaderParam := openapi3.NewHeaderParameter("x-defradb-tx"). + WithDescription("Transaction id"). 
+ WithSchema(openapi3.NewInt64Schema()) + + // add common schemas, responses, and params so we can reference them + schemas["document"] = &openapi3.SchemaRef{ + Value: openapi3.NewObjectSchema().WithAnyAdditionalProperties(), + } + responses["success"] = &openapi3.ResponseRef{ + Value: successResponse, + } + responses["error"] = &openapi3.ResponseRef{ + Value: errorResponse, + } + parameters["txn"] = &openapi3.ParameterRef{ + Value: txnHeaderParam, + } + + return &openapi3.T{ + OpenAPI: "3.0.3", + Info: &openapi3.Info{ + Title: "DefraDB API", + Version: "0", + }, + Paths: make(openapi3.Paths), + Servers: openapi3.Servers{ + &openapi3.Server{ + Description: "Local DefraDB instance", + URL: "http://localhost:9181/api/v0", + }, + }, + ExternalDocs: &openapi3.ExternalDocs{ + Description: "Learn more about DefraDB", + URL: "https://docs.source.network", + }, + Components: &openapi3.Components{ + Schemas: schemas, + Responses: responses, + Parameters: parameters, + }, + Tags: openapi3.Tags{ + &openapi3.Tag{ + Name: "schema", + Description: "Add or update schema definitions", + }, + &openapi3.Tag{ + Name: "collection", + Description: "Add, remove, or update documents", + }, + &openapi3.Tag{ + Name: "index", + Description: "Add, update, or remove indexes", + }, + &openapi3.Tag{ + Name: "lens", + Description: "Migrate documents to and from schema versions", + }, + &openapi3.Tag{ + Name: "p2p", + Description: "Peer-to-peer network operations", + }, + &openapi3.Tag{ + Name: "transaction", + Description: "Database transaction operations", + }, + &openapi3.Tag{ + Name: "backup", + Description: "Database backup operations", + }, + &openapi3.Tag{ + Name: "graphql", + Description: "GraphQL query endpoints", + }, + &openapi3.Tag{ + Name: "ccip", + ExternalDocs: &openapi3.ExternalDocs{ + Description: "EIP-3668", + URL: "https://eips.ethereum.org/EIPS/eip-3668", + }, + }, + }, + }, nil +} diff --git a/http/router.go b/http/router.go new file mode 100644 index 0000000000..ce8d4fc62f --- /dev/null +++ b/http/router.go @@ -0,0 +1,68 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "context" + "net/http" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/go-chi/chi/v5" +) + +type Router struct { + mux chi.Router + oas *openapi3.T +} + +func NewRouter() (*Router, error) { + oas, err := NewOpenAPISpec() + if err != nil { + return nil, err + } + return &Router{chi.NewMux(), oas}, nil +} + +// AddMiddleware adds middleware functions to the current route group. +func (r *Router) AddMiddleware(middlewares ...func(http.Handler) http.Handler) { + r.mux.Use(middlewares...) +} + +// AddRouteGroup adds handlers as a group. +func (r *Router) AddRouteGroup(group func(*Router)) { + r.mux.Group(func(router chi.Router) { + group(&Router{router, r.oas}) + }) +} + +// AddRoute adds a handler for the given route. +func (r *Router) AddRoute(pattern, method string, op *openapi3.Operation, handler http.HandlerFunc) { + r.mux.MethodFunc(method, pattern, handler) + r.oas.AddOperation(pattern, method, op) +} + +// Validate returns an error if the OpenAPI specification is invalid.
+func (r *Router) Validate(ctx context.Context) error { + loader := openapi3.NewLoader() + if err := loader.ResolveRefsIn(r.oas, nil); err != nil { + return err + } + return r.oas.Validate(ctx) +} + +// OpenAPI returns the OpenAPI specification. +func (r *Router) OpenAPI() *openapi3.T { + return r.oas +} + +func (r *Router) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + r.mux.ServeHTTP(rw, req) +} diff --git a/http/server.go b/http/server.go index 854a73f506..384264a8a6 100644 --- a/http/server.go +++ b/http/server.go @@ -81,7 +81,7 @@ type TLSOptions struct { } // NewServer instantiates a new server with the given http.Handler. -func NewServer(db client.DB, options ...func(*Server)) *Server { +func NewServer(db client.DB, options ...func(*Server)) (*Server, error) { srv := &Server{ Server: http.Server{ ReadTimeout: readTimeout, @@ -94,9 +94,12 @@ func NewServer(db client.DB, options ...func(*Server)) *Server { opt(srv) } - srv.Handler = NewHandler(db, srv.options) - - return srv + handler, err := NewHandler(db, srv.options) + if err != nil { + return nil, err + } + srv.Handler = handler + return srv, nil } func newHTTPRedirServer(m *autocert.Manager) *Server { diff --git a/http/server_test.go b/http/server_test.go index 33db303454..5e970ad317 100644 --- a/http/server_test.go +++ b/http/server_test.go @@ -17,12 +17,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/crypto/acme/autocert" ) func TestNewServerAndRunWithoutListener(t *testing.T) { ctx := context.Background() - s := NewServer(nil, WithAddress(":0")) + s, err := NewServer(nil, WithAddress(":0")) + require.NoError(t, err) if ok := assert.NotNil(t, s); ok { assert.Equal(t, ErrNoListener, s.Run(ctx)) } @@ -30,7 +32,8 @@ func TestNewServerAndRunWithoutListener(t *testing.T) { func TestNewServerAndRunWithListenerAndInvalidPort(t *testing.T) { ctx := context.Background() - s := NewServer(nil, WithAddress(":303000")) + s, err := NewServer(nil, WithAddress(":303000")) + require.NoError(t, err) if ok := assert.NotNil(t, s); ok { assert.Error(t, s.Listen(ctx)) } @@ -40,7 +43,8 @@ func TestNewServerAndRunWithListenerAndValidPort(t *testing.T) { ctx := context.Background() serverRunning := make(chan struct{}) serverDone := make(chan struct{}) - s := NewServer(nil, WithAddress(":0")) + s, err := NewServer(nil, WithAddress(":0")) + require.NoError(t, err) go func() { close(serverRunning) err := s.Listen(ctx) @@ -60,9 +64,9 @@ func TestNewServerAndRunWithListenerAndValidPort(t *testing.T) { func TestNewServerAndRunWithAutocertWithoutEmail(t *testing.T) { ctx := context.Background() dir := t.TempDir() - s := NewServer(nil, WithAddress("example.com"), WithRootDir(dir), WithTLSPort(0)) - - err := s.Listen(ctx) + s, err := NewServer(nil, WithAddress("example.com"), WithRootDir(dir), WithTLSPort(0)) + require.NoError(t, err) + err = s.Listen(ctx) assert.ErrorIs(t, err, ErrNoEmail) s.Shutdown(context.Background()) @@ -73,7 +77,8 @@ func TestNewServerAndRunWithAutocert(t *testing.T) { serverRunning := make(chan struct{}) serverDone := make(chan struct{}) dir := t.TempDir() - s := NewServer(nil, WithAddress("example.com"), WithRootDir(dir), WithTLSPort(0), WithCAEmail("dev@defradb.net")) + s, err := NewServer(nil, WithAddress("example.com"), WithRootDir(dir), WithTLSPort(0), WithCAEmail("dev@defradb.net")) + require.NoError(t, err) go func() { close(serverRunning) err := s.Listen(ctx) @@ -95,7 +100,8 @@ func TestNewServerAndRunWithSelfSignedCertAndNoKeyFiles(t *testing.T) { 
serverRunning := make(chan struct{}) serverDone := make(chan struct{}) dir := t.TempDir() - s := NewServer(nil, WithAddress("localhost:0"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + s, err := NewServer(nil, WithAddress("localhost:0"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + require.NoError(t, err) go func() { close(serverRunning) err := s.Listen(ctx) @@ -149,7 +155,8 @@ func TestNewServerAndRunWithSelfSignedCertAndInvalidPort(t *testing.T) { if err != nil { t.Fatal(err) } - s := NewServer(nil, WithAddress(":303000"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + s, err := NewServer(nil, WithAddress(":303000"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + require.NoError(t, err) go func() { close(serverRunning) err := s.Listen(ctx) @@ -177,7 +184,8 @@ func TestNewServerAndRunWithSelfSignedCert(t *testing.T) { if err != nil { t.Fatal(err) } - s := NewServer(nil, WithAddress("localhost:0"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + s, err := NewServer(nil, WithAddress("localhost:0"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + require.NoError(t, err) go func() { close(serverRunning) err := s.Listen(ctx) @@ -195,45 +203,53 @@ func TestNewServerAndRunWithSelfSignedCert(t *testing.T) { } func TestNewServerWithoutOptions(t *testing.T) { - s := NewServer(nil) + s, err := NewServer(nil) + require.NoError(t, err) assert.Equal(t, "localhost:9181", s.Addr) assert.Equal(t, []string(nil), s.options.AllowedOrigins) } func TestNewServerWithAddress(t *testing.T) { - s := NewServer(nil, WithAddress("localhost:9999")) + s, err := NewServer(nil, WithAddress("localhost:9999")) + require.NoError(t, err) assert.Equal(t, "localhost:9999", s.Addr) } func TestNewServerWithDomainAddress(t *testing.T) { - s := NewServer(nil, WithAddress("example.com")) + s, err := NewServer(nil, WithAddress("example.com")) + require.NoError(t, err) assert.Equal(t, "example.com", s.options.Domain.Value()) assert.NotNil(t, s.options.TLS) } func TestNewServerWithAllowedOrigins(t *testing.T) { - s := NewServer(nil, WithAllowedOrigins("https://source.network", "https://app.source.network")) + s, err := NewServer(nil, WithAllowedOrigins("https://source.network", "https://app.source.network")) + require.NoError(t, err) assert.Equal(t, []string{"https://source.network", "https://app.source.network"}, s.options.AllowedOrigins) } func TestNewServerWithCAEmail(t *testing.T) { - s := NewServer(nil, WithCAEmail("me@example.com")) + s, err := NewServer(nil, WithCAEmail("me@example.com")) + require.NoError(t, err) assert.Equal(t, "me@example.com", s.options.TLS.Value().Email) } func TestNewServerWithRootDir(t *testing.T) { dir := t.TempDir() - s := NewServer(nil, WithRootDir(dir)) + s, err := NewServer(nil, WithRootDir(dir)) + require.NoError(t, err) assert.Equal(t, dir, s.options.RootDir) } func TestNewServerWithTLSPort(t *testing.T) { - s := NewServer(nil, WithTLSPort(44343)) + s, err := NewServer(nil, WithTLSPort(44343)) + require.NoError(t, err) assert.Equal(t, ":44343", s.options.TLS.Value().Port) } func TestNewServerWithSelfSignedCert(t *testing.T) { - s := NewServer(nil, WithSelfSignedCert("pub.key", "priv.key")) + s, err := NewServer(nil, WithSelfSignedCert("pub.key", "priv.key")) + require.NoError(t, err) assert.Equal(t, "pub.key", s.options.TLS.Value().PublicKey) assert.Equal(t, "priv.key", s.options.TLS.Value().PrivateKey) assert.NotNil(t, s.options.TLS) diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go index 
8db991063e..bdfc892817 100644 --- a/tests/clients/cli/wrapper.go +++ b/tests/clients/cli/wrapper.go @@ -39,8 +39,12 @@ type Wrapper struct { httpServer *httptest.Server } -func NewWrapper(node *net.Node) *Wrapper { - handler := http.NewHandler(node, http.ServerOptions{}) +func NewWrapper(node *net.Node) (*Wrapper, error) { + handler, err := http.NewHandler(node, http.ServerOptions{}) + if err != nil { + return nil, err + } + httpServer := httptest.NewServer(handler) cmd := newCliWrapper(httpServer.URL) @@ -49,7 +53,7 @@ func NewWrapper(node *net.Node) *Wrapper { cmd: cmd, httpServer: httpServer, handler: handler, - } + }, nil } func (w *Wrapper) PeerInfo() peer.AddrInfo { diff --git a/tests/clients/http/wrapper.go b/tests/clients/http/wrapper.go index 35cd55f466..e48297921a 100644 --- a/tests/clients/http/wrapper.go +++ b/tests/clients/http/wrapper.go @@ -36,9 +36,12 @@ type Wrapper struct { } func NewWrapper(node *net.Node) (*Wrapper, error) { - handler := http.NewHandler(node, http.ServerOptions{}) - httpServer := httptest.NewServer(handler) + handler, err := http.NewHandler(node, http.ServerOptions{}) + if err != nil { + return nil, err + } + httpServer := httptest.NewServer(handler) client, err := http.NewClient(httpServer.URL) if err != nil { return nil, err diff --git a/tests/integration/client.go b/tests/integration/client.go index a6159900cc..1d06bfc744 100644 --- a/tests/integration/client.go +++ b/tests/integration/client.go @@ -69,7 +69,7 @@ func setupClient(s *state, node *net.Node) (impl clients.Client, err error) { impl, err = http.NewWrapper(node) case CLIClientType: - impl = cli.NewWrapper(node) + impl, err = cli.NewWrapper(node) case GoClientType: impl = node From 9cfd33fb7079d5c505fc76873b5cdaae414de6e5 Mon Sep 17 00:00:00 2001 From: Keenan Nemetz Date: Wed, 18 Oct 2023 13:02:23 -0700 Subject: [PATCH 32/55] feat: Add Swagger UI to playground (#1979) ## Relevant issue(s) Resolves #1978 ## Description This PR updates the playground with a [Swagger UI](https://swagger.io/tools/swagger-ui/) tab that allows users to view and interact with the DefraDB API. The UI is auto-generated from the OpenAPI specification served from the `/openapi.json` endpoint (an illustrative spec-validation sketch is included under the testing notes below). [Screenshot: Swagger UI tab rendering the DefraDB API in the playground, 2023-10-17] ## Tasks - [x] I made sure the code is well commented, particularly hard-to-understand areas. - [x] I made sure the repository-held documentation is changed accordingly. - [x] I made sure the pull request title adheres to the conventional commit style (the subset used in the project can be found in [tools/configs/chglog/config.yml](tools/configs/chglog/config.yml)). - [x] I made sure to discuss its limitations such as threats to validity, vulnerability to mistake and misuse, robustness to invalidation of assumptions, resource requirements, ... ## How has this been tested?
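In addition to the manual checks noted below, the generated spec can be sanity-checked programmatically. The following is a minimal sketch, not part of this PR: it assumes a DefraDB node is serving the HTTP API at the default `localhost:9181` address (the spec's own server URL) and that the document is exposed at `/api/v0/openapi.json`; both are assumptions to adjust for your setup. It loads the served spec with the same `kin-openapi` package the server uses and validates it:

```go
package main

import (
	"context"
	"fmt"
	"net/url"

	"github.com/getkin/kin-openapi/openapi3"
)

func main() {
	// Assumed location of the generated spec on a locally running node;
	// adjust the host, port, and path to match your deployment.
	uri, err := url.Parse("http://localhost:9181/api/v0/openapi.json")
	if err != nil {
		panic(err)
	}

	// Load the served document over HTTP, then validate it, mirroring
	// what Router.Validate does for the in-process spec.
	loader := openapi3.NewLoader()
	doc, err := loader.LoadFromURI(uri)
	if err != nil {
		panic(err)
	}
	if err := doc.Validate(context.Background()); err != nil {
		panic(err)
	}
	fmt.Println("valid spec:", doc.Info.Title, "version", doc.Info.Version)
}
```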
Manually Specify the platform(s) on which this was tested: - MacOS --- playground/package-lock.json | 2245 ++++++++++++++++- playground/package.json | 5 +- playground/src/App.tsx | 25 +- playground/src/components/Plugin.tsx | 57 - playground/src/components/SchemaLoadForm.tsx | 81 - playground/src/components/SchemaPatchForm.tsx | 117 - playground/src/index.css | 105 +- playground/src/lib/api.ts | 77 - 8 files changed, 2167 insertions(+), 545 deletions(-) delete mode 100644 playground/src/components/Plugin.tsx delete mode 100644 playground/src/components/SchemaLoadForm.tsx delete mode 100644 playground/src/components/SchemaPatchForm.tsx delete mode 100644 playground/src/lib/api.ts diff --git a/playground/package-lock.json b/playground/package-lock.json index 77ca69d25a..e3449ff081 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -8,17 +8,16 @@ "name": "playground", "version": "0.0.0", "dependencies": { - "@tanstack/react-query": "^4.36.1", - "fast-json-patch": "^3.1.1", "graphiql": "^3.0.6", "graphql": "^16.8.1", "react": "^18.2.0", "react-dom": "^18.2.0", - "react-hook-form": "^7.47.0" + "swagger-ui-react": "^5.9.0" }, "devDependencies": { "@types/react": "^18.2.25", "@types/react-dom": "^18.2.13", + "@types/swagger-ui-react": "^4.18.1", "@typescript-eslint/eslint-plugin": "^6.7.5", "@typescript-eslint/parser": "^6.7.5", "@vitejs/plugin-react-swc": "^3.4.0", @@ -49,6 +48,23 @@ "node": ">=6.9.0" } }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.23.2.tgz", + "integrity": "sha512-54cIh74Z1rp4oIjsHjqN+WM4fMyCBYe+LpZ9jWm51CZ1fbH3SkAzQD/3XLoNkjbJ7YEmjobLXyvQrFypRHOrXw==", + "dependencies": { + "core-js-pure": "^3.30.2", + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@braintree/sanitize-url": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz", + "integrity": "sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==" + }, "node_modules/@codemirror/language": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.0.0.tgz", @@ -503,6 +519,14 @@ "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, + "node_modules/@fastify/busboy": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.0.0.tgz", + "integrity": "sha512-JUFJad5lv7jxj926GPgymrWQxxjPYuJNiNjNMzqT+HiuP6Vl3dk5xzG+8sTX96np0ZAluvaMzPsjhHZ5rNuNQQ==", + "engines": { + "node": ">=14" + } + }, "node_modules/@floating-ui/core": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.5.0.tgz", @@ -1377,6 +1401,380 @@ "@babel/runtime": "^7.13.10" } }, + "node_modules/@swagger-api/apidom-ast": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ast/-/apidom-ast-0.78.0.tgz", + "integrity": "sha512-mEXmRmkFlmO6dcBuakFkc2gevN4mC6incPAQE1UciaX4hLuJpiv/5DTH9gVWTR0CWUFw/dXROTD/x6ETV0y03A==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-error": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2", + "unraw": "^3.0.0" + } + }, + "node_modules/@swagger-api/apidom-core": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-core/-/apidom-core-0.78.0.tgz", + "integrity": 
"sha512-Qx9m+1u6H4Bsa38s73ANtGn8zFGqK0peguM+SFuUR5HirjpoFB8JB7IG5E8+ymUlpWhlU43q9QnJjcaYJw9MTg==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-ast": "^0.78.0", + "@swagger-api/apidom-error": "^0.78.0", + "@types/ramda": "~0.29.6", + "minim": "~0.23.8", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "short-unique-id": "^5.0.2", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-error": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-error/-/apidom-error-0.78.0.tgz", + "integrity": "sha512-P0enIK3XymxCPHlhGtqc4TU5H+cHf7L0yDFmfjZEcsjDzGDv5A+m5tf429Pr/R+e51DzpT5/xIcPKTnti0gIOw==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-json-pointer": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-0.78.0.tgz", + "integrity": "sha512-Ly4ZfUGxxbNoHHc9vR814mU96ZLGsjaJflCW0jdZnMVfVv20fDCoDoOOmXat6ajxUbS2YKimgxPvdBth3K/CRQ==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-error": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-ns-api-design-systems": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-0.78.0.tgz", + "integrity": "sha512-WoWE6w1P3qsokG3Qyc5F3xpz+e/WablE0EHGSgiYxk+MQJLqYmz5UhS5LxYGT9d6o9XUs24ykSbKrYWYwkpp4w==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-error": "^0.78.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-ns-asyncapi-2": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-0.78.0.tgz", + "integrity": "sha512-QWZohCtXf5UX/I9bnc4MQh16X9jGPdGrByWM93xRvh8X8rIF0BtF9S7lIx028aX3AHYIu4SwYr7JZlqEaZ92Kw==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-ns-json-schema-draft-7": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-ns-json-schema-draft-4": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-0.78.0.tgz", + "integrity": "sha512-19NR9lTHMOQTIEV4tJq+FlHQAYnjyH+DgI4mmRu6UMFSZjRjutYF7B8lCGogSus9Uwy8YpUk00prLFTld00wgA==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-ast": "^0.78.0", + "@swagger-api/apidom-core": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-ns-json-schema-draft-6": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-0.78.0.tgz", + "integrity": "sha512-pHyCPU3OWDiPuLepo03rBpi2n+SCH6PZAgguqAB3lDJ2ymitrT2SNpmZ6CcHvPGR9Y7h4/fR5vAypVZfdNr/WQ==", + "optional": true, + "dependencies": { 
+ "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-error": "^0.78.0", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-ns-json-schema-draft-7": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-0.78.0.tgz", + "integrity": "sha512-ScUiNNAdwnikH3Fo2rUsDmXOjV7zXfQ6CGE+QkY5Wj3t1M6siw2HpDjrBaaCyp6w/bemvogsh280GrzAnxKLIw==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-error": "^0.78.0", + "@swagger-api/apidom-ns-json-schema-draft-6": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-ns-openapi-3-0": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-0.78.0.tgz", + "integrity": "sha512-GRmUOknEzMG37y5sStvjEsk30RLVg5E7iZuougK1rEf+wzzX5XhorSgMx2NQmka5rb814BgzyiqGRmvKQErDBw==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-error": "^0.78.0", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-ns-openapi-3-1": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-0.78.0.tgz", + "integrity": "sha512-hHpUZLjIiaLK+99cAPiYNV9QzZQxFoMLqBNYo+GQwqizaVOjxQRi5y/hPkfFALqqufZ1L6XWeyjQrtli0ftqBQ==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-ast": "^0.78.0", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-json": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-0.78.0.tgz", + "integrity": "sha512-g7VlfOrpTzbVV30Ugab0qAJITavLo39apvyFFv2cN2jfuIQa8MlzDP0mZmVtCGQy3IoT4Auns/qWeGcZX0li9w==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-ns-api-design-systems": "^0.78.0", + "@swagger-api/apidom-parser-adapter-json": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-yaml": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-0.78.0.tgz", + "integrity": "sha512-ZueYoHOJARRm84ntCggUZLKNwUHz2U0eG9KHIzw75UW43pyvQVbxAE2ELdyP5f8vr51wMuMp6XYRcFOsNi/oeQ==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-ns-api-design-systems": "^0.78.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + 
"ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-json-2": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-0.78.0.tgz", + "integrity": "sha512-Jm0hbNXWOH2QJIiF+5QgY+ioVSOBqV3WlhTeyrF5kSxHinah16nR1jUkz5tMsSc9sxTZHzWYVLneyBMW3VSHrw==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-ns-asyncapi-2": "^0.78.0", + "@swagger-api/apidom-parser-adapter-json": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-0.78.0.tgz", + "integrity": "sha512-zpP8gQBXhrR/t91Z/Jl0nD/cUSzmYjzhE5qWHkfhbGvzaWatiLrNY+CnFS9RcgF4pb2LSqS5cjDVAExBbjdLdQ==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-ns-asyncapi-2": "^0.78.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-json": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-0.78.0.tgz", + "integrity": "sha512-d/8gFj5cc+pnCo7ORGN5dJPGWzTleYkIwGfsyFuLZNjb4KlrOrKlPl0LKQ/t7MSEbVpSStxbgezoUtfdVhGscw==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-ast": "^0.78.0", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-error": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2", + "tree-sitter": "=0.20.4", + "tree-sitter-json": "=0.20.1", + "web-tree-sitter": "=0.20.3" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-0": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-0.78.0.tgz", + "integrity": "sha512-MjXkPAiEyTZIljzjEgvAmqaZel0jpKBBqdtC8nWH/9C2ugkKHetKMSgYu+5wvFh//ixJZZE7dM1QHEIBoPl9nA==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.78.0", + "@swagger-api/apidom-parser-adapter-json": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-1": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-0.78.0.tgz", + "integrity": "sha512-k+rT6kwu1jAN1lYIP1wVshQdaLu9M+jjCfpvMXXkL/2VpZqq1yP6daFm0ExiHllVUcHWeqRXhubFV3wWkFm6eA==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.78.0", + "@swagger-api/apidom-parser-adapter-json": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": { + "version": "0.78.0", + "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-0.78.0.tgz", + "integrity": "sha512-RzcqL0kvUl5G75H4qOFSi9FTaVfBtRnjzEcjd8SOKVLg3JJsCv3vrk68laRm8HXocyWgGstU51UzBqkMStXy4A==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.78.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-0.78.0.tgz", + "integrity": "sha512-1hB+mcEJd14RJC8lH3yJsoQRDhA8TNNKl3EyQ17eFY0dK29JlluDEbDHIRQpLT1l2jCK/NfqAk2hc37yIwydfw==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.78.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-yaml-1-2": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-0.78.0.tgz", + "integrity": "sha512-L37X+nRNp+2PyJkAwMdSQjP8tb3xoc6FVk2QXLHogghe1Phrmfaal3TPu2rWJNn7NSBcvSyiTAR7gEIULitugA==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-ast": "^0.78.0", + "@swagger-api/apidom-core": "^0.78.0", + "@swagger-api/apidom-error": "^0.78.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2", + "tree-sitter": "=0.20.4", + "tree-sitter-yaml": "=0.5.0", + "web-tree-sitter": "=0.20.3" + } + }, + "node_modules/@swagger-api/apidom-reference": { + "version": "0.78.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-reference/-/apidom-reference-0.78.0.tgz", + "integrity": "sha512-IiOaMgy+CzpQe5fFwyge4B/lkHQnBhiuNGPgIJELYXJMZle+pN6K/V4muLCG6JjAXllucbCqMpW/KLmPxGAXaw==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.78.0", + "@types/ramda": "~0.29.6", + "axios": "^1.4.0", + "minimatch": "^7.4.3", + "process": "^0.11.10", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + }, + "optionalDependencies": { + "@swagger-api/apidom-error": "^0.78.0", + "@swagger-api/apidom-json-pointer": "^0.78.0", + "@swagger-api/apidom-ns-asyncapi-2": "^0.78.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.78.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.78.0", + "@swagger-api/apidom-parser-adapter-api-design-systems-json": "^0.78.0", + "@swagger-api/apidom-parser-adapter-api-design-systems-yaml": "^0.78.0", + "@swagger-api/apidom-parser-adapter-asyncapi-json-2": "^0.78.0", + "@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": "^0.78.0", + "@swagger-api/apidom-parser-adapter-json": "^0.78.0", + "@swagger-api/apidom-parser-adapter-openapi-json-3-0": "^0.78.0", + "@swagger-api/apidom-parser-adapter-openapi-json-3-1": "^0.78.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": "^0.78.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": "^0.78.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.78.0" + } + }, + 
"node_modules/@swagger-api/apidom-reference/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@swagger-api/apidom-reference/node_modules/minimatch": { + "version": "7.4.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-7.4.6.tgz", + "integrity": "sha512-sBz8G/YjVniEz6lKPNpKxXwazJe4c19fEfV2GDMX6AjFz+MX9uDWIZW8XreVhkFW3fkIdTv/gxWr/Kks5FFAVw==", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/@swc/core": { "version": "1.3.91", "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.3.91.tgz", @@ -1587,41 +1985,6 @@ "integrity": "sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw==", "dev": true }, - "node_modules/@tanstack/query-core": { - "version": "4.36.1", - "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-4.36.1.tgz", - "integrity": "sha512-DJSilV5+ytBP1FbFcEJovv4rnnm/CokuVvrBEtW/Va9DvuJ3HksbXUJEpI0aV1KtuL4ZoO9AVE6PyNLzF7tLeA==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - } - }, - "node_modules/@tanstack/react-query": { - "version": "4.36.1", - "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-4.36.1.tgz", - "integrity": "sha512-y7ySVHFyyQblPl3J3eQBWpXZkliroki3ARnBKsdJchlgt7yJLRDUcf4B8soufgiYt3pEQIkBWBx1N9/ZPIeUWw==", - "dependencies": { - "@tanstack/query-core": "4.36.1", - "use-sync-external-store": "^1.2.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - }, - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react-native": "*" - }, - "peerDependenciesMeta": { - "react-dom": { - "optional": true - }, - "react-native": { - "optional": true - } - } - }, "node_modules/@types/codemirror": { "version": "5.60.10", "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-5.60.10.tgz", @@ -1635,6 +1998,23 @@ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==" }, + "node_modules/@types/hast": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.6.tgz", + "integrity": "sha512-47rJE80oqPmFdVDCD7IheXBrVdwuBgsYwoczFvKmwfo2Mzsnt+V9OONsYauFmICb6lQPpCuXYJWejBNs4pDJRg==", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/@types/hoist-non-react-statics": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.3.tgz", + "integrity": "sha512-Wny3a2UXn5FEA1l7gc6BbpoV5mD1XijZqgkp4TRgDCDL5r3B5ieOFGUX5h3n78Tr1MEG7BfvoM8qeztdvNU0fw==", + "dependencies": { + "@types/react": "*", + "hoist-non-react-statics": "^3.3.0" + } + }, "node_modules/@types/json-schema": { "version": "7.0.13", "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.13.tgz", @@ -1644,14 +2024,20 @@ "node_modules/@types/prop-types": { "version": "15.7.5", "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", - "integrity": 
"sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==", - "devOptional": true + "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==" + }, + "node_modules/@types/ramda": { + "version": "0.29.6", + "resolved": "https://registry.npmjs.org/@types/ramda/-/ramda-0.29.6.tgz", + "integrity": "sha512-4XQ9hYQhCwOxfkoTsIPvDVXc75fY5+MLQHUpExX6ByvU1q+0vOYRLSjWAt1IydkE1hOuhwMH6KvV/9rhzgrvRw==", + "dependencies": { + "types-ramda": "^0.29.5" + } }, "node_modules/@types/react": { "version": "18.2.25", "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.25.tgz", "integrity": "sha512-24xqse6+VByVLIr+xWaQ9muX1B4bXJKXBbjszbld/UEDslGLY53+ZucF44HCmLbMPejTzGG9XgR+3m2/Wqu1kw==", - "devOptional": true, "dependencies": { "@types/prop-types": "*", "@types/scheduler": "*", @@ -1670,8 +2056,7 @@ "node_modules/@types/scheduler": { "version": "0.16.3", "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", - "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==", - "devOptional": true + "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==" }, "node_modules/@types/semver": { "version": "7.5.3", @@ -1679,6 +2064,15 @@ "integrity": "sha512-OxepLK9EuNEIPxWNME+C6WwbRAOOI2o2BaQEGzz5Lu2e4Z5eDnEo+/aVEDMIXywoJitJ7xWd641wrGLZdtwRyw==", "dev": true }, + "node_modules/@types/swagger-ui-react": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@types/swagger-ui-react/-/swagger-ui-react-4.18.1.tgz", + "integrity": "sha512-nYhNi+cyN78vve1/QY5PNKYzHYlDKETtXj+gQAhuoCRB+GxGT3MVJUj8WCdwYj4vF0s1j68qkLv/66DGe5ZlnA==", + "dev": true, + "dependencies": { + "@types/react": "*" + } + }, "node_modules/@types/tern": { "version": "0.23.5", "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.5.tgz", @@ -1687,6 +2081,16 @@ "@types/estree": "*" } }, + "node_modules/@types/unist": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.8.tgz", + "integrity": "sha512-d0XxK3YTObnWVp6rZuev3c49+j4Lo8g4L1ZRm9z5L0xpoZycUPshHgczK5gsUMaZOstjVYYi09p5gYvUtfChYw==" + }, + "node_modules/@types/use-sync-external-store": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/@types/use-sync-external-store/-/use-sync-external-store-0.0.3.tgz", + "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" + }, "node_modules/@typescript-eslint/eslint-plugin": { "version": "6.7.5", "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.7.5.tgz", @@ -1888,6 +2292,11 @@ "vite": "^4" } }, + "node_modules/@yarnpkg/lockfile": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz", + "integrity": "sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==" + }, "node_modules/acorn": { "version": "8.10.0", "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", @@ -1938,7 +2347,6 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, "dependencies": { "color-convert": "^2.0.1" }, @@ -1974,17 +2382,76 @@ "node": ">=8" } }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": 
"https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/autolinker": { + "version": "3.16.2", + "resolved": "https://registry.npmjs.org/autolinker/-/autolinker-3.16.2.tgz", + "integrity": "sha512-JiYl7j2Z19F9NdTmirENSUUIIL/9MytEWtmzhfmsKPCp9E+G35Y0UNCMoM9tFigxT59qSc8Ml2dlZXOCVTYwuA==", + "dependencies": { + "tslib": "^2.3.0" + } + }, + "node_modules/axios": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.5.1.tgz", + "integrity": "sha512-Q28iYCWzNHjAm+yEAot5QaAMxhMghWLFVf7rRdwhUI+c2jix2DUXjAHXVi+s1ibs3mjPO/cCgbA++3BjD0vP/A==", + "dependencies": { + "follow-redirects": "^1.15.0", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "optional": true, + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } }, "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -1994,7 +2461,6 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, "dependencies": { "fill-range": "^7.0.1" }, @@ -2002,6 +2468,42 @@ "node": ">=8" } }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "optional": true, + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + 
}, + "node_modules/call-bind": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.2.tgz", + "integrity": "sha512-7O+FbCihrB5WGbFYesctwmTKae6rOiIzmz1icreWJ+0aA7LJfuqhEso2T9ncpcFtzMQtzXf2QGGueWJGTYsqrA==", + "dependencies": { + "function-bind": "^1.1.1", + "get-intrinsic": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -2015,7 +2517,6 @@ "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -2027,6 +2528,58 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "optional": true + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/classnames": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.3.2.tgz", + "integrity": "sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw==" + }, "node_modules/client-only": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", @@ -2071,7 +2624,6 @@ "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, "dependencies": { "color-name": "~1.1.4" }, @@ -2082,14 +2634,40 @@ "node_modules/color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/comma-separated-tokens": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", + "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/cookie": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", + "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "engines": { + "node": ">= 0.6" + } }, "node_modules/copy-to-clipboard": { "version": "3.3.3", @@ -2099,11 +2677,20 @@ "toggle-selection": "^1.0.6" } }, + "node_modules/core-js-pure": { + "version": "3.33.0", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.33.0.tgz", + "integrity": "sha512-FKSIDtJnds/YFIEaZ4HszRX7hkxGpNKM7FC9aJ9WLJbSd3lD4vOltFuVIBLR8asSx9frkTSqL0dw90SKQxgKrg==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, "node_modules/cross-spawn": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -2113,11 +2700,15 @@ "node": ">= 8" } }, + "node_modules/css.escape": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==" + }, "node_modules/csstype": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", - "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==", - "devOptional": true + "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==" }, "node_modules/debug": { "version": "4.3.4", @@ -2136,25 +2727,73 @@ } } }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "optional": true, + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + 
"url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", "dev": true }, - "node_modules/detect-node-es": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", - "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==" - }, - "node_modules/dir-glob": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", - "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", - "dev": true, - "dependencies": { - "path-type": "^4.0.0" - }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/detect-libc": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", + "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", + "optional": true, + "engines": { + "node": ">=8" + } + }, + "node_modules/detect-node-es": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", + "integrity": "sha512-ypdmJU/TbBby2Dxibuv7ZLW3Bs1QEmM7nHjEANfohJLvE0XVujisn1qPJcZxg+qDucsr+bP6fLD1rPS3AhJ7EQ==" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "dependencies": { + "path-type": "^4.0.0" + }, "engines": { "node": ">=8" } @@ -2171,6 +2810,28 @@ "node": ">=6.0.0" } }, + "node_modules/dompurify": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.6.tgz", + "integrity": "sha512-ilkD8YEnnGh1zJ240uJsW7AzE+2qpbOUYjacomn3AvJ6J4JhKGSZ2nh4wUIXPZrEPppaCLx5jFe8T89Rk8tQ7w==" + }, + "node_modules/drange": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/drange/-/drange-1.1.1.tgz", + "integrity": "sha512-pYxfDYpued//QpnLIm4Avk7rsNtAtQkUES2cwAYSvD/wd2pKD71gN2Ebj3e7klzXwjocvE8c5vx/1fxwpqmSxA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "optional": true, + "dependencies": { + "once": "^1.4.0" + } + }, 
"node_modules/entities": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", @@ -2408,6 +3069,15 @@ "node": ">=0.10.0" } }, + "node_modules/expand-template": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", + "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", + "optional": true, + "engines": { + "node": ">=6" + } + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -2468,6 +3138,18 @@ "reusify": "^1.0.4" } }, + "node_modules/fault": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz", + "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/file-entry-cache": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", @@ -2484,7 +3166,6 @@ "version": "7.0.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, "dependencies": { "to-regex-range": "^5.0.1" }, @@ -2508,6 +3189,14 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/find-yarn-workspace-root": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/find-yarn-workspace-root/-/find-yarn-workspace-root-2.0.0.tgz", + "integrity": "sha512-1IMnbjt4KzsQfnhnzNd8wUEgXZ44IzZaZmnLYx7D5FZlaHt2gW20Cri8Q+E/t5tIj4+epTBub+2Zxu/vNILzqQ==", + "dependencies": { + "micromatch": "^4.0.2" + } + }, "node_modules/flat-cache": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", @@ -2527,6 +3216,46 @@ "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", "dev": true }, + "node_modules/follow-redirects": { + "version": "1.15.3", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.3.tgz", + "integrity": "sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } + }, "node_modules/framer-motion": { "version": "6.5.1", "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-6.5.1.tgz", @@ -2555,11 +3284,30 @@ "tslib": "^2.1.0" } }, + "node_modules/fs-constants": { + "version": "1.0.0", + 
"resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", + "optional": true + }, + "node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" }, "node_modules/fsevents": { "version": "2.3.2", @@ -2575,6 +3323,28 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.1.tgz", + "integrity": "sha512-2DcsyfABl+gVHEfCOaTrWgyt+tb6MSEGmKq+kI5HwLbIYgjgmMcV8KQ41uaKz1xxUcn9tJtgFbQUEVcEbd0FYw==", + "dependencies": { + "function-bind": "^1.1.1", + "has": "^1.0.3", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/get-nonce": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", @@ -2583,11 +3353,16 @@ "node": ">=6" } }, + "node_modules/github-from-package": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", + "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", + "optional": true + }, "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "dev": true, "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -2650,6 +3425,11 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, "node_modules/graphemer": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", @@ -2695,20 +3475,109 @@ "graphql": "^15.5.0 || ^16.0.0" } }, + "node_modules/has": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.4.tgz", + "integrity": "sha512-qdSAmqLF6209RFj4VVItywPMbm3vWylknmB3nvNiUIs72xAimcM8nVYxYr7ncvZq5qzk9MKIZR8ijqD/1QuYjQ==", + "engines": { + "node": ">= 0.4.0" + } + }, "node_modules/has-flag": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", 
"integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, "engines": { "node": ">=8" } }, + "node_modules/has-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", + "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz", + "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz", + "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==", + "dependencies": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^1.0.0", + "hast-util-parse-selector": "^2.0.0", + "property-information": "^5.0.0", + "space-separated-tokens": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/hey-listen": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/hey-listen/-/hey-listen-1.0.8.tgz", "integrity": "sha512-COpmrF2NOg4TBWUJ5UVyaCU2A88wEMkUPK4hNqyCkqHbxT92BbvfjoSozkAIIm6XhicGlJHhFdullInrdhwU8Q==" }, + "node_modules/highlight.js": { + "version": "10.7.3", + "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", + "integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==", + "engines": { + "node": "*" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, "node_modules/ignore": { "version": "5.2.4", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", @@ -2718,6 +3587,14 @@ "node": ">= 4" } }, + "node_modules/immutable": { + "version": "3.8.2", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-3.8.2.tgz", + "integrity": "sha512-15gZoQ38eYjEjxkorfbcgBKBL6R7T459OuK+CpcWt7O3KF4uPCx2tD0uFETlUDIyo+1789crbMhTvQBSR5yBMg==", + "engines": { 
+ "node": ">=0.10.0" + } + }, "node_modules/import-fresh": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", @@ -2747,7 +3624,6 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "dev": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -2756,8 +3632,13 @@ "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "optional": true }, "node_modules/invariant": { "version": "2.2.4", @@ -2767,6 +3648,51 @@ "loose-envify": "^1.0.0" } }, + "node_modules/is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "dependencies": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -2788,11 +3714,19 @@ "node": ">=0.10.0" } }, + "node_modules/is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, "engines": { "node": ">=0.12.0" } @@ -2825,11 +3759,21 @@ "node": ">=0.10.0" } }, 
+ "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" }, "node_modules/isobject": { "version": "3.0.1", @@ -2839,6 +3783,11 @@ "node": ">=0.10.0" } }, + "node_modules/js-file-download": { + "version": "0.4.12", + "resolved": "https://registry.npmjs.org/js-file-download/-/js-file-download-0.4.12.tgz", + "integrity": "sha512-rML+NkoD08p5Dllpjo0ffy4jRHeY6Zsapvr/W86N7E0yuzAO6qa5X9+xog6zQNlH102J7IXljNY2FtS6Lj3ucg==" + }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", @@ -2848,7 +3797,6 @@ "version": "4.1.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, "dependencies": { "argparse": "^2.0.1" }, @@ -2862,12 +3810,50 @@ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, + "node_modules/json-stable-stringify": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.0.2.tgz", + "integrity": "sha512-eunSSaEnxV12z+Z73y/j5N37/In40GK4GmsSy+tEHJMxknvqnA7/djeYtAgW0GsWHUfg+847WJjKaEylk2y09g==", + "dependencies": { + "jsonify": "^0.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", "dev": true }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/jsonify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.1.tgz", + "integrity": "sha512-2/Ki0GcmuqSrgFyelQq9M05y7PS0mEwuIzrf3f1fPqkVDVRvZrPZtVSMHxdgo8Aq0sxAOb/cr2aqqA3LeWHVPg==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/klaw-sync": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", + "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", + "dependencies": { + "graceful-fs": "^4.1.11" + } + }, "node_modules/levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -2904,6 +3890,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": 
"https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", @@ -2921,11 +3917,23 @@ "loose-envify": "cli.js" } }, + "node_modules/lowlight": { + "version": "1.20.0", + "resolved": "https://registry.npmjs.org/lowlight/-/lowlight-1.20.0.tgz", + "integrity": "sha512-8Ktj+prEb1RoCPkEOrPMYUN/nCggB7qAWe3a7OpMjWQkh3l2RD5wKRQ+o8Q8YuI9RG/xs95waaI/E6ym/7NsTw==", + "dependencies": { + "fault": "^1.0.0", + "highlight.js": "~10.7.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/lru-cache": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, "dependencies": { "yallist": "^4.0.0" }, @@ -2982,7 +3990,6 @@ "version": "4.0.5", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", - "dev": true, "dependencies": { "braces": "^3.0.2", "picomatch": "^2.3.1" @@ -2991,30 +3998,91 @@ "node": ">=8.6" } }, - "node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", "dependencies": { - "brace-expansion": "^1.1.7" + "mime-db": "1.52.0" }, "engines": { - "node": "*" + "node": ">= 0.6" } }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "node_modules/nanoid": { - "version": "3.3.6", - "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", - "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", - "dev": true, - "funding": [ + "node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "optional": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/minim": { + "version": "0.23.8", + "resolved": 
"https://registry.npmjs.org/minim/-/minim-0.23.8.tgz", + "integrity": "sha512-bjdr2xW1dBCMsMGGsUeqM4eFI60m94+szhxWys+B1ztIt6gWSfeGBdSVCIawezeHYLYn0j6zrsXdQS/JllBzww==", + "dependencies": { + "lodash": "^4.15.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mkdirp-classic": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", + "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", + "optional": true + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true + }, + "node_modules/nan": { + "version": "2.18.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.18.0.tgz", + "integrity": "sha512-W7tfG7vMOGtD30sHoZSSc/JVYiyDPEyQVso/Zz+/uQd0B0L46gtC+pHha5FFMRpil6fm/AoEcRWyOVi4+E/f8w==", + "optional": true + }, + "node_modules/nanoid": { + "version": "3.3.6", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", + "integrity": "sha512-BGcqMMJuToF7i1rt+2PWSNVnWIkGCU78jBG3RxO/bZlnZPK2Cmi2QaffxGO/2RvWi9sL+FAiRiXMgsyxQ1DIDA==", + "dev": true, + "funding": [ { "type": "github", "url": "https://github.com/sponsors/ai" @@ -3027,26 +4095,113 @@ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, + "node_modules/napi-build-utils": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz", + "integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg==", + "optional": true + }, "node_modules/natural-compare": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", "dev": true }, + "node_modules/node-abi": { + "version": "3.51.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.51.0.tgz", + "integrity": "sha512-SQkEP4hmNWjlniS5zdnfIXTk1x7Ome85RDzHlTbBtzE97Gfwz/Ipw4v/Ryk20DWIy3yCNVLVlGKApCnmvYoJbA==", + "optional": true, + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/node-abort-controller": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.1.1.tgz", + "integrity": "sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==" + }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "funding": [ + { + "type": 
"github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch-commonjs": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/node-fetch-commonjs/-/node-fetch-commonjs-3.3.2.tgz", + "integrity": "sha512-VBlAiynj3VMLrotgwOS3OyECFxas5y7ltLcK4t41lMUZeaK15Ym4QRkqN0EQKAFL42q9i21EPKjzLUPfltR72A==", + "dependencies": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/node-fetch" + } + }, "node_modules/nullthrows": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/nullthrows/-/nullthrows-1.1.1.tgz", "integrity": "sha512-2vPPEi+Z7WqML2jZYddDIfy5Dqb0r2fze2zTxNNknZaFpVHU3mFB3R+DWeJWGVx0ecvttSGlJTI+WG+8Z4cDWw==" }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.0", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.0.tgz", + "integrity": "sha512-HQ4J+ic8hKrgIt3mqk6cVOVrW2ozL4KdvHlqpBv9vDYWx9ysAgENAdvy4FoGF+KFdhR7nQTNm5J0ctAeOwn+3g==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dev": true, "dependencies": { "wrappy": "1" } }, + "node_modules/open": { + "version": "7.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-7.4.2.tgz", + "integrity": "sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==", + "dependencies": { + "is-docker": "^2.0.0", + "is-wsl": "^2.1.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/optionator": { "version": "0.9.3", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", @@ -3064,6 +4219,14 @@ "node": ">= 0.8.0" } }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/p-limit": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", @@ -3106,6 +4269,71 @@ "node": ">=6" } }, + "node_modules/parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "dependencies": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/patch-package": { + "version": "8.0.0", + "resolved": 
"https://registry.npmjs.org/patch-package/-/patch-package-8.0.0.tgz", + "integrity": "sha512-da8BVIhzjtgScwDJ2TtKsfT5JFWz1hYoBl9rUQ1f38MC2HwnEIkK8VN3dKMKcP7P7bvvgzNDbfNHtx3MsQb5vA==", + "dependencies": { + "@yarnpkg/lockfile": "^1.1.0", + "chalk": "^4.1.2", + "ci-info": "^3.7.0", + "cross-spawn": "^7.0.3", + "find-yarn-workspace-root": "^2.0.0", + "fs-extra": "^9.0.0", + "json-stable-stringify": "^1.0.2", + "klaw-sync": "^6.0.0", + "minimist": "^1.2.6", + "open": "^7.4.2", + "rimraf": "^2.6.3", + "semver": "^7.5.3", + "slash": "^2.0.0", + "tmp": "^0.0.33", + "yaml": "^2.2.2" + }, + "bin": { + "patch-package": "index.js" + }, + "engines": { + "node": ">=14", + "npm": ">5" + } + }, + "node_modules/patch-package/node_modules/rimraf": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/patch-package/node_modules/slash": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", + "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", + "engines": { + "node": ">=6" + } + }, "node_modules/path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", @@ -3119,7 +4347,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -3128,7 +4355,6 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, "engines": { "node": ">=8" } @@ -3152,7 +4378,6 @@ "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, "engines": { "node": ">=8.6" }, @@ -3199,6 +4424,32 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/prebuild-install": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz", + "integrity": "sha512-jAXscXWMcCK8GgCoHOfIr0ODh5ai8mj63L2nWrjuAgXE6tDyYGnx4/8o/rCgU+B4JSyZBKbeZqzhtwtC3ovxjw==", + "optional": true, + "dependencies": { + "detect-libc": "^2.0.0", + "expand-template": "^2.0.3", + "github-from-package": "0.0.0", + "minimist": "^1.2.3", + "mkdirp-classic": "^0.5.3", + "napi-build-utils": "^1.0.1", + "node-abi": "^3.3.0", + "pump": "^3.0.0", + "rc": "^1.2.7", + "simple-get": "^4.0.0", + "tar-fs": "^2.0.0", + "tunnel-agent": "^0.6.0" + }, + "bin": { + "prebuild-install": "bin.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/prelude-ls": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", @@ -3208,6 +4459,59 @@ "node": ">= 0.8.0" } }, + "node_modules/prismjs": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", + "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/process": { + "version": 
"0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/property-information": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", + "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", + "dependencies": { + "xtend": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "node_modules/pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "optional": true, + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, "node_modules/punycode": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", @@ -3217,6 +4521,25 @@ "node": ">=6" } }, + "node_modules/qs": { + "version": "6.11.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.2.tgz", + "integrity": "sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" + }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -3237,6 +4560,74 @@ } ] }, + "node_modules/ramda": { + "version": "0.29.1", + "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.29.1.tgz", + "integrity": "sha512-OfxIeWzd4xdUNxlWhgFazxsA/nl3mS4/jGZI5n00uWOoSSFRhC1b6gl6xvmzUamgmqELraWp0J/qqVlXYPDPyA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/ramda" + } + }, + "node_modules/ramda-adjunct": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ramda-adjunct/-/ramda-adjunct-4.1.1.tgz", + "integrity": "sha512-BnCGsZybQZMDGram9y7RiryoRHS5uwx8YeGuUeDKuZuvK38XO6JJfmK85BwRWAKFA6pZ5nZBO/HBFtExVaf31w==", + "engines": { + "node": ">=0.10.3" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/ramda-adjunct" + }, + "peerDependencies": { + "ramda": ">= 0.29.0" + } + }, + "node_modules/randexp": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/randexp/-/randexp-0.5.3.tgz", + "integrity": 
"sha512-U+5l2KrcMNOUPYvazA3h5ekF80FHTUG+87SEAmHZmolh1M+i/WyTCxVzmi+tidIa1tM4BSe8g2Y/D3loWDjj+w==", + "dependencies": { + "drange": "^1.0.2", + "ret": "^0.2.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "optional": true, + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/react": { "version": "18.2.0", "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", @@ -3248,6 +4639,30 @@ "node": ">=0.10.0" } }, + "node_modules/react-copy-to-clipboard": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/react-copy-to-clipboard/-/react-copy-to-clipboard-5.1.0.tgz", + "integrity": "sha512-k61RsNgAayIJNoy9yDsYzDe/yAZAzEbEgcz3DZMhF686LEyukcE1hzurxe85JandPUG+yTfGVFzuEw3xt8WP/A==", + "dependencies": { + "copy-to-clipboard": "^3.3.1", + "prop-types": "^15.8.1" + }, + "peerDependencies": { + "react": "^15.3.0 || 16 || 17 || 18" + } + }, + "node_modules/react-debounce-input": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/react-debounce-input/-/react-debounce-input-3.3.0.tgz", + "integrity": "sha512-VEqkvs8JvY/IIZvh71Z0TC+mdbxERvYF33RcebnodlsUZ8RSgyKe2VWaHXv4+/8aoOgXLxWrdsYs2hDhcwbUgA==", + "dependencies": { + "lodash.debounce": "^4", + "prop-types": "^15.8.1" + }, + "peerDependencies": { + "react": "^15.3.0 || 16 || 17 || 18" + } + }, "node_modules/react-dom": { "version": "18.2.0", "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", @@ -3260,21 +4675,83 @@ "react": "^18.2.0" } }, - "node_modules/react-hook-form": { - "version": "7.47.0", - "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.47.0.tgz", - "integrity": "sha512-F/TroLjTICipmHeFlMrLtNLceO2xr1jU3CyiNla5zdwsGUGu2UOxxR4UyJgLlhMwLW/Wzp4cpJ7CPfgJIeKdSg==", - "engines": { - "node": ">=12.22.0" + "node_modules/react-immutable-proptypes": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/react-immutable-proptypes/-/react-immutable-proptypes-2.2.0.tgz", + "integrity": "sha512-Vf4gBsePlwdGvSZoLSBfd4HAP93HDauMY4fDjXhreg/vg6F3Fj/MXDNyTbltPC/xZKmZc+cjLu3598DdYK6sgQ==", + "dependencies": { + "invariant": "^2.2.2" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/react-hook-form" + "peerDependencies": { + "immutable": ">=3.6.2" + } + }, + "node_modules/react-immutable-pure-component": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/react-immutable-pure-component/-/react-immutable-pure-component-2.2.2.tgz", + "integrity": 
"sha512-vkgoMJUDqHZfXXnjVlG3keCxSO/U6WeDQ5/Sl0GK2cH8TOxEzQ5jXqDXHEL/jqk6fsNxV05oH5kD7VNMUE2k+A==", + "peerDependencies": { + "immutable": ">= 2 || >= 4.0.0-rc", + "react": ">= 16.6", + "react-dom": ">= 16.6" + } + }, + "node_modules/react-inspector": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/react-inspector/-/react-inspector-6.0.2.tgz", + "integrity": "sha512-x+b7LxhmHXjHoU/VrFAzw5iutsILRoYyDq97EDYdFpPLcvqtEzk4ZSZSQjnFPbr5T57tLXnHcqFYoN1pI6u8uQ==", + "peerDependencies": { + "react": "^16.8.4 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/react-redux": { + "version": "8.1.3", + "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-8.1.3.tgz", + "integrity": "sha512-n0ZrutD7DaX/j9VscF+uTALI3oUPa/pO4Z3soOBIjuRn/FzVu6aehhysxZCLi6y7duMf52WNZGMl7CtuK5EnRw==", + "dependencies": { + "@babel/runtime": "^7.12.1", + "@types/hoist-non-react-statics": "^3.3.1", + "@types/use-sync-external-store": "^0.0.3", + "hoist-non-react-statics": "^3.3.2", + "react-is": "^18.0.0", + "use-sync-external-store": "^1.0.0" }, "peerDependencies": { - "react": "^16.8.0 || ^17 || ^18" + "@types/react": "^16.8 || ^17.0 || ^18.0", + "@types/react-dom": "^16.8 || ^17.0 || ^18.0", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0", + "react-native": ">=0.59", + "redux": "^4 || ^5.0.0-beta.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + }, + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + }, + "redux": { + "optional": true + } } }, + "node_modules/react-redux/node_modules/react-is": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", + "integrity": "sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" + }, "node_modules/react-remove-scroll": { "version": "2.5.5", "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.5.tgz", @@ -3342,11 +4819,119 @@ } } }, + "node_modules/react-syntax-highlighter": { + "version": "15.5.0", + "resolved": "https://registry.npmjs.org/react-syntax-highlighter/-/react-syntax-highlighter-15.5.0.tgz", + "integrity": "sha512-+zq2myprEnQmH5yw6Gqc8lD55QHnpKaU8TOcFeC/Lg/MQSs8UknEA0JC4nTZGFAXC2J2Hyj/ijJ7NlabyPi2gg==", + "dependencies": { + "@babel/runtime": "^7.3.1", + "highlight.js": "^10.4.1", + "lowlight": "^1.17.0", + "prismjs": "^1.27.0", + "refractor": "^3.6.0" + }, + "peerDependencies": { + "react": ">= 0.14.0" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "optional": true, + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/redux": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/redux/-/redux-4.2.1.tgz", + "integrity": "sha512-LAUYz4lc+Do8/g7aeRa8JkyDErK6ekstQaqWQrNRW//MY1TvCEpMtpTWvlQ+FPbWCx+Xixu/6SHt5N0HR+SB4w==", + "dependencies": { + "@babel/runtime": "^7.9.2" + } + }, + "node_modules/redux-immutable": { + 
"version": "4.0.0", + "resolved": "https://registry.npmjs.org/redux-immutable/-/redux-immutable-4.0.0.tgz", + "integrity": "sha512-SchSn/DWfGb3oAejd+1hhHx01xUoxY+V7TeK0BKqpkLKiQPVFf7DYzEaKmrEVxsWxielKfSK9/Xq66YyxgR1cg==", + "peerDependencies": { + "immutable": "^3.8.1 || ^4.0.0-rc.1" + } + }, + "node_modules/refractor": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/refractor/-/refractor-3.6.0.tgz", + "integrity": "sha512-MY9W41IOWxxk31o+YvFCNyNzdkc9M20NoZK5vq6jkv4I/uh2zkWcfudj0Q1fovjUQJrNewS9NMzeTtqPf+n5EA==", + "dependencies": { + "hastscript": "^6.0.0", + "parse-entities": "^2.0.0", + "prismjs": "~1.27.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/prismjs": { + "version": "1.27.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.27.0.tgz", + "integrity": "sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==", + "engines": { + "node": ">=6" + } + }, "node_modules/regenerator-runtime": { "version": "0.14.0", "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==" }, + "node_modules/remarkable": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/remarkable/-/remarkable-2.0.1.tgz", + "integrity": "sha512-YJyMcOH5lrR+kZdmB0aJJ4+93bEojRZ1HGDn9Eagu6ibg7aVZhc3OWbbShRid+Q5eAfsEqWxpe+g5W5nYNfNiA==", + "dependencies": { + "argparse": "^1.0.10", + "autolinker": "^3.11.0" + }, + "bin": { + "remarkable": "bin/remarkable.js" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/remarkable/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, + "node_modules/reselect": { + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/reselect/-/reselect-4.1.8.tgz", + "integrity": "sha512-ab9EmR80F/zQTMNeneUr4cv+jSwPJgIlvEmVwLerwrWVbpLlBuls9XHzIeTFy4cegU2NHBp3va0LKOzU5qFEYQ==" + }, "node_modules/resolve-from": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", @@ -3356,6 +4941,14 @@ "node": ">=4" } }, + "node_modules/ret": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/ret/-/ret-0.2.2.tgz", + "integrity": "sha512-M0b3YWQs7R3Z917WRQy1HHA7Ba7D8hvZg6UE5mLykJxQVE2ju0IXbGlaHPPlkY+WN7wFP+wUMXmBFA0aV6vYGQ==", + "engines": { + "node": ">=4" + } + }, "node_modules/reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", @@ -3420,6 +5013,25 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": 
"https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, "node_modules/scheduler": { "version": "0.23.0", "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", @@ -3432,7 +5044,6 @@ "version": "7.5.4", "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, "dependencies": { "lru-cache": "^6.0.0" }, @@ -3443,6 +5054,20 @@ "node": ">=10" } }, + "node_modules/serialize-error": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/serialize-error/-/serialize-error-8.1.0.tgz", + "integrity": "sha512-3NnuWfM6vBYoy5gZFvHiYsVbafvI9vZv/+jlIigFn4oP4zjNPK3LhcY0xSCgeb1a5L8jO71Mit9LlNoi2UfDDQ==", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/set-value": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/set-value/-/set-value-4.1.0.tgz", @@ -3460,11 +5085,22 @@ "node": ">=11.0" } }, + "node_modules/sha.js": { + "version": "2.4.11", + "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", + "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "dependencies": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + }, + "bin": { + "sha.js": "bin.js" + } + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, "dependencies": { "shebang-regex": "^3.0.0" }, @@ -3476,11 +5112,77 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, "engines": { "node": ">=8" } }, + "node_modules/short-unique-id": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/short-unique-id/-/short-unique-id-5.0.3.tgz", + "integrity": "sha512-yhniEILouC0s4lpH0h7rJsfylZdca10W9mDJRAFh3EpcSUanCHGb0R7kcFOIUCZYSAPo0PUD5ZxWQdW0T4xaug==", + "bin": { + "short-unique-id": "bin/short-unique-id", + "suid": "bin/short-unique-id" + } + }, + "node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/simple-concat": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", + "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": 
"patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "optional": true + }, + "node_modules/simple-get": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", + "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "optional": true, + "dependencies": { + "decompress-response": "^6.0.0", + "once": "^1.3.1", + "simple-concat": "^1.0.0" + } + }, "node_modules/slash": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", @@ -3499,6 +5201,34 @@ "node": ">=0.10.0" } }, + "node_modules/space-separated-tokens": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", + "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "node_modules/stampit": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/stampit/-/stampit-4.3.2.tgz", + "integrity": "sha512-pE2org1+ZWQBnIxRPrBM2gVupkuDD0TTNIo1H6GdT/vO82NXli2z8lRE8cu/nBIHrcOCXFBAHpb9ZldrB2/qOA==" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "optional": true, + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, "node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -3542,7 +5272,6 @@ "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, "dependencies": { "has-flag": "^4.0.0" }, @@ -3550,17 +5279,130 @@ "node": ">=8" } }, + "node_modules/swagger-client": { + "version": "3.23.1", + "resolved": "https://registry.npmjs.org/swagger-client/-/swagger-client-3.23.1.tgz", + "integrity": "sha512-ecRJsoGozhGvEUmim2kIc/pH9BllnPVuajuEXVm49EDbwbwbp7P+i5EW+8w5FLaqmGrx9eio51G9bvJV/XC+YQ==", + "dependencies": { + "@babel/runtime-corejs3": "^7.22.15", + "@swagger-api/apidom-core": ">=0.77.0 <1.0.0", + "@swagger-api/apidom-json-pointer": ">=0.77.0 <1.0.0", + "@swagger-api/apidom-ns-openapi-3-1": ">=0.77.0 <1.0.0", + "@swagger-api/apidom-reference": ">=0.77.0 <1.0.0", + "cookie": "~0.5.0", + "deepmerge": "~4.3.0", + "fast-json-patch": "^3.0.0-1", + "is-plain-object": "^5.0.0", + "js-yaml": "^4.1.0", + "node-abort-controller": "^3.1.1", + "node-fetch-commonjs": "^3.3.1", + "qs": "^6.10.2", + "traverse": "~0.6.6", + "undici": "^5.24.0" + } + }, + "node_modules/swagger-client/node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": 
"https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/swagger-ui-react": { + "version": "5.9.0", + "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.9.0.tgz", + "integrity": "sha512-j45ceuGHMRmI8nhOaG71VeQwrPutFHDq6QhgrxOmf4BRMOdOQgVY1POQY9ksnXZtskbD9J2NHURs4BLEDIs8gA==", + "dependencies": { + "@babel/runtime-corejs3": "^7.23.1", + "@braintree/sanitize-url": "=6.0.4", + "base64-js": "^1.5.1", + "classnames": "^2.3.1", + "css.escape": "1.5.1", + "deep-extend": "0.6.0", + "dompurify": "=3.0.6", + "ieee754": "^1.2.1", + "immutable": "^3.x.x", + "js-file-download": "^0.4.12", + "js-yaml": "=4.1.0", + "lodash": "^4.17.21", + "patch-package": "^8.0.0", + "prop-types": "^15.8.1", + "randexp": "^0.5.3", + "randombytes": "^2.1.0", + "react-copy-to-clipboard": "5.1.0", + "react-debounce-input": "=3.3.0", + "react-immutable-proptypes": "2.2.0", + "react-immutable-pure-component": "^2.2.0", + "react-inspector": "^6.0.1", + "react-redux": "^8.1.2", + "react-syntax-highlighter": "^15.5.0", + "redux": "^4.1.2", + "redux-immutable": "^4.0.0", + "remarkable": "^2.0.1", + "reselect": "^4.1.8", + "serialize-error": "^8.1.0", + "sha.js": "^2.4.11", + "swagger-client": "^3.22.3", + "url-parse": "^1.5.10", + "xml": "=1.0.1", + "xml-but-prettier": "^1.0.1", + "zenscroll": "^4.0.2" + }, + "peerDependencies": { + "react": ">=17.0.0", + "react-dom": ">=17.0.0" + } + }, + "node_modules/tar-fs": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", + "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", + "optional": true, + "dependencies": { + "chownr": "^1.1.1", + "mkdirp-classic": "^0.5.2", + "pump": "^3.0.0", + "tar-stream": "^2.1.4" + } + }, + "node_modules/tar-stream": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", + "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", + "optional": true, + "dependencies": { + "bl": "^4.0.3", + "end-of-stream": "^1.4.1", + "fs-constants": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", "dev": true }, + "node_modules/tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, "dependencies": { "is-number": "^7.0.0" }, @@ -3573,6 +5415,45 @@ "resolved": "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz", "integrity": "sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ==" }, + 
"node_modules/traverse": { + "version": "0.6.7", + "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.6.7.tgz", + "integrity": "sha512-/y956gpUo9ZNCb99YjxG7OaslxZWHfCHAUUfshwqOXmxUIvqLjVO581BT+gM59+QV9tFe6/CGG53tsA1Y7RSdg==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tree-sitter": { + "version": "0.20.4", + "resolved": "https://registry.npmjs.org/tree-sitter/-/tree-sitter-0.20.4.tgz", + "integrity": "sha512-rjfR5dc4knG3jnJNN/giJ9WOoN1zL/kZyrS0ILh+eqq8RNcIbiXA63JsMEgluug0aNvfQvK4BfCErN1vIzvKog==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "nan": "^2.17.0", + "prebuild-install": "^7.1.1" + } + }, + "node_modules/tree-sitter-json": { + "version": "0.20.1", + "resolved": "https://registry.npmjs.org/tree-sitter-json/-/tree-sitter-json-0.20.1.tgz", + "integrity": "sha512-482hf7J+aBwhksSw8yWaqI8nyP1DrSwnS4IMBShsnkFWD3SE8oalHnsEik59fEVi3orcTCUtMzSjZx+0Tpa6Vw==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "nan": "^2.18.0" + } + }, + "node_modules/tree-sitter-yaml": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/tree-sitter-yaml/-/tree-sitter-yaml-0.5.0.tgz", + "integrity": "sha512-POJ4ZNXXSWIG/W4Rjuyg36MkUD4d769YRUGKRqN+sVaj/VCo6Dh6Pkssn1Rtewd5kybx+jT1BWMyWN0CijXnMA==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "nan": "^2.14.0" + } + }, "node_modules/ts-api-utils": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.0.1.tgz", @@ -3585,11 +5466,28 @@ "typescript": ">=4.2.0" } }, + "node_modules/ts-toolbelt": { + "version": "9.6.0", + "resolved": "https://registry.npmjs.org/ts-toolbelt/-/ts-toolbelt-9.6.0.tgz", + "integrity": "sha512-nsZd8ZeNUzukXPlJmTBwUAuABDe/9qtVDelJeT/qW0ow3ZS3BsQJtNkan1802aM9Uf68/Y8ljw86Hu0h5IUW3w==" + }, "node_modules/tslib": { "version": "2.6.2", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "optional": true, + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -3606,7 +5504,6 @@ "version": "0.20.2", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, "engines": { "node": ">=10" }, @@ -3614,6 +5511,14 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/types-ramda": { + "version": "0.29.5", + "resolved": "https://registry.npmjs.org/types-ramda/-/types-ramda-0.29.5.tgz", + "integrity": "sha512-u+bAYXHDPJR+amB0qMrMU/NXRB2PG8QqpO2v6j7yK/0mPZhlaaZj++ynYjnVpkPEpCkZEGxNpWY3X7qyLCGE3w==", + "dependencies": { + "ts-toolbelt": "^9.6.0" + } + }, "node_modules/typescript": { "version": "5.2.2", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", @@ -3632,6 +5537,30 @@ "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" }, + 
"node_modules/undici": { + "version": "5.26.3", + "resolved": "https://registry.npmjs.org/undici/-/undici-5.26.3.tgz", + "integrity": "sha512-H7n2zmKEWgOllKkIUkLvFmsJQj062lSm3uA4EYApG8gLuiOM0/go9bIoC3HVaSnfg4xunowDE2i9p8drkXuvDw==", + "dependencies": { + "@fastify/busboy": "^2.0.0" + }, + "engines": { + "node": ">=14.0" + } + }, + "node_modules/universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unraw": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unraw/-/unraw-3.0.0.tgz", + "integrity": "sha512-08/DA66UF65OlpUDIQtbJyrqTR0jTAlJ+jsnkQ4jxR7+K5g5YG1APZKQSMCE1vqqmD+2pv6+IdEjmopFatacvg==" + }, "node_modules/uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", @@ -3641,6 +5570,15 @@ "punycode": "^2.1.0" } }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, "node_modules/use-callback-ref": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.0.tgz", @@ -3690,6 +5628,12 @@ "react": "^16.8.0 || ^17.0.0 || ^18.0.0" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "optional": true + }, "node_modules/vite": { "version": "4.4.11", "resolved": "https://registry.npmjs.org/vite/-/vite-4.4.11.tgz", @@ -3756,11 +5700,24 @@ "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==", "peer": true }, + "node_modules/web-streams-polyfill": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz", + "integrity": "sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/web-tree-sitter": { + "version": "0.20.3", + "resolved": "https://registry.npmjs.org/web-tree-sitter/-/web-tree-sitter-0.20.3.tgz", + "integrity": "sha512-zKGJW9r23y3BcJusbgvnOH2OYAW40MXAOi9bi3Gcc7T4Gms9WWgXF8m6adsJWpGJEhgOzCrfiz1IzKowJWrtYw==", + "optional": true + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, "dependencies": { "isexe": "^2.0.0" }, @@ -3774,14 +5731,41 @@ "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/xml": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/xml/-/xml-1.0.1.tgz", + "integrity": 
"sha512-huCv9IH9Tcf95zuYCsQraZtWnJvBtLVE0QHMOs8bWyZAFZNDcYjsPq1nEx8jKA9y+Beo9v+7OBPRisQTjinQMw==" + }, + "node_modules/xml-but-prettier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/xml-but-prettier/-/xml-but-prettier-1.0.1.tgz", + "integrity": "sha512-C2CJaadHrZTqESlH03WOyw0oZTtoy2uEg6dSDF6YRg+9GnYNub53RRemLpnvtbHDFelxMx4LajiFsYeR6XJHgQ==", + "dependencies": { + "repeat-string": "^1.5.2" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "engines": { + "node": ">=0.4" + } }, "node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/yaml": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.3.tgz", + "integrity": "sha512-zw0VAJxgeZ6+++/su5AFoqBbZbrEakwu+X0M5HmcwUiBL7AzcuPKjj5we4xfQLp78LkEMpD0cOnUhmgOVy3KdQ==", + "engines": { + "node": ">= 14" + } }, "node_modules/yocto-queue": { "version": "0.1.0", @@ -3794,6 +5778,11 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/zenscroll": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/zenscroll/-/zenscroll-4.0.2.tgz", + "integrity": "sha512-jEA1znR7b4C/NnaycInCU6h/d15ZzCd1jmsruqOKnZP6WXQSMH3W2GL+OXbkruslU4h+Tzuos0HdswzRUk/Vgg==" } } } diff --git a/playground/package.json b/playground/package.json index a896c3c97e..719dbeea30 100644 --- a/playground/package.json +++ b/playground/package.json @@ -10,17 +10,16 @@ "preview": "vite preview" }, "dependencies": { - "@tanstack/react-query": "^4.36.1", - "fast-json-patch": "^3.1.1", "graphiql": "^3.0.6", "graphql": "^16.8.1", "react": "^18.2.0", "react-dom": "^18.2.0", - "react-hook-form": "^7.47.0" + "swagger-ui-react": "^5.9.0" }, "devDependencies": { "@types/react": "^18.2.25", "@types/react-dom": "^18.2.13", + "@types/swagger-ui-react": "^4.18.1", "@typescript-eslint/eslint-plugin": "^6.7.5", "@typescript-eslint/parser": "^6.7.5", "@vitejs/plugin-react-swc": "^3.4.0", diff --git a/playground/src/App.tsx b/playground/src/App.tsx index dc00b98cbc..cb2fa035f3 100644 --- a/playground/src/App.tsx +++ b/playground/src/App.tsx @@ -8,28 +8,29 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. +import React from 'react' import { GraphiQL } from 'graphiql' import { createGraphiQLFetcher } from '@graphiql/toolkit' import { GraphiQLPlugin } from '@graphiql/react' -import { QueryClient, QueryClientProvider } from '@tanstack/react-query' -import { Plugin } from './components/Plugin' +import 'swagger-ui-react/swagger-ui.css' import 'graphiql/graphiql.css' -const client = new QueryClient() -const fetcher = createGraphiQLFetcher({ url: 'http://localhost:9181/api/v0/graphql' }) +const baseUrl = import.meta.env.DEV ? 'http://localhost:9181' : '' +const SwaggerUI = React.lazy(() => import('swagger-ui-react')) +const fetcher = createGraphiQLFetcher({ url: `${baseUrl}/api/v0/graphql` }) const plugin: GraphiQLPlugin = { - title: 'DefraDB', - icon: () => (
<div>DB</div>),
- content: () => (<Plugin />),
+ title: 'DefraDB API',
+ icon: () => (<div>API</div>),
+ content: () => (
+ <React.Suspense>
+ <SwaggerUI url={`${baseUrl}/openapi.json`} />
+ </React.Suspense>
+ ),
}

function App() {
- return (
- <QueryClientProvider client={client}>
- <GraphiQL fetcher={fetcher} plugins={[plugin]} />
- </QueryClientProvider>
- )
+ return (<GraphiQL fetcher={fetcher} plugins={[plugin]} />)
}

export default App
diff --git a/playground/src/components/Plugin.tsx b/playground/src/components/Plugin.tsx
deleted file mode 100644
index e8c727fe61..0000000000
--- a/playground/src/components/Plugin.tsx
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2023 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-import { useQuery } from '@tanstack/react-query'
-import { SchemaLoadForm } from './SchemaLoadForm'
-import { SchemaPatchForm } from './SchemaPatchForm'
-import { listSchema } from '../lib/api'
-
-const defaultFieldTypes = [
- 'ID',
- 'Boolean',
- '[Boolean]',
- '[Boolean!]',
- 'Int',
- '[Int]',
- '[Int!]',
- 'DateTime',
- 'Float',
- '[Float]',
- '[Float!]',
- 'String',
- '[String]',
- '[String!]',
-]
-
-export function Plugin() {
- const { data } = useQuery({ queryKey: ['schemas'], queryFn: listSchema })
-
- const collections = data?.data?.collections ?? []
- const schemaFieldTypes = collections.map(col => [`${col.name}`, `[${col.name}]`]).flat()
- const fieldTypes = [...defaultFieldTypes, ...schemaFieldTypes]
-
- return (
- <div>
- <div>DefraDB</div>
- <div>
- <div>Add Schema</div>
- <SchemaLoadForm />
- </div>
- { collections?.map((schema) =>
- <div key={schema.name}>
- <div>{schema.name} Schema</div>
- <SchemaPatchForm schema={schema} fieldTypes={fieldTypes} />
- </div>
- )}
- </div>
- )
-}
\ No newline at end of file
diff --git a/playground/src/components/SchemaLoadForm.tsx b/playground/src/components/SchemaLoadForm.tsx
deleted file mode 100644
index a1df44d87c..0000000000
--- a/playground/src/components/SchemaLoadForm.tsx
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2023 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-import { useState, useEffect } from 'react'
-import { useForm } from 'react-hook-form'
-import { useSchemaContext } from '@graphiql/react'
-import { useQueryClient } from '@tanstack/react-query'
-import { loadSchema, ErrorItem } from '../lib/api'
-
-export type FormData = {
- schema: string
-}
-
-const defaultValues: FormData = {
- schema: '',
-}
-
-export function SchemaLoadForm() {
- const queryClient = useQueryClient()
- const schemaContext = useSchemaContext({ nonNull: true })
-
- const { formState, reset, register, handleSubmit } = useForm({ defaultValues })
-
- const [errors, setErrors] = useState<ErrorItem[]>()
- const [isLoading, setIsLoading] = useState(false)
-
- useEffect(() => {
- if (formState.isSubmitSuccessful) reset(defaultValues)
- }, [formState, reset])
-
- const onSubmit = async (data: FormData) => {
- setErrors(undefined)
- setIsLoading(true)
-
- try {
- const res = await loadSchema(data.schema)
- if (res.errors) {
- setErrors(res.errors)
- } else {
- schemaContext.introspect()
- queryClient.invalidateQueries(['schemas'])
- }
- } catch(err: any) {
- setErrors([{ message: err.message }])
- } finally {
- setIsLoading(false)
- }
- }
-
- return (
- <form onSubmit={handleSubmit(onSubmit)}>
- {errors?.map((error, index) =>
- <div key={index}>{error.message}</div>
- )}
-