diff --git a/README.md b/README.md
index 4d6afa6664..0406802b1d 100644
--- a/README.md
+++ b/README.md
@@ -244,13 +244,20 @@ When starting a node for the first time, a key pair is generated and stored in i
Each node has a unique `PeerID` generated from its public key. This ID allows other nodes to connect to it.
+To view your node's peer info:
+
+```shell
+defradb client p2p info
+```
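+
+The output is the node's peer ID and listening addresses as JSON, for example (illustrative values; yours will differ):
+
+```json
+{"ID": "12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}
+```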
+
There are two types of peer-to-peer relationships supported: **pubsub** peering and **replicator** peering.
Pubsub peering *passively* synchronizes data between nodes by broadcasting *Document Commit* updates to the topic of the commit's document key. Nodes need to be listening on the pubsub channel to receive updates. This is for when two nodes *already* share a document and want to keep it in sync.
Replicator peering *actively* pushes changes from a specific collection *to* a target peer.
-### Pubsub example
+
+### Pubsub example
Pubsub peers can be specified on the command line using the `--peers` flag, which accepts a comma-separated list of peer [multiaddresses](https://docs.libp2p.io/concepts/addressing/). For example, a node at IP `192.168.1.12` listening on 9000 with PeerID `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B` would be referred to using the multiaddress `/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`.
@@ -258,16 +265,22 @@ Let's go through an example of two nodes (*nodeA* and *nodeB*) connecting with e
Start *nodeA* with a default configuration:
-```
+```shell
defradb start
```
-Obtain the PeerID from its console output. In this example, we use `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`, but locally it will be different.
+Obtain the node's peer info:
+
+```shell
+defradb client p2p info
+```
+
+In this example, we use `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`, but locally it will be different.
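+
+Together, the ID and a listening address form the multiaddress that other nodes use to reach this node, for example (assuming the default P2P port 9171):
+
+```shell
+/ip4/0.0.0.0/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B
+```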
For *nodeB*, we provide the following configuration:
-```
-defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --tcpaddr /ip4/0.0.0.0/tcp/9162 --peers /ip4/0.0.0.0/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B
+```shell
+defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --peers /ip4/0.0.0.0/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B
```
About the flags:
@@ -275,26 +288,29 @@ About the flags:
- `--rootdir` specifies the root dir (config and data) to use
- `--url` is the address to listen on for the client HTTP and GraphQL API
- `--p2paddr` is the multiaddress for the P2P networking to listen on
-- `--tcpaddr` is the multiaddress for the gRPC server to listen on
- `--peers` is a comma-separated list of peer multiaddresses
This starts two nodes and connects them via pubsub networking.
+
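+Both nodes should now be running. You can query each node's peer info to verify it is reachable (the second command assumes the *nodeB* API address used above):
+
+```shell
+defradb client p2p info
+defradb client p2p info --url localhost:9182
+```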
-### Collection subscription example
+
+### Subscription example
-It is possible to subscribe to updates on a given collection by using its ID as the pubsub topic. The ID of a collection is found as the field `collectionID` in one of its documents. Here we use the collection ID of the `User` type we created above. After setting up 2 nodes as shown in the [Pubsub example](#pubsub-example) section, we can subscribe to collections updates on *nodeA* from *nodeB* by using the `rpc p2pcollection` command:
+It is possible to subscribe to updates on a given collection by using its ID as the pubsub topic. The ID of a collection is found as the field `collectionID` in one of its documents. Here we use the collection ID of the `User` type we created above. After setting up two nodes as shown in the [Pubsub example](#pubsub-example) section, we can subscribe to collection updates on *nodeA* from *nodeB* by using the following command:
```shell
-defradb client rpc p2pcollection add --url localhost:9182 bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske
+defradb client p2p collection add --url localhost:9182 bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske
```
Multiple collection IDs can be added at once.
```shell
-defradb client rpc p2pcollection add --url localhost:9182
+defradb client p2p collection add --url localhost:9182 <collection1ID>,<collection2ID>,<collection3ID>
```
+
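+To see which collections a node is subscribed to, list them with the `getall` subcommand (shown here against the *nodeB* API address used above):
+
+```shell
+defradb client p2p collection getall --url localhost:9182
+```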
-### Replicator example
+
+### Replicator example
Replicator peering is targeted: it allows a node to actively send updates to another node. Let's go through an example of *nodeA* actively replicating to *nodeB*:
@@ -334,14 +350,20 @@ defradb client schema add --url localhost:9182 '
'
```
-Set *nodeA* to actively replicate the "Article" collection to *nodeB*:
+Then copy the peer info from *nodeB*:
```shell
-defradb client rpc replicator set -c "Article" /ip4/0.0.0.0/tcp/9172/p2p/
+defradb client p2p info --url localhost:9182
```
-As we add or update documents in the "Article" collection on *nodeA*, they will be actively pushed to *nodeB*. Note that changes to *nodeB* will still be passively published back to *nodeA*, via pubsub.
+Set *nodeA* to actively replicate the Article collection to *nodeB*:
+
+```shell
+defradb client p2p replicator set -c Article <nodeB_peer_info>
+```
+As we add or update documents in the Article collection on *nodeA*, they will be actively pushed to *nodeB*. Note that changes to *nodeB* will still be passively published back to *nodeA*, via pubsub.
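+
+To check which replicators are active on *nodeA*, list them with the `getall` subcommand (run here against the default API address):
+
+```shell
+defradb client p2p replicator getall
+```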
+
## Securing the HTTP API with TLS
diff --git a/cli/p2p_collection_add.go b/cli/p2p_collection_add.go
index 86e0d8f6f9..dedae0a358 100644
--- a/cli/p2p_collection_add.go
+++ b/cli/p2p_collection_add.go
@@ -31,7 +31,7 @@ Example: add multiple collections
`,
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
- store := mustGetStoreContext(cmd)
+ p2p := mustGetP2PContext(cmd)
var collectionIDs []string
for _, id := range strings.Split(args[0], ",") {
@@ -42,7 +42,7 @@ Example: add multiple collections
collectionIDs = append(collectionIDs, id)
}
- return store.AddP2PCollections(cmd.Context(), collectionIDs)
+ return p2p.AddP2PCollections(cmd.Context(), collectionIDs)
},
}
return cmd
diff --git a/cli/p2p_collection_getall.go b/cli/p2p_collection_getall.go
index c07a63f453..10d98582c6 100644
--- a/cli/p2p_collection_getall.go
+++ b/cli/p2p_collection_getall.go
@@ -22,9 +22,9 @@ func MakeP2PCollectionGetAllCommand() *cobra.Command {
This is the list of collections of the node that are synchronized on the pubsub network.`,
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
- store := mustGetStoreContext(cmd)
+ p2p := mustGetP2PContext(cmd)
- cols, err := store.GetAllP2PCollections(cmd.Context())
+ cols, err := p2p.GetAllP2PCollections(cmd.Context())
if err != nil {
return err
}
diff --git a/cli/p2p_collection_remove.go b/cli/p2p_collection_remove.go
index 0c4d14effd..8aa0b5b7df 100644
--- a/cli/p2p_collection_remove.go
+++ b/cli/p2p_collection_remove.go
@@ -31,7 +31,7 @@ Example: remove multiple collections
`,
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
- store := mustGetStoreContext(cmd)
+ p2p := mustGetP2PContext(cmd)
var collectionIDs []string
for _, id := range strings.Split(args[0], ",") {
@@ -42,7 +42,7 @@ Example: remove multiple collections
collectionIDs = append(collectionIDs, id)
}
- return store.RemoveP2PCollections(cmd.Context(), collectionIDs)
+ return p2p.RemoveP2PCollections(cmd.Context(), collectionIDs)
},
}
return cmd
diff --git a/cli/p2p_info.go b/cli/p2p_info.go
index 1ddad18a52..36adfb8fac 100644
--- a/cli/p2p_info.go
+++ b/cli/p2p_info.go
@@ -23,12 +23,7 @@ func MakeP2PInfoCommand() *cobra.Command {
Long: `Get peer info from a DefraDB node`,
RunE: func(cmd *cobra.Command, args []string) error {
db := cmd.Context().Value(dbContextKey).(*http.Client)
-
- res, err := db.PeerInfo(cmd.Context())
- if err != nil {
- return err
- }
- return writeJSON(cmd, res)
+ return writeJSON(cmd, db.PeerInfo())
},
}
return cmd
diff --git a/cli/p2p_replicator_delete.go b/cli/p2p_replicator_delete.go
index 7504d0c932..6cc2ddf785 100644
--- a/cli/p2p_replicator_delete.go
+++ b/cli/p2p_replicator_delete.go
@@ -11,6 +11,8 @@
package cli
import (
+ "encoding/json"
+
"github.com/libp2p/go-libp2p/core/peer"
"github.com/spf13/cobra"
@@ -18,20 +20,32 @@ import (
)
func MakeP2PReplicatorDeleteCommand() *cobra.Command {
+ var collections []string
var cmd = &cobra.Command{
- Use: "delete ",
- Short: "Delete a replicator. It will stop synchronizing",
- Long: `Delete a replicator. It will stop synchronizing.`,
- Args: cobra.ExactArgs(1),
+ Use: "delete [-c, --collection] ",
+ Short: "Delete replicator(s) and stop synchronization",
+ Long: `Delete replicator(s) and stop synchronization.
+A replicator synchronizes one or all collection(s) from this node to another.
+
+Example:
+ defradb client p2p replicator delete -c Users '{"ID": "12D3", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}'
+ `,
+ Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
- store := mustGetStoreContext(cmd)
+ p2p := mustGetP2PContext(cmd)
- addr, err := peer.AddrInfoFromString(args[0])
- if err != nil {
+ var info peer.AddrInfo
+ if err := json.Unmarshal([]byte(args[0]), &info); err != nil {
return err
}
- return store.DeleteReplicator(cmd.Context(), client.Replicator{Info: *addr})
+ rep := client.Replicator{
+ Info: info,
+ Schemas: collections,
+ }
+ return p2p.DeleteReplicator(cmd.Context(), rep)
},
}
+ cmd.Flags().StringSliceVarP(&collections, "collection", "c",
+ []string{}, "Collection(s) to stop replicating")
return cmd
}
diff --git a/cli/p2p_replicator_getall.go b/cli/p2p_replicator_getall.go
index 9192ed4d10..4bdf6e8487 100644
--- a/cli/p2p_replicator_getall.go
+++ b/cli/p2p_replicator_getall.go
@@ -19,11 +19,15 @@ func MakeP2PReplicatorGetAllCommand() *cobra.Command {
Use: "getall",
Short: "Get all replicators",
Long: `Get all the replicators active in the P2P data sync system.
-These are the replicators that are currently replicating data from one node to another.`,
+A replicator synchronizes one or all collection(s) from this node to another.
+
+Example:
+ defradb client p2p replicator getall
+ `,
RunE: func(cmd *cobra.Command, args []string) error {
- store := mustGetStoreContext(cmd)
+ p2p := mustGetP2PContext(cmd)
- reps, err := store.GetAllReplicators(cmd.Context())
+ reps, err := p2p.GetAllReplicators(cmd.Context())
if err != nil {
return err
}
diff --git a/cli/p2p_replicator_set.go b/cli/p2p_replicator_set.go
index 6b590b6ea7..5d9c712a82 100644
--- a/cli/p2p_replicator_set.go
+++ b/cli/p2p_replicator_set.go
@@ -11,6 +11,8 @@
package cli
import (
+ "encoding/json"
+
"github.com/libp2p/go-libp2p/core/peer"
"github.com/spf13/cobra"
@@ -21,27 +23,30 @@ func MakeP2PReplicatorSetCommand() *cobra.Command {
var collections []string
var cmd = &cobra.Command{
Use: "set [-c, --collection] ",
- Short: "Set a P2P replicator",
- Long: `Add a new target replicator.
-A replicator replicates one or all collection(s) from this node to another.
+ Short: "Add replicator(s) and start synchronization",
+ Long: `Add replicator(s) and start synchronization.
+A replicator synchronizes one or all collection(s) from this node to another.
+
+Example:
+ defradb client p2p replicator set -c Users '{"ID": "12D3", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}'
`,
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
- store := mustGetStoreContext(cmd)
+ p2p := mustGetP2PContext(cmd)
- addr, err := peer.AddrInfoFromString(args[0])
- if err != nil {
+ var info peer.AddrInfo
+ if err := json.Unmarshal([]byte(args[0]), &info); err != nil {
return err
}
rep := client.Replicator{
- Info: *addr,
+ Info: info,
Schemas: collections,
}
- return store.SetReplicator(cmd.Context(), rep)
+ return p2p.SetReplicator(cmd.Context(), rep)
},
}
cmd.Flags().StringSliceVarP(&collections, "collection", "c",
- []string{}, "Define the collection for the replicator")
+ []string{}, "Collection(s) to replicate")
return cmd
}
diff --git a/cli/start.go b/cli/start.go
index f0f8b19a8a..da99ae06ba 100644
--- a/cli/start.go
+++ b/cli/start.go
@@ -171,15 +171,10 @@ type defraInstance struct {
func (di *defraInstance) close(ctx context.Context) {
if di.node != nil {
- if err := di.node.Close(); err != nil {
- log.FeedbackInfo(
- ctx,
- "The node could not be closed successfully",
- logging.NewKV("Error", err.Error()),
- )
- }
+ di.node.Close()
+ } else {
+ di.db.Close()
}
- di.db.Close(ctx)
if err := di.server.Close(); err != nil {
log.FeedbackInfo(
ctx,
@@ -222,7 +217,7 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) {
}
// init the p2p node
- var n *net.Node
+ var node *net.Node
if !cfg.Net.P2PDisabled {
nodeOpts := []net.NodeOpt{
net.WithConfig(cfg),
@@ -239,9 +234,9 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) {
nodeOpts = append(nodeOpts, net.WithPrivateKey(key))
}
log.FeedbackInfo(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress))
- n, err = net.NewNode(ctx, db, nodeOpts...)
+ node, err = net.NewNode(ctx, db, nodeOpts...)
if err != nil {
- db.Close(ctx)
+ db.Close()
return nil, errors.Wrap("failed to start P2P node", err)
}
@@ -253,14 +248,11 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) {
return nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", cfg.Net.Peers), err)
}
log.Debug(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs))
- n.Bootstrap(addrs)
+ node.Bootstrap(addrs)
}
- if err := n.Start(); err != nil {
- if e := n.Close(); e != nil {
- err = errors.Wrap(fmt.Sprintf("failed to close node: %v", e.Error()), err)
- }
- db.Close(ctx)
+ if err := node.Start(); err != nil {
+ node.Close()
return nil, errors.Wrap("failed to start P2P listeners", err)
}
}
@@ -271,10 +263,6 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) {
httpapi.WithAllowedOrigins(cfg.API.AllowedOrigins...),
}
- if n != nil {
- sOpt = append(sOpt, httpapi.WithPeerID(n.PeerID().String()))
- }
-
if cfg.API.TLS {
sOpt = append(
sOpt,
@@ -284,32 +272,36 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) {
)
}
- s := httpapi.NewServer(db, sOpt...)
- if err := s.Listen(ctx); err != nil {
- return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", s.Addr), err)
+ var server *httpapi.Server
+ if node != nil {
+ server = httpapi.NewServer(node, sOpt...)
+ } else {
+ server = httpapi.NewServer(db, sOpt...)
+ }
+ if err := server.Listen(ctx); err != nil {
+ return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", server.Addr), err)
}
// save the address on the config in case the port number was set to random
- cfg.API.Address = s.AssignedAddr()
+ cfg.API.Address = server.AssignedAddr()
// run the server in a separate goroutine
go func() {
log.FeedbackInfo(ctx, fmt.Sprintf("Providing HTTP API at %s.", cfg.API.AddressToURL()))
- if err := s.Run(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) {
+ if err := server.Run(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) {
log.FeedbackErrorE(ctx, "Failed to run the HTTP server", err)
- if n != nil {
- if err := n.Close(); err != nil {
- log.FeedbackErrorE(ctx, "Failed to close node", err)
- }
+ if node != nil {
+ node.Close()
+ } else {
+ db.Close()
}
- db.Close(ctx)
os.Exit(1)
}
}()
return &defraInstance{
- node: n,
+ node: node,
db: db,
- server: s,
+ server: server,
}, nil
}
diff --git a/cli/utils.go b/cli/utils.go
index d45808145e..8c1a40dc1f 100644
--- a/cli/utils.go
+++ b/cli/utils.go
@@ -52,6 +52,13 @@ func mustGetStoreContext(cmd *cobra.Command) client.Store {
return cmd.Context().Value(storeContextKey).(client.Store)
}
+// mustGetP2PContext returns the p2p implementation for the current command context.
+//
+// If a p2p implementation is not set in the current context this function panics.
+func mustGetP2PContext(cmd *cobra.Command) client.P2P {
+ return cmd.Context().Value(dbContextKey).(client.P2P)
+}
+
// tryGetCollectionContext returns the collection for the current command context
// and a boolean indicating if the collection was set.
func tryGetCollectionContext(cmd *cobra.Command) (client.Collection, bool) {
diff --git a/client/db.go b/client/db.go
index 47cd7d5a85..5e4873d8dc 100644
--- a/client/db.go
+++ b/client/db.go
@@ -60,7 +60,7 @@ type DB interface {
// be created after calling this to resume operations on the prior data - this is however dependant on
// the behaviour of the rootstore provided on database instance creation, as this function will Close
// the provided rootstore.
- Close(context.Context)
+ Close()
// Events returns the database event queue.
//
@@ -82,9 +82,6 @@ type DB interface {
// Store contains the core DefraDB read-write operations.
type Store interface {
- // P2P holds the P2P related methods that must be implemented by the database.
- P2P
-
// Backup holds the backup related methods that must be implemented by the database.
Backup
diff --git a/client/p2p.go b/client/p2p.go
index 800b946240..12be6ebf8d 100644
--- a/client/p2p.go
+++ b/client/p2p.go
@@ -12,9 +12,17 @@ package client
import (
"context"
+
+ "github.com/libp2p/go-libp2p/core/peer"
)
+// P2P is a peer-connected database implementation.
type P2P interface {
+ DB
+
+ // PeerInfo returns the p2p host id and listening addresses.
+ PeerInfo() peer.AddrInfo
+
// SetReplicator adds a replicator to the persisted list or adds
// schemas if the replicator already exists.
SetReplicator(ctx context.Context, rep Replicator) error
diff --git a/config/config.go b/config/config.go
index 3b2a212c0a..24d5ee73e7 100644
--- a/config/config.go
+++ b/config/config.go
@@ -375,10 +375,11 @@ func (netcfg *NetConfig) validate() error {
peers := strings.Split(netcfg.Peers, ",")
maddrs := make([]ma.Multiaddr, len(peers))
for i, addr := range peers {
- maddrs[i], err = ma.NewMultiaddr(addr)
+ addr, err := ma.NewMultiaddr(addr)
if err != nil {
return NewErrInvalidBootstrapPeers(err, netcfg.Peers)
}
+ maddrs[i] = addr
}
}
return nil
diff --git a/db/backup_test.go b/db/backup_test.go
index 2f89f54a07..f0e7a6e338 100644
--- a/db/backup_test.go
+++ b/db/backup_test.go
@@ -25,7 +25,7 @@ func TestBasicExport_WithNormalFormatting_NoError(t *testing.T) {
ctx := context.Background()
db, err := newMemoryDB(ctx)
require.NoError(t, err)
- defer db.Close(ctx)
+ defer db.Close()
_, err = db.AddSchema(ctx, `type User {
name: String
@@ -87,7 +87,7 @@ func TestBasicExport_WithPrettyFormatting_NoError(t *testing.T) {
ctx := context.Background()
db, err := newMemoryDB(ctx)
require.NoError(t, err)
- defer db.Close(ctx)
+ defer db.Close()
_, err = db.AddSchema(ctx, `type User {
name: String
@@ -149,7 +149,7 @@ func TestBasicExport_WithSingleCollection_NoError(t *testing.T) {
ctx := context.Background()
db, err := newMemoryDB(ctx)
require.NoError(t, err)
- defer db.Close(ctx)
+ defer db.Close()
_, err = db.AddSchema(ctx, `type User {
name: String
@@ -211,7 +211,7 @@ func TestBasicExport_WithMultipleCollectionsAndUpdate_NoError(t *testing.T) {
ctx := context.Background()
db, err := newMemoryDB(ctx)
require.NoError(t, err)
- defer db.Close(ctx)
+ defer db.Close()
_, err = db.AddSchema(ctx, `type User {
name: String
@@ -285,7 +285,7 @@ func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) {
ctx := context.Background()
db, err := newMemoryDB(ctx)
require.NoError(t, err)
- defer db.Close(ctx)
+ defer db.Close()
_, err = db.AddSchema(ctx, `type User {
name: String
@@ -355,7 +355,7 @@ func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) {
ctx := context.Background()
db, err := newMemoryDB(ctx)
require.NoError(t, err)
- defer db.Close(ctx)
+ defer db.Close()
_, err = db.AddSchema(ctx, `type User {
name: String
@@ -414,7 +414,7 @@ func TestBasicImport_WithJSONArray_ReturnError(t *testing.T) {
ctx := context.Background()
db, err := newMemoryDB(ctx)
require.NoError(t, err)
- defer db.Close(ctx)
+ defer db.Close()
_, err = db.AddSchema(ctx, `type User {
name: String
@@ -449,7 +449,7 @@ func TestBasicImport_WithObjectCollection_ReturnError(t *testing.T) {
ctx := context.Background()
db, err := newMemoryDB(ctx)
require.NoError(t, err)
- defer db.Close(ctx)
+ defer db.Close()
_, err = db.AddSchema(ctx, `type User {
name: String
@@ -484,7 +484,7 @@ func TestBasicImport_WithInvalidFilepath_ReturnError(t *testing.T) {
ctx := context.Background()
db, err := newMemoryDB(ctx)
require.NoError(t, err)
- defer db.Close(ctx)
+ defer db.Close()
_, err = db.AddSchema(ctx, `type User {
name: String
@@ -520,7 +520,7 @@ func TestBasicImport_WithInvalidCollection_ReturnError(t *testing.T) {
ctx := context.Background()
db, err := newMemoryDB(ctx)
require.NoError(t, err)
- defer db.Close(ctx)
+ defer db.Close()
_, err = db.AddSchema(ctx, `type User {
name: String
diff --git a/db/db.go b/db/db.go
index 0bc9a361c3..9e73db04bd 100644
--- a/db/db.go
+++ b/db/db.go
@@ -183,10 +183,6 @@ func (db *db) Blockstore() blockstore.Blockstore {
return db.multistore.DAGstore()
}
-func (db *db) systemstore() datastore.DSReaderWriter {
- return db.multistore.Systemstore()
-}
-
func (db *db) LensRegistry() client.LensRegistry {
return db.lensRegistry
}
@@ -266,17 +262,17 @@ func (db *db) PrintDump(ctx context.Context) error {
// Close is called when we are shutting down the database.
// This is the place for any last minute cleanup or releasing of resources (i.e.: Badger instance).
-func (db *db) Close(ctx context.Context) {
- log.Info(ctx, "Closing DefraDB process...")
+func (db *db) Close() {
+ log.Info(context.Background(), "Closing DefraDB process...")
if db.events.Updates.HasValue() {
db.events.Updates.Value().Close()
}
err := db.rootstore.Close()
if err != nil {
- log.ErrorE(ctx, "Failure closing running process", err)
+ log.ErrorE(context.Background(), "Failure closing running process", err)
}
- log.Info(ctx, "Successfully closed running process")
+ log.Info(context.Background(), "Successfully closed running process")
}
func printStore(ctx context.Context, store datastore.DSReaderWriter) error {
diff --git a/db/index_test.go b/db/index_test.go
index d22746a363..f8f3d0b8e6 100644
--- a/db/index_test.go
+++ b/db/index_test.go
@@ -1153,8 +1153,7 @@ func TestDropIndex_ShouldDeleteIndex(t *testing.T) {
func TestDropIndex_IfStorageFails_ReturnError(t *testing.T) {
f := newIndexTestFixture(t)
desc := f.createUserCollectionIndexOnName()
-
- f.db.Close(f.ctx)
+ f.db.Close()
err := f.dropIndex(productsColName, desc.Name)
assert.Error(t, err)
@@ -1290,8 +1289,7 @@ func TestDropAllIndexes_ShouldDeleteAllIndexes(t *testing.T) {
func TestDropAllIndexes_IfStorageFails_ReturnError(t *testing.T) {
f := newIndexTestFixture(t)
f.createUserCollectionIndexOnName()
-
- f.db.Close(f.ctx)
+ f.db.Close()
err := f.users.(*collection).dropAllIndexes(f.ctx, f.txn)
assert.Error(t, err)
diff --git a/db/p2p_collection.go b/db/p2p_collection.go
deleted file mode 100644
index 02fc4139c2..0000000000
--- a/db/p2p_collection.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package db
-
-import (
- "context"
-
- dsq "github.com/ipfs/go-datastore/query"
-
- "github.com/sourcenetwork/defradb/core"
- "github.com/sourcenetwork/defradb/datastore"
-)
-
-const marker = byte(0xff)
-
-// addP2PCollection adds the given collection ID that the P2P system
-// subscribes to to the the persisted list. It will error if the provided
-// collection ID is invalid.
-func (db *db) addP2PCollection(ctx context.Context, txn datastore.Txn, collectionID string) error {
- _, err := db.getCollectionBySchemaID(ctx, txn, collectionID)
- if err != nil {
- return NewErrAddingP2PCollection(err)
- }
- key := core.NewP2PCollectionKey(collectionID)
- return txn.Systemstore().Put(ctx, key.ToDS(), []byte{marker})
-}
-
-// removeP2PCollection removes the given collection ID that the P2P system
-// subscribes to from the the persisted list. It will error if the provided
-// collection ID is invalid.
-func (db *db) removeP2PCollection(ctx context.Context, txn datastore.Txn, collectionID string) error {
- _, err := db.getCollectionBySchemaID(ctx, txn, collectionID)
- if err != nil {
- return NewErrRemovingP2PCollection(err)
- }
- key := core.NewP2PCollectionKey(collectionID)
- return txn.Systemstore().Delete(ctx, key.ToDS())
-}
-
-// getAllP2PCollections returns the list of persisted collection IDs that
-// the P2P system subscribes to.
-func (db *db) getAllP2PCollections(ctx context.Context, txn datastore.Txn) ([]string, error) {
- prefix := core.NewP2PCollectionKey("")
- results, err := db.systemstore().Query(ctx, dsq.Query{
- Prefix: prefix.ToString(),
- })
- if err != nil {
- return nil, err
- }
-
- collectionIDs := []string{}
- for result := range results.Next() {
- key, err := core.NewP2PCollectionKeyFromString(result.Key)
- if err != nil {
- return nil, err
- }
- collectionIDs = append(collectionIDs, key.CollectionID)
- }
-
- return collectionIDs, nil
-}
diff --git a/db/p2p_collection_test.go b/db/p2p_collection_test.go
deleted file mode 100644
index 67d5393c66..0000000000
--- a/db/p2p_collection_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package db
-
-import (
- "context"
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/sourcenetwork/defradb/client"
-)
-
-func newTestCollection(
- t *testing.T,
- ctx context.Context,
- db *implicitTxnDB,
- name string,
-) client.Collection {
- _, err := db.AddSchema(
- ctx,
- fmt.Sprintf(
- `type %s {
- Name: String
- }`,
- name,
- ),
- )
- require.NoError(t, err)
-
- col, err := db.GetCollectionByName(ctx, name)
- require.NoError(t, err)
-
- return col
-}
-
-func TestAddP2PCollection(t *testing.T) {
- ctx := context.Background()
- db, err := newMemoryDB(ctx)
- require.NoError(t, err)
- defer db.Close(ctx)
-
- col := newTestCollection(t, ctx, db, "test")
-
- err = db.AddP2PCollections(ctx, []string{col.SchemaID()})
- require.NoError(t, err)
-}
-
-func TestGetAllP2PCollection(t *testing.T) {
- ctx := context.Background()
- db, err := newMemoryDB(ctx)
- require.NoError(t, err)
- defer db.Close(ctx)
-
- col1 := newTestCollection(t, ctx, db, "test1")
- col2 := newTestCollection(t, ctx, db, "test2")
- col3 := newTestCollection(t, ctx, db, "test3")
-
- collectionIDs := []string{col1.SchemaID(), col2.SchemaID(), col3.SchemaID()}
- err = db.AddP2PCollections(ctx, collectionIDs)
- require.NoError(t, err)
-
- collections, err := db.GetAllP2PCollections(ctx)
- require.NoError(t, err)
- require.ElementsMatch(t, collections, collectionIDs)
-}
-
-func TestRemoveP2PCollection(t *testing.T) {
- ctx := context.Background()
- db, err := newMemoryDB(ctx)
- require.NoError(t, err)
- defer db.Close(ctx)
-
- col1 := newTestCollection(t, ctx, db, "test1")
- col2 := newTestCollection(t, ctx, db, "test2")
- col3 := newTestCollection(t, ctx, db, "test3")
-
- collectionIDs := []string{col1.SchemaID(), col2.SchemaID(), col3.SchemaID()}
-
- err = db.AddP2PCollections(ctx, collectionIDs)
- require.NoError(t, err)
-
- err = db.RemoveP2PCollections(ctx, []string{col2.SchemaID()})
- require.NoError(t, err)
-
- collections, err := db.GetAllP2PCollections(ctx)
- require.NoError(t, err)
- require.ElementsMatch(t, collections, []string{col1.SchemaID(), col3.SchemaID()})
-}
diff --git a/db/replicator.go b/db/replicator.go
deleted file mode 100644
index 84c94b9f5d..0000000000
--- a/db/replicator.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package db
-
-import (
- "context"
- "encoding/json"
- "errors"
-
- ds "github.com/ipfs/go-datastore"
- dsq "github.com/ipfs/go-datastore/query"
- "github.com/libp2p/go-libp2p/core/peer"
-
- "github.com/sourcenetwork/defradb/client"
- "github.com/sourcenetwork/defradb/core"
- "github.com/sourcenetwork/defradb/datastore"
-)
-
-// setReplicator adds a new replicator to the database.
-func (db *db) setReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error {
- existingRep, err := db.getReplicator(ctx, rep.Info)
- if errors.Is(err, ds.ErrNotFound) {
- return db.saveReplicator(ctx, txn, rep)
- }
- if err != nil {
- return err
- }
-
- newSchemas := []string{}
- for _, newSchema := range rep.Schemas {
- isNew := true
- for _, existingSchema := range existingRep.Schemas {
- if existingSchema == newSchema {
- isNew = false
- break
- }
- }
- if isNew {
- newSchemas = append(newSchemas, newSchema)
- }
- }
- rep.Schemas = append(existingRep.Schemas, newSchemas...)
- return db.saveReplicator(ctx, txn, rep)
-}
-
-// deleteReplicator removes a replicator from the database.
-func (db *db) deleteReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error {
- if len(rep.Schemas) == 0 {
- return db.deleteReplicatorKey(ctx, txn, rep.Info.ID)
- }
- return db.deleteSchemasForReplicator(ctx, txn, rep)
-}
-
-func (db *db) deleteReplicatorKey(ctx context.Context, txn datastore.Txn, pid peer.ID) error {
- key := core.NewReplicatorKey(pid.String())
- return txn.Systemstore().Delete(ctx, key.ToDS())
-}
-
-func (db *db) deleteSchemasForReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error {
- existingRep, err := db.getReplicator(ctx, rep.Info)
- if err != nil {
- return err
- }
-
- updatedSchemaList := []string{}
- for _, s := range existingRep.Schemas {
- found := false
- for _, toDelete := range rep.Schemas {
- if toDelete == s {
- found = true
- break
- }
- }
- if !found {
- updatedSchemaList = append(updatedSchemaList, s)
- }
- }
-
- if len(updatedSchemaList) == 0 {
- return db.deleteReplicatorKey(ctx, txn, rep.Info.ID)
- }
-
- existingRep.Schemas = updatedSchemaList
- return db.saveReplicator(ctx, txn, existingRep)
-}
-
-// GetAllReplicators returns all replicators of the database.
-func (db *db) getAllReplicators(ctx context.Context, txn datastore.Txn) ([]client.Replicator, error) {
- reps := []client.Replicator{}
- // create collection system prefix query
- prefix := core.NewReplicatorKey("")
- results, err := txn.Systemstore().Query(ctx, dsq.Query{
- Prefix: prefix.ToString(),
- })
- if err != nil {
- return nil, err
- }
-
- for result := range results.Next() {
- var rep client.Replicator
- err = json.Unmarshal(result.Value, &rep)
- if err != nil {
- return nil, err
- }
-
- reps = append(reps, rep)
- }
-
- return reps, nil
-}
-
-func (db *db) getReplicator(ctx context.Context, info peer.AddrInfo) (client.Replicator, error) {
- rep := client.Replicator{}
- key := core.NewReplicatorKey(info.ID.String())
- value, err := db.systemstore().Get(ctx, key.ToDS())
- if err != nil {
- return rep, err
- }
-
- err = json.Unmarshal(value, &rep)
- if err != nil {
- return rep, err
- }
-
- return rep, nil
-}
-
-func (db *db) saveReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error {
- key := core.NewReplicatorKey(rep.Info.ID.String())
- repBytes, err := json.Marshal(rep)
- if err != nil {
- return err
- }
- return txn.Systemstore().Put(ctx, key.ToDS(), repBytes)
-}
diff --git a/db/replicator_test.go b/db/replicator_test.go
deleted file mode 100644
index f21ab585a9..0000000000
--- a/db/replicator_test.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package db
-
-import (
- "context"
- "testing"
-
- ds "github.com/ipfs/go-datastore"
- "github.com/libp2p/go-libp2p/core/peer"
- ma "github.com/multiformats/go-multiaddr"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-
- "github.com/sourcenetwork/defradb/client"
-)
-
-func TestSetReplicator(t *testing.T) {
- ctx := context.Background()
- db, err := newMemoryDB(ctx)
- require.NoError(t, err)
- defer db.Close(ctx)
- a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B")
- require.NoError(t, err)
- // Extract the peer ID from the multiaddr.
- info, err := peer.AddrInfoFromP2pAddr(a)
- require.NoError(t, err)
- err = db.SetReplicator(ctx, client.Replicator{
- Info: *info,
- Schemas: []string{"test"},
- })
- assert.NoError(t, err)
-}
-
-func TestGetAllReplicatorsWith2Addition(t *testing.T) {
- ctx := context.Background()
- db, err := newMemoryDB(ctx)
- require.NoError(t, err)
- defer db.Close(ctx)
- a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B")
- require.NoError(t, err)
-
- // Extract the peer ID from the multiaddr.
- info, err := peer.AddrInfoFromP2pAddr(a)
- require.NoError(t, err)
-
- err = db.SetReplicator(ctx, client.Replicator{
- Info: *info,
- Schemas: []string{"test"},
- })
- require.NoError(t, err)
-
- a2, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8C")
- require.NoError(t, err)
-
- // Extract the peer ID from the multiaddr.
- info2, err := peer.AddrInfoFromP2pAddr(a2)
- require.NoError(t, err)
-
- err = db.SetReplicator(ctx, client.Replicator{
- Info: *info2,
- Schemas: []string{"test", "test2", "test3"},
- })
- require.NoError(t, err)
-
- reps, err := db.GetAllReplicators(ctx)
- require.NoError(t, err)
-
- assert.Equal(t, []client.Replicator{
- {
- Info: *info,
- Schemas: []string{"test"},
- },
- {
- Info: *info2,
- Schemas: []string{"test", "test2", "test3"},
- },
- }, reps)
-}
-
-func TestGetAllReplicatorsWith2AdditionsOnSamePeer(t *testing.T) {
- ctx := context.Background()
- db, err := newMemoryDB(ctx)
- require.NoError(t, err)
- defer db.Close(ctx)
- a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B")
- require.NoError(t, err)
-
- // Extract the peer ID from the multiaddr.
- info, err := peer.AddrInfoFromP2pAddr(a)
- require.NoError(t, err)
-
- err = db.SetReplicator(ctx, client.Replicator{
- Info: *info,
- Schemas: []string{"test"},
- })
- require.NoError(t, err)
-
- err = db.SetReplicator(ctx, client.Replicator{
- Info: *info,
- Schemas: []string{"test", "test2", "test3"},
- })
- require.NoError(t, err)
-
- reps, err := db.GetAllReplicators(ctx)
- require.NoError(t, err)
-
- assert.Equal(t, []client.Replicator{
- {
- Info: *info,
- Schemas: []string{"test", "test2", "test3"},
- },
- }, reps)
-}
-
-func TestDeleteSchemaForReplicator(t *testing.T) {
- ctx := context.Background()
- db, err := newMemoryDB(ctx)
- require.NoError(t, err)
- defer db.Close(ctx)
- a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B")
- require.NoError(t, err)
-
- // Extract the peer ID from the multiaddr.
- info, err := peer.AddrInfoFromP2pAddr(a)
- require.NoError(t, err)
-
- err = db.SetReplicator(ctx, client.Replicator{
- Info: *info,
- Schemas: []string{"test", "test2", "test3"},
- })
- require.NoError(t, err)
-
- err = db.DeleteReplicator(ctx, client.Replicator{
- Info: *info,
- Schemas: []string{"test2"},
- })
- require.NoError(t, err)
-
- rep, err := db.getReplicator(ctx, *info)
- require.NoError(t, err)
-
- assert.Equal(t, client.Replicator{
- Info: *info,
- Schemas: []string{"test", "test3"},
- }, rep)
-}
-
-func TestDeleteAllSchemasForReplicator(t *testing.T) {
- ctx := context.Background()
- db, err := newMemoryDB(ctx)
- require.NoError(t, err)
- defer db.Close(ctx)
- a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B")
- require.NoError(t, err)
-
- // Extract the peer ID from the multiaddr.
- info, err := peer.AddrInfoFromP2pAddr(a)
- require.NoError(t, err)
-
- err = db.SetReplicator(ctx, client.Replicator{
- Info: *info,
- Schemas: []string{"test", "test2", "test3"},
- })
- require.NoError(t, err)
-
- err = db.DeleteReplicator(ctx, client.Replicator{
- Info: *info,
- Schemas: []string{"test", "test2", "test3"},
- })
- require.NoError(t, err)
-
- _, err = db.getReplicator(ctx, *info)
- require.ErrorIs(t, err, ds.ErrNotFound)
-}
-
-func TestDeleteReplicatorWith2Addition(t *testing.T) {
- ctx := context.Background()
- db, err := newMemoryDB(ctx)
- require.NoError(t, err)
- defer db.Close(ctx)
- a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B")
- require.NoError(t, err)
-
- // Extract the peer ID from the multiaddr.
- info, err := peer.AddrInfoFromP2pAddr(a)
- require.NoError(t, err)
-
- err = db.SetReplicator(ctx, client.Replicator{
- Info: *info,
- Schemas: []string{"test"},
- })
- require.NoError(t, err)
-
- a2, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8C")
- require.NoError(t, err)
-
- // Extract the peer ID from the multiaddr.
- info2, err := peer.AddrInfoFromP2pAddr(a2)
- require.NoError(t, err)
-
- err = db.SetReplicator(ctx, client.Replicator{
- Info: *info2,
- Schemas: []string{"test", "test2", "test3"},
- })
- require.NoError(t, err)
-
- reps, err := db.GetAllReplicators(ctx)
- require.NoError(t, err)
-
- assert.Equal(t, []client.Replicator{
- {
- Info: *info,
- Schemas: []string{"test"},
- },
- {
- Info: *info2,
- Schemas: []string{"test", "test2", "test3"},
- },
- }, reps)
-
- err = db.DeleteReplicator(ctx, client.Replicator{Info: *info})
- require.NoError(t, err)
-
- reps, err = db.GetAllReplicators(ctx)
- require.NoError(t, err)
-
- assert.Equal(t, []client.Replicator{
- {
- Info: *info2,
- Schemas: []string{"test", "test2", "test3"},
- },
- }, reps)
-}
diff --git a/db/txn_db.go b/db/txn_db.go
index e996d9a9c8..0627f8ebc8 100644
--- a/db/txn_db.go
+++ b/db/txn_db.go
@@ -121,70 +121,6 @@ func (db *explicitTxnDB) GetCollectionByVersionID(
return db.getCollectionByVersionID(ctx, db.txn, schemaVersionID)
}
-// AddP2PCollections adds the given collection IDs to the P2P system and
-// subscribes to their topics. It will error if any of the provided
-// collection IDs are invalid.
-func (db *implicitTxnDB) AddP2PCollections(ctx context.Context, collectionIDs []string) error {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- for _, collectionID := range collectionIDs {
- err = db.addP2PCollection(ctx, txn, collectionID)
- if err != nil {
- return err
- }
- }
- return txn.Commit(ctx)
-}
-
-// AddP2PCollections adds the given collection IDs to the P2P system and
-// subscribes to their topics. It will error if any of the provided
-// collection IDs are invalid.
-func (db *explicitTxnDB) AddP2PCollections(ctx context.Context, collectionIDs []string) error {
- for _, collectionID := range collectionIDs {
- err := db.addP2PCollection(ctx, db.txn, collectionID)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-// RemoveP2PCollections removes the given collection IDs from the P2P system and
-// unsubscribes from their topics. It will error if the provided
-// collection IDs are invalid.
-func (db *implicitTxnDB) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- for _, collectionID := range collectionIDs {
- err = db.removeP2PCollection(ctx, txn, collectionID)
- if err != nil {
- return err
- }
- }
- return txn.Commit(ctx)
-}
-
-// RemoveP2PCollections removes the given collection IDs from the P2P system and
-// unsubscribes from their topics. It will error if the provided
-// collection IDs are invalid.
-func (db *explicitTxnDB) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error {
- for _, collectionID := range collectionIDs {
- err := db.removeP2PCollection(ctx, db.txn, collectionID)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
// GetAllCollections gets all the currently defined collections.
func (db *implicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collection, error) {
txn, err := db.NewTxn(ctx, true)
@@ -332,82 +268,6 @@ func (db *explicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig
return db.lensRegistry.SetMigration(ctx, cfg)
}
-// SetReplicator adds a new replicator to the database.
-func (db *implicitTxnDB) SetReplicator(ctx context.Context, rep client.Replicator) error {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- err = db.setReplicator(ctx, txn, rep)
- if err != nil {
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-// SetReplicator adds a new replicator to the database.
-func (db *explicitTxnDB) SetReplicator(ctx context.Context, rep client.Replicator) error {
- return db.setReplicator(ctx, db.txn, rep)
-}
-
-// DeleteReplicator removes a replicator from the database.
-func (db *implicitTxnDB) DeleteReplicator(ctx context.Context, rep client.Replicator) error {
- txn, err := db.NewTxn(ctx, false)
- if err != nil {
- return err
- }
- defer txn.Discard(ctx)
-
- err = db.deleteReplicator(ctx, txn, rep)
- if err != nil {
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-// DeleteReplicator removes a replicator from the database.
-func (db *explicitTxnDB) DeleteReplicator(ctx context.Context, rep client.Replicator) error {
- return db.deleteReplicator(ctx, db.txn, rep)
-}
-
-// GetAllReplicators returns all replicators of the database.
-func (db *implicitTxnDB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) {
- txn, err := db.NewTxn(ctx, true)
- if err != nil {
- return nil, err
- }
- defer txn.Discard(ctx)
-
- return db.getAllReplicators(ctx, txn)
-}
-
-// GetAllReplicators returns all replicators of the database.
-func (db *explicitTxnDB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) {
- return db.getAllReplicators(ctx, db.txn)
-}
-
-// GetAllP2PCollections returns the list of persisted collection IDs that
-// the P2P system subscribes to.
-func (db *implicitTxnDB) GetAllP2PCollections(ctx context.Context) ([]string, error) {
- txn, err := db.NewTxn(ctx, true)
- if err != nil {
- return nil, err
- }
- defer txn.Discard(ctx)
-
- return db.getAllP2PCollections(ctx, txn)
-}
-
-// GetAllP2PCollections returns the list of persisted collection IDs that
-// the P2P system subscribes to.
-func (db *explicitTxnDB) GetAllP2PCollections(ctx context.Context) ([]string, error) {
- return db.getAllP2PCollections(ctx, db.txn)
-}
-
// BasicImport imports a json dataset.
// filepath must be accessible to the node.
func (db *implicitTxnDB) BasicImport(ctx context.Context, filepath string) error {
diff --git a/http/client.go b/http/client.go
index 21006f2194..d74e4f404d 100644
--- a/http/client.go
+++ b/http/client.go
@@ -87,94 +87,6 @@ func (c *Client) WithTxn(tx datastore.Txn) client.Store {
return &Client{client}
}
-func (c *Client) SetReplicator(ctx context.Context, rep client.Replicator) error {
- methodURL := c.http.baseURL.JoinPath("p2p", "replicators")
-
- body, err := json.Marshal(rep)
- if err != nil {
- return err
- }
- req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body))
- if err != nil {
- return err
- }
- _, err = c.http.request(req)
- return err
-}
-
-func (c *Client) DeleteReplicator(ctx context.Context, rep client.Replicator) error {
- methodURL := c.http.baseURL.JoinPath("p2p", "replicators")
-
- body, err := json.Marshal(rep)
- if err != nil {
- return err
- }
- req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body))
- if err != nil {
- return err
- }
- _, err = c.http.request(req)
- return err
-}
-
-func (c *Client) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) {
- methodURL := c.http.baseURL.JoinPath("p2p", "replicators")
-
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
- if err != nil {
- return nil, err
- }
- var reps []client.Replicator
- if err := c.http.requestJson(req, &reps); err != nil {
- return nil, err
- }
- return reps, nil
-}
-
-func (c *Client) AddP2PCollections(ctx context.Context, collectionIDs []string) error {
- methodURL := c.http.baseURL.JoinPath("p2p", "collections")
-
- body, err := json.Marshal(collectionIDs)
- if err != nil {
- return err
- }
- req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body))
- if err != nil {
- return err
- }
- _, err = c.http.request(req)
- return err
-}
-
-func (c *Client) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error {
- methodURL := c.http.baseURL.JoinPath("p2p", "collections")
-
- body, err := json.Marshal(collectionIDs)
- if err != nil {
- return err
- }
- req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body))
- if err != nil {
- return err
- }
- _, err = c.http.request(req)
- return err
-}
-
-func (c *Client) GetAllP2PCollections(ctx context.Context) ([]string, error) {
- methodURL := c.http.baseURL.JoinPath("p2p", "collections")
-
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
- if err != nil {
- return nil, err
- }
- var cols []string
- if err := c.http.requestJson(req, &cols); err != nil {
- return nil, err
- }
- return cols, nil
-}
-
func (c *Client) BasicImport(ctx context.Context, filepath string) error {
methodURL := c.http.baseURL.JoinPath("backup", "import")
@@ -425,21 +337,7 @@ func (c *Client) PrintDump(ctx context.Context) error {
return err
}
-func (c *Client) PeerInfo(ctx context.Context) (*PeerInfoResponse, error) {
- methodURL := c.http.baseURL.JoinPath("p2p", "info")
-
- req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
- if err != nil {
- return nil, err
- }
- var res PeerInfoResponse
- if err := c.http.requestJson(req, &res); err != nil {
- return nil, err
- }
- return &res, nil
-}
-
-func (c *Client) Close(ctx context.Context) {
+func (c *Client) Close() {
// do nothing
}
diff --git a/http/client_p2p.go b/http/client_p2p.go
new file mode 100644
index 0000000000..8d5f470f99
--- /dev/null
+++ b/http/client_p2p.go
@@ -0,0 +1,124 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package http
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net/http"
+
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/sourcenetwork/defradb/client"
+)
+
+func (c *Client) PeerInfo() peer.AddrInfo {
+ methodURL := c.http.baseURL.JoinPath("p2p", "info")
+
+ req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, methodURL.String(), nil)
+ if err != nil {
+ return peer.AddrInfo{}
+ }
+ var res peer.AddrInfo
+ if err := c.http.requestJson(req, &res); err != nil {
+ return peer.AddrInfo{}
+ }
+ return res
+}
+
+func (c *Client) SetReplicator(ctx context.Context, rep client.Replicator) error {
+ methodURL := c.http.baseURL.JoinPath("p2p", "replicators")
+
+ body, err := json.Marshal(rep)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body))
+ if err != nil {
+ return err
+ }
+ _, err = c.http.request(req)
+ return err
+}
+
+func (c *Client) DeleteReplicator(ctx context.Context, rep client.Replicator) error {
+ methodURL := c.http.baseURL.JoinPath("p2p", "replicators")
+
+ body, err := json.Marshal(rep)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body))
+ if err != nil {
+ return err
+ }
+ _, err = c.http.request(req)
+ return err
+}
+
+func (c *Client) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) {
+ methodURL := c.http.baseURL.JoinPath("p2p", "replicators")
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ var reps []client.Replicator
+ if err := c.http.requestJson(req, &reps); err != nil {
+ return nil, err
+ }
+ return reps, nil
+}
+
+func (c *Client) AddP2PCollections(ctx context.Context, collectionIDs []string) error {
+ methodURL := c.http.baseURL.JoinPath("p2p", "collections")
+
+ body, err := json.Marshal(collectionIDs)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body))
+ if err != nil {
+ return err
+ }
+ _, err = c.http.request(req)
+ return err
+}
+
+func (c *Client) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error {
+ methodURL := c.http.baseURL.JoinPath("p2p", "collections")
+
+ body, err := json.Marshal(collectionIDs)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body))
+ if err != nil {
+ return err
+ }
+ _, err = c.http.request(req)
+ return err
+}
+
+func (c *Client) GetAllP2PCollections(ctx context.Context) ([]string, error) {
+ methodURL := c.http.baseURL.JoinPath("p2p", "collections")
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ var cols []string
+ if err := c.http.requestJson(req, &cols); err != nil {
+ return nil, err
+ }
+ return cols, nil
+}
diff --git a/http/errors.go b/http/errors.go
index 848d293a91..7e07053df5 100644
--- a/http/errors.go
+++ b/http/errors.go
@@ -37,6 +37,7 @@ var (
ErrMigrationNotFound = errors.New("migration not found")
ErrMissingRequest = errors.New("missing request")
ErrInvalidTransactionId = errors.New("invalid transaction id")
+ ErrP2PDisabled = errors.New("p2p network is disabled")
)
type errorResponse struct {
diff --git a/http/handler.go b/http/handler.go
index d8cd33c444..b9b5754419 100644
--- a/http/handler.go
+++ b/http/handler.go
@@ -40,6 +40,7 @@ func NewHandler(db client.DB, opts ServerOptions) *Handler {
tx_handler := &txHandler{}
store_handler := &storeHandler{}
collection_handler := &collectionHandler{}
+ p2p_handler := &p2pHandler{}
lens_handler := &lensHandler{}
ccip_handler := &ccipHandler{}
@@ -99,16 +100,16 @@ func NewHandler(db client.DB, opts ServerOptions) *Handler {
ccip.Post("/", ccip_handler.ExecCCIP)
})
api.Route("/p2p", func(p2p chi.Router) {
- p2p.Get("/info", store_handler.PeerInfo)
+ p2p.Get("/info", p2p_handler.PeerInfo)
p2p.Route("/replicators", func(p2p_replicators chi.Router) {
- p2p_replicators.Get("/", store_handler.GetAllReplicators)
- p2p_replicators.Post("/", store_handler.SetReplicator)
- p2p_replicators.Delete("/", store_handler.DeleteReplicator)
+ p2p_replicators.Get("/", p2p_handler.GetAllReplicators)
+ p2p_replicators.Post("/", p2p_handler.SetReplicator)
+ p2p_replicators.Delete("/", p2p_handler.DeleteReplicator)
})
p2p.Route("/collections", func(p2p_collections chi.Router) {
- p2p_collections.Get("/", store_handler.GetAllP2PCollections)
- p2p_collections.Post("/", store_handler.AddP2PCollection)
- p2p_collections.Delete("/", store_handler.RemoveP2PCollection)
+ p2p_collections.Get("/", p2p_handler.GetAllP2PCollections)
+ p2p_collections.Post("/", p2p_handler.AddP2PCollection)
+ p2p_collections.Delete("/", p2p_handler.RemoveP2PCollection)
})
})
api.Route("/debug", func(debug chi.Router) {
diff --git a/http/handler_p2p.go b/http/handler_p2p.go
new file mode 100644
index 0000000000..cec11b8325
--- /dev/null
+++ b/http/handler_p2p.go
@@ -0,0 +1,138 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package http
+
+import (
+ "net/http"
+
+ "github.com/sourcenetwork/defradb/client"
+)
+
+type p2pHandler struct{}
+
+func (s *p2pHandler) PeerInfo(rw http.ResponseWriter, req *http.Request) {
+ p2p, ok := req.Context().Value(dbContextKey).(client.P2P)
+ if !ok {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled})
+ return
+ }
+ responseJSON(rw, http.StatusOK, p2p.PeerInfo())
+}
+
+func (s *p2pHandler) SetReplicator(rw http.ResponseWriter, req *http.Request) {
+ p2p, ok := req.Context().Value(dbContextKey).(client.P2P)
+ if !ok {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled})
+ return
+ }
+
+ var rep client.Replicator
+ if err := requestJSON(req, &rep); err != nil {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{err})
+ return
+ }
+ err := p2p.SetReplicator(req.Context(), rep)
+ if err != nil {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{err})
+ return
+ }
+ rw.WriteHeader(http.StatusOK)
+}
+
+func (s *p2pHandler) DeleteReplicator(rw http.ResponseWriter, req *http.Request) {
+ p2p, ok := req.Context().Value(dbContextKey).(client.P2P)
+ if !ok {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled})
+ return
+ }
+
+ var rep client.Replicator
+ if err := requestJSON(req, &rep); err != nil {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{err})
+ return
+ }
+ err := p2p.DeleteReplicator(req.Context(), rep)
+ if err != nil {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{err})
+ return
+ }
+ rw.WriteHeader(http.StatusOK)
+}
+
+func (s *p2pHandler) GetAllReplicators(rw http.ResponseWriter, req *http.Request) {
+ p2p, ok := req.Context().Value(dbContextKey).(client.P2P)
+ if !ok {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled})
+ return
+ }
+
+ reps, err := p2p.GetAllReplicators(req.Context())
+ if err != nil {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{err})
+ return
+ }
+ responseJSON(rw, http.StatusOK, reps)
+}
+
+func (s *p2pHandler) AddP2PCollection(rw http.ResponseWriter, req *http.Request) {
+ p2p, ok := req.Context().Value(dbContextKey).(client.P2P)
+ if !ok {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled})
+ return
+ }
+
+ var collectionIDs []string
+ if err := requestJSON(req, &collectionIDs); err != nil {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{err})
+ return
+ }
+ err := p2p.AddP2PCollections(req.Context(), collectionIDs)
+ if err != nil {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{err})
+ return
+ }
+ rw.WriteHeader(http.StatusOK)
+}
+
+func (s *p2pHandler) RemoveP2PCollection(rw http.ResponseWriter, req *http.Request) {
+ p2p, ok := req.Context().Value(dbContextKey).(client.P2P)
+ if !ok {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled})
+ return
+ }
+
+ var collectionIDs []string
+ if err := requestJSON(req, &collectionIDs); err != nil {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{err})
+ return
+ }
+ err := p2p.RemoveP2PCollections(req.Context(), collectionIDs)
+ if err != nil {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{err})
+ return
+ }
+ rw.WriteHeader(http.StatusOK)
+}
+
+func (s *p2pHandler) GetAllP2PCollections(rw http.ResponseWriter, req *http.Request) {
+ p2p, ok := req.Context().Value(dbContextKey).(client.P2P)
+ if !ok {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled})
+ return
+ }
+
+ cols, err := p2p.GetAllP2PCollections(req.Context())
+ if err != nil {
+ responseJSON(rw, http.StatusBadRequest, errorResponse{err})
+ return
+ }
+ responseJSON(rw, http.StatusOK, cols)
+}
diff --git a/http/handler_store.go b/http/handler_store.go
index 6361a7b900..ce58383548 100644
--- a/http/handler_store.go
+++ b/http/handler_store.go
@@ -22,92 +22,6 @@ import (
type storeHandler struct{}
-func (s *storeHandler) SetReplicator(rw http.ResponseWriter, req *http.Request) {
- store := req.Context().Value(storeContextKey).(client.Store)
-
- var rep client.Replicator
- if err := requestJSON(req, &rep); err != nil {
- responseJSON(rw, http.StatusBadRequest, errorResponse{err})
- return
- }
- err := store.SetReplicator(req.Context(), rep)
- if err != nil {
- responseJSON(rw, http.StatusBadRequest, errorResponse{err})
- return
- }
- rw.WriteHeader(http.StatusOK)
-}
-
-func (s *storeHandler) DeleteReplicator(rw http.ResponseWriter, req *http.Request) {
- store := req.Context().Value(storeContextKey).(client.Store)
-
- var rep client.Replicator
- if err := requestJSON(req, &rep); err != nil {
- responseJSON(rw, http.StatusBadRequest, errorResponse{err})
- return
- }
- err := store.DeleteReplicator(req.Context(), rep)
- if err != nil {
- responseJSON(rw, http.StatusBadRequest, errorResponse{err})
- return
- }
- rw.WriteHeader(http.StatusOK)
-}
-
-func (s *storeHandler) GetAllReplicators(rw http.ResponseWriter, req *http.Request) {
- store := req.Context().Value(storeContextKey).(client.Store)
-
- reps, err := store.GetAllReplicators(req.Context())
- if err != nil {
- responseJSON(rw, http.StatusBadRequest, errorResponse{err})
- return
- }
- responseJSON(rw, http.StatusOK, reps)
-}
-
-func (s *storeHandler) AddP2PCollection(rw http.ResponseWriter, req *http.Request) {
- store := req.Context().Value(storeContextKey).(client.Store)
-
- var collectionIDs []string
- if err := requestJSON(req, &collectionIDs); err != nil {
- responseJSON(rw, http.StatusBadRequest, errorResponse{err})
- return
- }
- err := store.AddP2PCollections(req.Context(), collectionIDs)
- if err != nil {
- responseJSON(rw, http.StatusBadRequest, errorResponse{err})
- return
- }
- rw.WriteHeader(http.StatusOK)
-}
-
-func (s *storeHandler) RemoveP2PCollection(rw http.ResponseWriter, req *http.Request) {
- store := req.Context().Value(storeContextKey).(client.Store)
-
- var collectionIDs []string
- if err := requestJSON(req, &collectionIDs); err != nil {
- responseJSON(rw, http.StatusBadRequest, errorResponse{err})
- return
- }
- err := store.RemoveP2PCollections(req.Context(), collectionIDs)
- if err != nil {
- responseJSON(rw, http.StatusBadRequest, errorResponse{err})
- return
- }
- rw.WriteHeader(http.StatusOK)
-}
-
-func (s *storeHandler) GetAllP2PCollections(rw http.ResponseWriter, req *http.Request) {
- store := req.Context().Value(storeContextKey).(client.Store)
-
- cols, err := store.GetAllP2PCollections(req.Context())
- if err != nil {
- responseJSON(rw, http.StatusBadRequest, errorResponse{err})
- return
- }
- responseJSON(rw, http.StatusOK, cols)
-}
-
func (s *storeHandler) BasicImport(rw http.ResponseWriter, req *http.Request) {
store := req.Context().Value(storeContextKey).(client.Store)
@@ -250,18 +164,6 @@ func (s *storeHandler) PrintDump(rw http.ResponseWriter, req *http.Request) {
rw.WriteHeader(http.StatusOK)
}
-type PeerInfoResponse struct {
- PeerID string `json:"peerID"`
-}
-
-func (s *storeHandler) PeerInfo(rw http.ResponseWriter, req *http.Request) {
- var res PeerInfoResponse
- if value, ok := req.Context().Value(peerIdContextKey).(string); ok {
- res.PeerID = value
- }
- responseJSON(rw, http.StatusOK, &res)
-}
-
type GraphQLRequest struct {
Query string `json:"query"`
}
diff --git a/http/middleware.go b/http/middleware.go
index 932797ff2c..d33cbfb5ff 100644
--- a/http/middleware.go
+++ b/http/middleware.go
@@ -53,8 +53,6 @@ var (
// If a transaction exists, all operations will be executed
// in the current transaction context.
colContextKey = contextKey("col")
- // peerIdContextKey contains the peerId of the DefraDB node.
- peerIdContextKey = contextKey("peerId")
)
// CorsMiddleware handles cross origin request
@@ -83,7 +81,6 @@ func ApiMiddleware(db client.DB, txs *sync.Map, opts ServerOptions) func(http.Ha
ctx := req.Context()
ctx = context.WithValue(ctx, dbContextKey, db)
ctx = context.WithValue(ctx, txsContextKey, txs)
- ctx = context.WithValue(ctx, peerIdContextKey, opts.PeerID)
next.ServeHTTP(rw, req.WithContext(ctx))
})
}
diff --git a/http/server.go b/http/server.go
index ccfefb08b1..854a73f506 100644
--- a/http/server.go
+++ b/http/server.go
@@ -61,8 +61,6 @@ type Server struct {
type ServerOptions struct {
// AllowedOrigins is the list of allowed origins for CORS.
AllowedOrigins []string
- // PeerID is the p2p id of the server node.
- PeerID string
// TLS enables https when the value is present.
TLS immutable.Option[TLSOptions]
// RootDirectory is the directory for the node config.
@@ -162,13 +160,6 @@ func WithCAEmail(email string) func(*Server) {
}
}
-// WithPeerID returns an option to set the identifier of the server node.
-func WithPeerID(id string) func(*Server) {
- return func(s *Server) {
- s.options.PeerID = id
- }
-}
-
// WithRootDir returns an option to set the root directory for the node config.
func WithRootDir(rootDir string) func(*Server) {
return func(s *Server) {
diff --git a/http/server_test.go b/http/server_test.go
index 790f710249..33db303454 100644
--- a/http/server_test.go
+++ b/http/server_test.go
@@ -221,11 +221,6 @@ func TestNewServerWithCAEmail(t *testing.T) {
assert.Equal(t, "me@example.com", s.options.TLS.Value().Email)
}
-func TestNewServerWithPeerID(t *testing.T) {
- s := NewServer(nil, WithPeerID("12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR"))
- assert.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", s.options.PeerID)
-}
-
func TestNewServerWithRootDir(t *testing.T) {
dir := t.TempDir()
s := NewServer(nil, WithRootDir(dir))
diff --git a/net/client_test.go b/net/client_test.go
index e28c543175..f9fc495d40 100644
--- a/net/client_test.go
+++ b/net/client_test.go
@@ -15,7 +15,6 @@ import (
"testing"
"github.com/libp2p/go-libp2p/core/peer"
- ma "github.com/multiformats/go-multiaddr"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
@@ -76,12 +75,7 @@ func TestPushlogW_WithValidPeerID_NoError(t *testing.T) {
_, n2 := newTestNode(ctx, t)
n2.Start()
- err := n1.host.Connect(ctx, peer.AddrInfo{
- ID: n2.PeerID(),
- Addrs: []ma.Multiaddr{
- n2.host.Addrs()[0],
- },
- })
+ err := n1.host.Connect(ctx, n2.PeerInfo())
require.NoError(t, err)
_, err = n1.db.AddSchema(ctx, `type User {
@@ -116,6 +110,6 @@ func TestPushlogW_WithValidPeerID_NoError(t *testing.T) {
SchemaID: col.SchemaID(),
Block: &EmptyNode{},
Priority: 1,
- }, n2.PeerID())
+ }, n2.PeerInfo().ID)
require.NoError(t, err)
}
diff --git a/net/dag_test.go b/net/dag_test.go
index 7373967a76..124c464db4 100644
--- a/net/dag_test.go
+++ b/net/dag_test.go
@@ -39,8 +39,7 @@ func TestSendJobWorker_ExitOnContextClose_NoError(t *testing.T) {
n.sendJobWorker()
close(done)
}()
- err := n.Close()
- require.NoError(t, err)
+ n.Close()
select {
case <-done:
case <-time.After(timeout):
@@ -83,8 +82,7 @@ func TestSendJobWorker_WithNewJobWithClosePriorToProcessing_NoError(t *testing.T
txn: txn,
}
- err = n.Close()
- require.NoError(t, err)
+ n.Close()
select {
case <-done:
case <-time.After(timeout):
@@ -128,8 +126,7 @@ func TestSendJobWorker_WithNewJob_NoError(t *testing.T) {
}
// Give the jobworker time to process the job.
time.Sleep(100 * time.Microsecond)
- err = n.Close()
- require.NoError(t, err)
+ n.Close()
select {
case <-done:
case <-time.After(timeout):
@@ -174,8 +171,7 @@ func TestSendJobWorker_WithCloseJob_NoError(t *testing.T) {
n.closeJob <- dsKey.DocKey
- err = n.Close()
- require.NoError(t, err)
+ n.Close()
select {
case <-done:
case <-time.After(timeout):
@@ -250,10 +246,8 @@ func TestSendJobWorker_WithPeerAndNoChildren_NoError(t *testing.T) {
}
// Give the jobworker time to process the job.
time.Sleep(100 * time.Microsecond)
- err = n1.Close()
- require.NoError(t, err)
- err = n2.Close()
- require.NoError(t, err)
+ n1.Close()
+ n2.Close()
select {
case <-done:
case <-time.After(timeout):
@@ -347,10 +341,8 @@ func TestSendJobWorker_WithPeerAndChildren_NoError(t *testing.T) {
}
// Give the jobworker time to process the job.
time.Sleep(100 * time.Microsecond)
- err = n1.Close()
- require.NoError(t, err)
- err = n2.Close()
- require.NoError(t, err)
+ n1.Close()
+ n2.Close()
select {
case <-done:
case <-time.After(timeout):
diff --git a/net/errors.go b/net/errors.go
index 3f8d4926c5..e9ac8fc748 100644
--- a/net/errors.go
+++ b/net/errors.go
@@ -13,6 +13,8 @@ package net
import (
"fmt"
+ "github.com/libp2p/go-libp2p/core/peer"
+
"github.com/sourcenetwork/defradb/errors"
)
@@ -21,6 +23,9 @@ const (
errFailedToGetDockey = "failed to get DocKey from broadcast message"
errPublishingToDockeyTopic = "can't publish log %s for dockey %s"
errPublishingToSchemaTopic = "can't publish log %s for schema %s"
+ errReplicatorExists = "replicator already exists for %s with peerID %s"
+ errReplicatorDocKey = "failed to get dockey for replicator %s with peerID %s"
+ errReplicatorCollections = "failed to get collections for replicator"
)
var (
@@ -47,3 +52,15 @@ func NewErrPublishingToDockeyTopic(inner error, cid, key string, kv ...errors.KV
func NewErrPublishingToSchemaTopic(inner error, cid, key string, kv ...errors.KV) error {
return errors.Wrap(fmt.Sprintf(errPublishingToSchemaTopic, cid, key), inner, kv...)
}
+
+func NewErrReplicatorExists(collection string, peerID peer.ID, kv ...errors.KV) error {
+ return errors.New(fmt.Sprintf(errReplicatorExists, collection, peerID), kv...)
+}
+
+func NewErrReplicatorDocKey(inner error, collection string, peerID peer.ID, kv ...errors.KV) error {
+ return errors.Wrap(fmt.Sprintf(errReplicatorDocKey, collection, peerID), inner, kv...)
+}
+
+func NewErrReplicatorCollections(inner error, kv ...errors.KV) error {
+ return errors.Wrap(errReplicatorCollections, inner, kv...)
+}
diff --git a/net/node.go b/net/node.go
index 392267fefa..b2a9f08ed8 100644
--- a/net/node.go
+++ b/net/node.go
@@ -51,10 +51,12 @@ import (
var evtWaitTimeout = 10 * time.Second
+var _ client.P2P = (*Node)(nil)
+
// Node is a networked peer instance of DefraDB.
type Node struct {
// embed the DB interface into the node
- DB client.DB
+ client.DB
*Peer
@@ -124,8 +126,8 @@ func NewNode(
return ddht, err
}),
}
- if options.EnableRelay {
- libp2pOpts = append(libp2pOpts, libp2p.EnableRelay())
+ if !options.EnableRelay {
+ libp2pOpts = append(libp2pOpts, libp2p.DisableRelay())
}
h, err := libp2p.New(libp2pOpts...)
@@ -223,21 +225,18 @@ func (n *Node) Bootstrap(addrs []peer.AddrInfo) {
}
}
-// ListenAddrs returns the Multiaddr list of the hosts' listening addresses.
-func (n *Node) ListenAddrs() []multiaddr.Multiaddr {
- return n.host.Addrs()
-}
-
-// PeerID returns the node's peer ID.
func (n *Node) PeerID() peer.ID {
return n.host.ID()
}
-// PeerInfo returns the node's peer id and listening addresses.
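+// ListenAddrs returns the multiaddresses the node's host network is listening on.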
+func (n *Node) ListenAddrs() []multiaddr.Multiaddr {
+ return n.host.Network().ListenAddresses()
+}
+
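+// PeerInfo returns the node's peer ID and listening addresses.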
func (n *Node) PeerInfo() peer.AddrInfo {
return peer.AddrInfo{
ID: n.host.ID(),
- Addrs: n.host.Addrs(),
+ Addrs: n.host.Network().ListenAddresses(),
}
}
@@ -405,7 +404,12 @@ func newDHT(ctx context.Context, h host.Host, dsb ds.Batching) (*dualdht.DHT, er
}
// Close closes the node and all its services.
-func (n Node) Close() error {
- n.cancel()
- return n.Peer.Close()
+func (n Node) Close() {
+ if n.cancel != nil {
+ n.cancel()
+ }
+ if n.Peer != nil {
+ n.Peer.Close()
+ }
+ n.DB.Close()
}
diff --git a/net/node_test.go b/net/node_test.go
index 941d171726..3e50dfa797 100644
--- a/net/node_test.go
+++ b/net/node_test.go
@@ -60,9 +60,11 @@ func TestNewNode_WithEnableRelay_NoError(t *testing.T) {
func TestNewNode_WithDBClosed_NoError(t *testing.T) {
ctx := context.Background()
store := memory.NewDatastore(ctx)
+
db, err := db.NewDB(ctx, store, db.WithUpdateEvents())
require.NoError(t, err)
- db.Close(ctx)
+ db.Close()
+
_, err = NewNode(
context.Background(),
db,
@@ -111,8 +113,7 @@ func TestNodeClose_NoError(t *testing.T) {
db,
)
require.NoError(t, err)
- err = n.Close()
- require.NoError(t, err)
+ n.Close()
}
func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) {
@@ -223,7 +224,6 @@ func TestNodeConfig_NoError(t *testing.T) {
for k, v := range options.ListenAddrs {
require.Equal(t, expectedOptions.ListenAddrs[k], v)
}
-
require.Equal(t, expectedOptions.EnablePubSub, options.EnablePubSub)
require.Equal(t, expectedOptions.EnableRelay, options.EnableRelay)
}
@@ -448,9 +448,10 @@ func TestWaitForPubSubEvent_WithDifferentPeerAndContextClosed_NoError(t *testing
}
func TestWaitForPushLogByPeerEvent_WithSamePeer_NoError(t *testing.T) {
+ ctx := context.Background()
db := FixtureNewMemoryDBWithBroadcaster(t)
n, err := NewNode(
- context.Background(),
+ ctx,
db,
)
require.NoError(t, err)
@@ -472,9 +473,10 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T)
defer func() {
evtWaitTimeout = 10 * time.Second
}()
+ ctx := context.Background()
db := FixtureNewMemoryDBWithBroadcaster(t)
n, err := NewNode(
- context.Background(),
+ ctx,
db,
)
require.NoError(t, err)
@@ -490,9 +492,10 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T)
}
func TestWaitForPushLogByPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) {
+ ctx := context.Background()
db := FixtureNewMemoryDBWithBroadcaster(t)
n, err := NewNode(
- context.Background(),
+ ctx,
db,
)
require.NoError(t, err)
@@ -510,9 +513,10 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *
}
func TestWaitForPushLogFromPeerEvent_WithSamePeer_NoError(t *testing.T) {
+ ctx := context.Background()
db := FixtureNewMemoryDBWithBroadcaster(t)
n, err := NewNode(
- context.Background(),
+ ctx,
db,
)
require.NoError(t, err)
@@ -534,9 +538,10 @@ func TestWaitForPushLogFromPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T
defer func() {
evtWaitTimeout = 10 * time.Second
}()
+ ctx := context.Background()
db := FixtureNewMemoryDBWithBroadcaster(t)
n, err := NewNode(
- context.Background(),
+ ctx,
db,
)
require.NoError(t, err)
@@ -552,9 +557,10 @@ func TestWaitForPushLogFromPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T
}
func TestWaitForPushLogFromPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) {
+ ctx := context.Background()
db := FixtureNewMemoryDBWithBroadcaster(t)
n, err := NewNode(
- context.Background(),
+ ctx,
db,
)
require.NoError(t, err)
diff --git a/net/peer.go b/net/peer.go
index e24d124210..305df7caa9 100644
--- a/net/peer.go
+++ b/net/peer.go
@@ -14,7 +14,6 @@ package net
import (
"context"
- "fmt"
"sync"
"time"
@@ -194,7 +193,7 @@ func (p *Peer) Start() error {
}
// Close the peer node and all its internal workers/goroutines/loops.
-func (p *Peer) Close() error {
+func (p *Peer) Close() {
// close topics
if err := p.server.removeAllPubsubTopics(); err != nil {
log.ErrorE(p.ctx, "Error closing pubsub topics", err)
@@ -234,7 +233,6 @@ func (p *Peer) Close() error {
}
p.cancel()
- return nil
}
// handleBroadcast loop manages the transition of messages
@@ -307,124 +305,6 @@ func (p *Peer) RegisterNewDocument(
return p.server.publishLog(p.ctx, schemaID, req)
}
-// SetReplicator adds a target peer node as a replication destination for documents in our DB.
-func (p *Peer) SetReplicator(
- ctx context.Context,
- rep client.Replicator,
-) error {
- txn, err := p.db.NewTxn(ctx, true)
- if err != nil {
- return err
- }
- store := p.db.WithTxn(txn)
-
- err = p.setReplicator(ctx, store, rep.Info, rep.Schemas...)
- if err != nil {
- txn.Discard(ctx)
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-// setReplicator adds a target peer node as a replication destination for documents in our DB.
-func (p *Peer) setReplicator(
- ctx context.Context,
- store client.Store,
- info peer.AddrInfo,
- collectionNames ...string,
-) error {
- // verify collections
- collections := []client.Collection{}
- schemas := []string{}
- if len(collectionNames) == 0 {
- var err error
- collections, err = store.GetAllCollections(ctx)
- if err != nil {
- return errors.Wrap("failed to get all collections for replicator", err)
- }
- for _, col := range collections {
- schemas = append(schemas, col.SchemaID())
- }
- } else {
- for _, cName := range collectionNames {
- col, err := store.GetCollectionByName(ctx, cName)
- if err != nil {
- return errors.Wrap("failed to get collection for replicator", err)
- }
- collections = append(collections, col)
- schemas = append(schemas, col.SchemaID())
- }
- }
-
- // make sure it's not ourselves
- if info.ID == p.host.ID() {
- return errors.New("can't target ourselves as a replicator")
- }
- if err := info.ID.Validate(); err != nil {
- return err
- }
-
- // Add the destination's peer multiaddress in the peerstore.
- // This will be used during connection and stream creation by libp2p.
- p.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.PermanentAddrTTL)
-
- // make sure we're not duplicating things
- p.mu.Lock()
- for _, col := range collections {
- if reps, exists := p.replicators[col.SchemaID()]; exists {
- if _, exists := reps[info.ID]; exists {
- p.mu.Unlock()
- return errors.New(fmt.Sprintf(
- "Replicator already exists for %s with PeerID %s",
- col.Name(),
- info.ID,
- ))
- }
- } else {
- p.replicators[col.SchemaID()] = make(map[peer.ID]struct{})
- }
- // add to replicators list for the collection
- p.replicators[col.SchemaID()][info.ID] = struct{}{}
- }
- p.mu.Unlock()
-
- // Persist peer in datastore
- err := p.db.SetReplicator(ctx, client.Replicator{
- Info: info,
- Schemas: schemas,
- })
- if err != nil {
- return errors.Wrap("failed to persist replicator", err)
- }
-
- for _, col := range collections {
- // create read only txn and assign to col
- txn, err := p.db.NewTxn(ctx, true)
- if err != nil {
- return errors.Wrap("failed to get txn", err)
- }
- col = col.WithTxn(txn)
-
- // get dockeys (all)
- keysCh, err := col.GetAllDocKeys(ctx)
- if err != nil {
- txn.Discard(ctx)
- return errors.Wrap(
- fmt.Sprintf(
- "Failed to get dockey for replicator %s on %s",
- info.ID,
- col.Name(),
- ),
- err,
- )
- }
-
- p.pushToReplicator(ctx, txn, col, keysCh, info.ID)
- }
- return nil
-}
-
func (p *Peer) pushToReplicator(
ctx context.Context,
txn datastore.Txn,
@@ -491,102 +371,8 @@ func (p *Peer) pushToReplicator(
}
}
-// DeleteReplicator removes a peer node from the replicators.
-func (p *Peer) DeleteReplicator(
- ctx context.Context,
- rep client.Replicator,
-) error {
- log.Debug(ctx, "Received DeleteReplicator request")
-
- txn, err := p.db.NewTxn(ctx, true)
- if err != nil {
- return err
- }
- store := p.db.WithTxn(txn)
-
- err = p.deleteReplicator(ctx, store, rep.Info, rep.Schemas...)
- if err != nil {
- txn.Discard(ctx)
- return err
- }
-
- return txn.Commit(ctx)
-}
-
-func (p *Peer) deleteReplicator(
- ctx context.Context,
- store client.Store,
- info peer.AddrInfo,
- collectionNames ...string,
-) error {
- // make sure it's not ourselves
- if info.ID == p.host.ID() {
- return ErrSelfTargetForReplicator
- }
- if err := info.ID.Validate(); err != nil {
- return err
- }
-
- // verify collections
- schemas := []string{}
- schemaMap := make(map[string]struct{})
- if len(collectionNames) == 0 {
- var err error
- collections, err := store.GetAllCollections(ctx)
- if err != nil {
- return errors.Wrap("failed to get all collections for replicator", err)
- }
- for _, col := range collections {
- schemas = append(schemas, col.SchemaID())
- schemaMap[col.SchemaID()] = struct{}{}
- }
- } else {
- for _, cName := range collectionNames {
- col, err := store.GetCollectionByName(ctx, cName)
- if err != nil {
- return errors.Wrap("failed to get collection for replicator", err)
- }
- schemas = append(schemas, col.SchemaID())
- schemaMap[col.SchemaID()] = struct{}{}
- }
- }
-
- // make sure we're not duplicating things
- p.mu.Lock()
- defer p.mu.Unlock()
-
- totalSchemas := 0 // Lets keep track of how many schemas are left for the replicator.
- for schema, rep := range p.replicators {
- if _, exists := rep[info.ID]; exists {
- if _, toDelete := schemaMap[schema]; toDelete {
- delete(p.replicators[schema], info.ID)
- } else {
- totalSchemas++
- }
- }
- }
-
- if totalSchemas == 0 {
- // Remove the destination's peer multiaddress in the peerstore.
- p.host.Peerstore().ClearAddrs(info.ID)
- }
-
- // Delete peer in datastore
- return p.db.DeleteReplicator(ctx, client.Replicator{
- Info: peer.AddrInfo{ID: info.ID},
- Schemas: schemas,
- })
-}
-
-// GetAllReplicators returns all replicators and the schemas that are replicated to them.
-func (p *Peer) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) {
- log.Debug(ctx, "Received GetAllReplicators request")
-
- return p.db.GetAllReplicators(ctx)
-}
-
func (p *Peer) loadReplicators(ctx context.Context) error {
- reps, err := p.db.GetAllReplicators(ctx)
+ reps, err := p.GetAllReplicators(ctx)
if err != nil {
return errors.Wrap("failed to get replicators", err)
}
@@ -617,7 +403,7 @@ func (p *Peer) loadReplicators(ctx context.Context) error {
}
func (p *Peer) loadP2PCollections(ctx context.Context) (map[string]struct{}, error) {
- collections, err := p.db.GetAllP2PCollections(ctx)
+ collections, err := p.GetAllP2PCollections(ctx)
if err != nil && !errors.Is(err, ds.ErrNotFound) {
return nil, err
}
@@ -794,164 +580,3 @@ func (p *Peer) rollbackRemovePubSubTopics(topics []string, cause error) error {
}
return cause
}
-
-// AddP2PCollection adds the given collectionID to the pubsup topics.
-//
-// It will error if the given collectionID is invalid, in such a case some of the
-// changes to the server may still be applied.
-//
-// WARNING: Calling this on collections with a large number of documents may take a long time to process.
-func (p *Peer) AddP2PCollections(
- ctx context.Context,
- collectionIDs []string,
-) error {
- log.Debug(ctx, "Received AddP2PCollections request")
-
- txn, err := p.db.NewTxn(p.ctx, false)
- if err != nil {
- return err
- }
- defer txn.Discard(p.ctx)
- store := p.db.WithTxn(txn)
-
- // first let's make sure the collections actually exists
- storeCollections := []client.Collection{}
- for _, col := range collectionIDs {
- storeCol, err := store.GetCollectionBySchemaID(p.ctx, col)
- if err != nil {
- return err
- }
- storeCollections = append(storeCollections, storeCol)
- }
-
- // Ensure we can add all the collections to the store on the transaction
- // before adding to topics.
- err = store.AddP2PCollections(p.ctx, collectionIDs)
- if err != nil {
- return err
- }
-
- // Add pubsub topics and remove them if we get an error.
- addedTopics := []string{}
- for _, col := range collectionIDs {
- err = p.server.addPubSubTopic(col, true)
- if err != nil {
- return p.rollbackAddPubSubTopics(addedTopics, err)
- }
- addedTopics = append(addedTopics, col)
- }
-
- // After adding the collection topics, we remove the collections' documents
- // from the pubsub topics to avoid receiving duplicate events.
- removedTopics := []string{}
- for _, col := range storeCollections {
- keyChan, err := col.GetAllDocKeys(p.ctx)
- if err != nil {
- return err
- }
- for key := range keyChan {
- err := p.server.removePubSubTopic(key.Key.String())
- if err != nil {
- return p.rollbackRemovePubSubTopics(removedTopics, err)
- }
- removedTopics = append(removedTopics, key.Key.String())
- }
- }
-
- if err = txn.Commit(p.ctx); err != nil {
- err = p.rollbackRemovePubSubTopics(removedTopics, err)
- return p.rollbackAddPubSubTopics(addedTopics, err)
- }
-
- return nil
-}
-
-// RemoveP2PCollection removes the given collectionID from the pubsup topics.
-//
-// It will error if the given collectionID is invalid, in such a case some of the
-// changes to the server may still be applied.
-//
-// WARNING: Calling this on collections with a large number of documents may take a long time to process.
-func (p *Peer) RemoveP2PCollections(
- ctx context.Context,
- collectionIDs []string,
-) error {
- log.Debug(ctx, "Received RemoveP2PCollections request")
-
- txn, err := p.db.NewTxn(p.ctx, false)
- if err != nil {
- return err
- }
- defer txn.Discard(p.ctx)
- store := p.db.WithTxn(txn)
-
- // first let's make sure the collections actually exists
- storeCollections := []client.Collection{}
- for _, col := range collectionIDs {
- storeCol, err := store.GetCollectionBySchemaID(p.ctx, col)
- if err != nil {
- return err
- }
- storeCollections = append(storeCollections, storeCol)
- }
-
- // Ensure we can remove all the collections to the store on the transaction
- // before adding to topics.
- err = store.RemoveP2PCollections(p.ctx, collectionIDs)
- if err != nil {
- return err
- }
-
- // Remove pubsub topics and add them back if we get an error.
- removedTopics := []string{}
- for _, col := range collectionIDs {
- err = p.server.removePubSubTopic(col)
- if err != nil {
- return p.rollbackRemovePubSubTopics(removedTopics, err)
- }
- removedTopics = append(removedTopics, col)
- }
-
- // After removing the collection topics, we add back the collections' documents
- // to the pubsub topics.
- addedTopics := []string{}
- for _, col := range storeCollections {
- keyChan, err := col.GetAllDocKeys(p.ctx)
- if err != nil {
- return err
- }
- for key := range keyChan {
- err := p.server.addPubSubTopic(key.Key.String(), true)
- if err != nil {
- return p.rollbackAddPubSubTopics(addedTopics, err)
- }
- addedTopics = append(addedTopics, key.Key.String())
- }
- }
-
- if err = txn.Commit(p.ctx); err != nil {
- err = p.rollbackAddPubSubTopics(addedTopics, err)
- return p.rollbackRemovePubSubTopics(removedTopics, err)
- }
-
- return nil
-}
-
-// GetAllP2PCollections gets all the collectionIDs from the pubsup topics
-func (p *Peer) GetAllP2PCollections(ctx context.Context) ([]string, error) {
- log.Debug(ctx, "Received GetAllP2PCollections request")
-
- txn, err := p.db.NewTxn(p.ctx, false)
- if err != nil {
- return nil, err
- }
- store := p.db.WithTxn(txn)
-
- collections, err := store.GetAllP2PCollections(p.ctx)
- if err != nil {
- txn.Discard(p.ctx)
- return nil, err
- }
-
- return collections, txn.Commit(p.ctx)
-}
diff --git a/net/peer_collection.go b/net/peer_collection.go
new file mode 100644
index 0000000000..91e3f66154
--- /dev/null
+++ b/net/peer_collection.go
@@ -0,0 +1,173 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package net
+
+import (
+ "context"
+
+ dsq "github.com/ipfs/go-datastore/query"
+
+ "github.com/sourcenetwork/defradb/client"
+ "github.com/sourcenetwork/defradb/core"
+)
+
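+// marker is the value stored under a P2P collection key to mark the collection as subscribed to.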
+const marker = byte(0xff)
+
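+// AddP2PCollections adds the given collectionIDs to the pubsub topics.
+//
+// It will error if a given collectionID is invalid, in such a case some of the
+// changes to the server may still be applied.
+//
+// WARNING: Calling this on collections with a large number of documents may take a long time to process.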
+func (p *Peer) AddP2PCollections(ctx context.Context, collectionIDs []string) error {
+ txn, err := p.db.NewTxn(p.ctx, false)
+ if err != nil {
+ return err
+ }
+ defer txn.Discard(p.ctx)
+
+	// first, let's make sure the collections actually exist
+ storeCollections := []client.Collection{}
+ for _, col := range collectionIDs {
+ storeCol, err := p.db.WithTxn(txn).GetCollectionBySchemaID(p.ctx, col)
+ if err != nil {
+ return err
+ }
+ storeCollections = append(storeCollections, storeCol)
+ }
+
+ // Ensure we can add all the collections to the store on the transaction
+ // before adding to topics.
+ for _, col := range storeCollections {
+ key := core.NewP2PCollectionKey(col.SchemaID())
+ err = txn.Systemstore().Put(ctx, key.ToDS(), []byte{marker})
+ if err != nil {
+ return err
+ }
+ }
+
+ // Add pubsub topics and remove them if we get an error.
+ addedTopics := []string{}
+ for _, col := range collectionIDs {
+ err = p.server.addPubSubTopic(col, true)
+ if err != nil {
+ return p.rollbackAddPubSubTopics(addedTopics, err)
+ }
+ addedTopics = append(addedTopics, col)
+ }
+
+ // After adding the collection topics, we remove the collections' documents
+ // from the pubsub topics to avoid receiving duplicate events.
+ removedTopics := []string{}
+ for _, col := range storeCollections {
+ keyChan, err := col.GetAllDocKeys(p.ctx)
+ if err != nil {
+ return err
+ }
+ for key := range keyChan {
+ err := p.server.removePubSubTopic(key.Key.String())
+ if err != nil {
+ return p.rollbackRemovePubSubTopics(removedTopics, err)
+ }
+ removedTopics = append(removedTopics, key.Key.String())
+ }
+ }
+
+ if err = txn.Commit(p.ctx); err != nil {
+ err = p.rollbackRemovePubSubTopics(removedTopics, err)
+ return p.rollbackAddPubSubTopics(addedTopics, err)
+ }
+
+ return nil
+}
+
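+// RemoveP2PCollections removes the given collectionIDs from the pubsub topics.
+//
+// It will error if a given collectionID is invalid, in such a case some of the
+// changes to the server may still be applied.
+//
+// WARNING: Calling this on collections with a large number of documents may take a long time to process.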
+func (p *Peer) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error {
+ txn, err := p.db.NewTxn(p.ctx, false)
+ if err != nil {
+ return err
+ }
+ defer txn.Discard(p.ctx)
+
+	// first, let's make sure the collections actually exist
+ storeCollections := []client.Collection{}
+ for _, col := range collectionIDs {
+ storeCol, err := p.db.WithTxn(txn).GetCollectionBySchemaID(p.ctx, col)
+ if err != nil {
+ return err
+ }
+ storeCollections = append(storeCollections, storeCol)
+ }
+
+	// Ensure we can remove all the collections from the store on the transaction
+	// before removing the topics.
+ for _, col := range storeCollections {
+ key := core.NewP2PCollectionKey(col.SchemaID())
+ err = txn.Systemstore().Delete(ctx, key.ToDS())
+ if err != nil {
+ return err
+ }
+ }
+
+ // Remove pubsub topics and add them back if we get an error.
+ removedTopics := []string{}
+ for _, col := range collectionIDs {
+ err = p.server.removePubSubTopic(col)
+ if err != nil {
+ return p.rollbackRemovePubSubTopics(removedTopics, err)
+ }
+ removedTopics = append(removedTopics, col)
+ }
+
+ // After removing the collection topics, we add back the collections' documents
+ // to the pubsub topics.
+ addedTopics := []string{}
+ for _, col := range storeCollections {
+ keyChan, err := col.GetAllDocKeys(p.ctx)
+ if err != nil {
+ return err
+ }
+ for key := range keyChan {
+ err := p.server.addPubSubTopic(key.Key.String(), true)
+ if err != nil {
+ return p.rollbackAddPubSubTopics(addedTopics, err)
+ }
+ addedTopics = append(addedTopics, key.Key.String())
+ }
+ }
+
+ if err = txn.Commit(p.ctx); err != nil {
+ err = p.rollbackAddPubSubTopics(addedTopics, err)
+ return p.rollbackRemovePubSubTopics(removedTopics, err)
+ }
+
+ return nil
+}
+
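+// GetAllP2PCollections returns the collectionIDs of all P2P collections the node is subscribed to.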
+func (p *Peer) GetAllP2PCollections(ctx context.Context) ([]string, error) {
+ txn, err := p.db.NewTxn(p.ctx, true)
+ if err != nil {
+ return nil, err
+ }
+ defer txn.Discard(p.ctx)
+
+ query := dsq.Query{
+ Prefix: core.NewP2PCollectionKey("").ToString(),
+ }
+ results, err := txn.Systemstore().Query(ctx, query)
+ if err != nil {
+ return nil, err
+ }
+
+ collectionIDs := []string{}
+ for result := range results.Next() {
+ key, err := core.NewP2PCollectionKeyFromString(result.Key)
+ if err != nil {
+ return nil, err
+ }
+ collectionIDs = append(collectionIDs, key.CollectionID)
+ }
+
+ return collectionIDs, nil
+}
diff --git a/net/peer_replicator.go b/net/peer_replicator.go
new file mode 100644
index 0000000000..ab3293625e
--- /dev/null
+++ b/net/peer_replicator.go
@@ -0,0 +1,207 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package net
+
+import (
+ "context"
+ "encoding/json"
+
+ dsq "github.com/ipfs/go-datastore/query"
+ "github.com/libp2p/go-libp2p/core/peer"
+ "github.com/libp2p/go-libp2p/core/peerstore"
+
+ "github.com/sourcenetwork/defradb/client"
+ "github.com/sourcenetwork/defradb/core"
+)
+
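+// SetReplicator adds a target peer node as a replication destination for documents in our DB.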
+func (p *Peer) SetReplicator(ctx context.Context, rep client.Replicator) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ txn, err := p.db.NewTxn(ctx, false)
+ if err != nil {
+ return err
+ }
+ defer txn.Discard(ctx)
+
+ if rep.Info.ID == p.host.ID() {
+ return ErrSelfTargetForReplicator
+ }
+ if err := rep.Info.ID.Validate(); err != nil {
+ return err
+ }
+
+ var collections []client.Collection
+ switch {
+ case len(rep.Schemas) > 0:
+		// if specific collections are chosen, get them by name
+ for _, name := range rep.Schemas {
+ col, err := p.db.WithTxn(txn).GetCollectionByName(ctx, name)
+ if err != nil {
+ return NewErrReplicatorCollections(err)
+ }
+ collections = append(collections, col)
+ }
+
+ default:
+ // default to all collections
+ collections, err = p.db.WithTxn(txn).GetAllCollections(ctx)
+ if err != nil {
+ return NewErrReplicatorCollections(err)
+ }
+ }
+ rep.Schemas = nil
+
+ // Add the destination's peer multiaddress in the peerstore.
+ // This will be used during connection and stream creation by libp2p.
+ p.host.Peerstore().AddAddrs(rep.Info.ID, rep.Info.Addrs, peerstore.PermanentAddrTTL)
+
+ var added []client.Collection
+ for _, col := range collections {
+ reps, exists := p.replicators[col.SchemaID()]
+ if !exists {
+ p.replicators[col.SchemaID()] = make(map[peer.ID]struct{})
+ }
+ if _, exists := reps[rep.Info.ID]; !exists {
+ // keep track of newly added collections so we don't
+ // push logs to a replicator peer multiple times.
+ p.replicators[col.SchemaID()][rep.Info.ID] = struct{}{}
+ added = append(added, col)
+ }
+ rep.Schemas = append(rep.Schemas, col.SchemaID())
+ }
+
+ // persist replicator to the datastore
+ repBytes, err := json.Marshal(rep)
+ if err != nil {
+ return err
+ }
+ key := core.NewReplicatorKey(rep.Info.ID.String())
+ err = txn.Systemstore().Put(ctx, key.ToDS(), repBytes)
+ if err != nil {
+ return err
+ }
+
+ // push all collection documents to the replicator peer
+ for _, col := range added {
+ keysCh, err := col.WithTxn(txn).GetAllDocKeys(ctx)
+ if err != nil {
+ return NewErrReplicatorDocKey(err, col.Name(), rep.Info.ID)
+ }
+ p.pushToReplicator(ctx, txn, col, keysCh, rep.Info.ID)
+ }
+
+ return txn.Commit(ctx)
+}
+
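+// DeleteReplicator removes a peer node from the replicators.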
+func (p *Peer) DeleteReplicator(ctx context.Context, rep client.Replicator) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ txn, err := p.db.NewTxn(ctx, false)
+ if err != nil {
+ return err
+ }
+ defer txn.Discard(ctx)
+
+ if rep.Info.ID == p.host.ID() {
+ return ErrSelfTargetForReplicator
+ }
+ if err := rep.Info.ID.Validate(); err != nil {
+ return err
+ }
+
+ var collections []client.Collection
+ switch {
+ case len(rep.Schemas) > 0:
+		// if specific collections are chosen, get them by name
+ for _, name := range rep.Schemas {
+ col, err := p.db.WithTxn(txn).GetCollectionByName(ctx, name)
+ if err != nil {
+ return NewErrReplicatorCollections(err)
+ }
+ collections = append(collections, col)
+ }
+ // make sure the replicator exists in the datastore
+ key := core.NewReplicatorKey(rep.Info.ID.String())
+ _, err = txn.Systemstore().Get(ctx, key.ToDS())
+ if err != nil {
+ return err
+ }
+
+ default:
+ // default to all collections
+ collections, err = p.db.WithTxn(txn).GetAllCollections(ctx)
+ if err != nil {
+ return NewErrReplicatorCollections(err)
+ }
+ }
+ rep.Schemas = nil
+
+ schemaMap := make(map[string]struct{})
+ for _, col := range collections {
+ schemaMap[col.SchemaID()] = struct{}{}
+ }
+
+ // update replicators and add remaining schemas to rep
+ for key, val := range p.replicators {
+ if _, exists := val[rep.Info.ID]; exists {
+ if _, toDelete := schemaMap[key]; toDelete {
+ delete(p.replicators[key], rep.Info.ID)
+ } else {
+ rep.Schemas = append(rep.Schemas, key)
+ }
+ }
+ }
+
+ if len(rep.Schemas) == 0 {
+ // Remove the destination's peer multiaddress in the peerstore.
+ p.host.Peerstore().ClearAddrs(rep.Info.ID)
+ }
+
+ // persist the replicator to the store, deleting it if no schemas remain
+ key := core.NewReplicatorKey(rep.Info.ID.String())
+ if len(rep.Schemas) == 0 {
+ return txn.Systemstore().Delete(ctx, key.ToDS())
+ }
+ repBytes, err := json.Marshal(rep)
+ if err != nil {
+ return err
+ }
+ return txn.Systemstore().Put(ctx, key.ToDS(), repBytes)
+}
+
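+// GetAllReplicators returns all replicators and the schemas that are replicated to them.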
+func (p *Peer) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) {
+ txn, err := p.db.NewTxn(ctx, true)
+ if err != nil {
+ return nil, err
+ }
+ defer txn.Discard(ctx)
+
+	// create replicator system prefix query
+ query := dsq.Query{
+ Prefix: core.NewReplicatorKey("").ToString(),
+ }
+ results, err := txn.Systemstore().Query(ctx, query)
+ if err != nil {
+ return nil, err
+ }
+
+ var reps []client.Replicator
+ for result := range results.Next() {
+ var rep client.Replicator
+ if err = json.Unmarshal(result.Value, &rep); err != nil {
+ return nil, err
+ }
+ reps = append(reps, rep)
+ }
+ return reps, nil
+}
diff --git a/net/peer_test.go b/net/peer_test.go
index 15a4a2e55a..92b7424e9f 100644
--- a/net/peer_test.go
+++ b/net/peer_test.go
@@ -201,7 +201,7 @@ func TestStartAndClose_NoError(t *testing.T) {
err := n.Start()
require.NoError(t, err)
- db.Close(ctx)
+ db.Close()
}
func TestStart_WithKnownPeer_NoError(t *testing.T) {
@@ -236,8 +236,8 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) {
err = n2.Start()
require.NoError(t, err)
- db1.Close(ctx)
- db2.Close(ctx)
+ db1.Close()
+ db2.Close()
}
func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) {
@@ -268,9 +268,7 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) {
t.Fatal(err)
}
n2.Bootstrap(addrs)
-
- err = n1.Close()
- require.NoError(t, err)
+ n1.Close()
// give time for n1 to close
time.Sleep(100 * time.Millisecond)
@@ -278,8 +276,8 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) {
err = n2.Start()
require.NoError(t, err)
- db1.Close(ctx)
- db2.Close(ctx)
+ db1.Close()
+ db2.Close()
}
func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) {
@@ -298,7 +296,7 @@ func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) {
err = n.Start()
require.ErrorIs(t, err, ErrNilUpdateChannel)
- db.Close(ctx)
+ db.Close()
}
func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) {
@@ -319,7 +317,7 @@ func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) {
err = n.Start()
require.ErrorContains(t, err, "cannot subscribe to a closed channel")
- db.Close(ctx)
+ db.Close()
}
func TestRegisterNewDocument_NoError(t *testing.T) {
@@ -412,7 +410,7 @@ func TestSetReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) {
ctx := context.Background()
db, n := newTestNode(ctx, t)
- db.Close(ctx)
+ db.Close()
info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N")
require.NoError(t, err)
@@ -435,7 +433,7 @@ func TestSetReplicator_WithUndefinedCollection_KeyNotFoundError(t *testing.T) {
Info: *info,
Schemas: []string{"User"},
})
- require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found")
+ require.ErrorContains(t, err, "failed to get collections for replicator: datastore: key not found")
}
func TestSetReplicator_ForAllCollections_NoError(t *testing.T) {
@@ -488,10 +486,15 @@ func TestDeleteReplicator_WithDBClosed_DataStoreClosedError(t *testing.T) {
ctx := context.Background()
db, n := newTestNode(ctx, t)
- db.Close(ctx)
+ info := peer.AddrInfo{
+ ID: n.PeerID(),
+ Addrs: n.ListenAddrs(),
+ }
+
+ db.Close()
err := n.Peer.DeleteReplicator(ctx, client.Replicator{
- Info: n.PeerInfo(),
+ Info: info,
Schemas: []string{"User"},
})
require.ErrorContains(t, err, "datastore closed")
@@ -518,7 +521,7 @@ func TestDeleteReplicator_WithInvalidCollection_KeyNotFoundError(t *testing.T) {
Info: n2.PeerInfo(),
Schemas: []string{"User"},
})
- require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found")
+ require.ErrorContains(t, err, "failed to get collections for replicator: datastore: key not found")
}
func TestDeleteReplicator_WithCollectionAndPreviouslySetReplicator_NoError(t *testing.T) {
@@ -603,7 +606,7 @@ func TestGetAllReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) {
ctx := context.Background()
db, n := newTestNode(ctx, t)
- db.Close(ctx)
+ db.Close()
_, err := n.Peer.GetAllReplicators(ctx)
require.ErrorContains(t, err, "datastore closed")
@@ -613,7 +616,7 @@ func TestLoadReplicators_WithDBClosed_DatastoreClosedError(t *testing.T) {
ctx := context.Background()
db, n := newTestNode(ctx, t)
- db.Close(ctx)
+ db.Close()
err := n.Peer.loadReplicators(ctx)
require.ErrorContains(t, err, "datastore closed")
diff --git a/net/server_test.go b/net/server_test.go
index 937b4c34b4..86ef798029 100644
--- a/net/server_test.go
+++ b/net/server_test.go
@@ -37,7 +37,8 @@ func TestNewServerSimple(t *testing.T) {
func TestNewServerWithDBClosed(t *testing.T) {
ctx := context.Background()
db, n := newTestNode(ctx, t)
- db.Close(ctx)
+ db.Close()
+
_, err := newServer(n.Peer, db)
require.ErrorIs(t, err, memory.ErrClosed)
}
diff --git a/tests/bench/collection/utils.go b/tests/bench/collection/utils.go
index 68df9531ed..dfb63fc86b 100644
--- a/tests/bench/collection/utils.go
+++ b/tests/bench/collection/utils.go
@@ -38,7 +38,7 @@ func runCollectionBenchGet(
if err != nil {
return err
}
- defer db.Close(ctx)
+ defer db.Close()
dockeys, err := benchutils.BackfillBenchmarkDB(
b,
@@ -123,7 +123,7 @@ func runCollectionBenchCreate(
if err != nil {
return err
}
- defer db.Close(ctx)
+ defer db.Close()
_, err = benchutils.BackfillBenchmarkDB(b, ctx, collections, fixture, docCount, opCount, doSync)
if err != nil {
@@ -149,7 +149,7 @@ func runCollectionBenchCreateMany(
if err != nil {
return err
}
- defer db.Close(ctx)
+ defer db.Close()
_, err = benchutils.BackfillBenchmarkDB(b, ctx, collections, fixture, docCount, opCount, doSync)
if err != nil {
diff --git a/tests/bench/query/planner/utils.go b/tests/bench/query/planner/utils.go
index 2f70245b23..273cae0e0b 100644
--- a/tests/bench/query/planner/utils.go
+++ b/tests/bench/query/planner/utils.go
@@ -59,7 +59,7 @@ func runMakePlanBench(
if err != nil {
return err
}
- defer db.Close(ctx)
+ defer db.Close()
parser, err := buildParser(ctx, fixture)
if err != nil {
diff --git a/tests/bench/query/simple/utils.go b/tests/bench/query/simple/utils.go
index e7f374dc40..8c6f82579b 100644
--- a/tests/bench/query/simple/utils.go
+++ b/tests/bench/query/simple/utils.go
@@ -39,7 +39,7 @@ func RunQueryBenchGet(
if err != nil {
return err
}
- defer db.Close(ctx)
+ defer db.Close()
dockeys, err := benchutils.BackfillBenchmarkDB(
b,
diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go
index 6176273b02..8db991063e 100644
--- a/tests/clients/cli/wrapper.go
+++ b/tests/clients/cli/wrapper.go
@@ -27,41 +27,54 @@ import (
"github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/events"
"github.com/sourcenetwork/defradb/http"
+ "github.com/sourcenetwork/defradb/net"
)
-var _ client.DB = (*Wrapper)(nil)
+var _ client.P2P = (*Wrapper)(nil)
type Wrapper struct {
- db client.DB
- store client.Store
+ node *net.Node
cmd *cliWrapper
handler *http.Handler
httpServer *httptest.Server
}
-func NewWrapper(db client.DB) *Wrapper {
- handler := http.NewHandler(db, http.ServerOptions{})
+func NewWrapper(node *net.Node) *Wrapper {
+ handler := http.NewHandler(node, http.ServerOptions{})
httpServer := httptest.NewServer(handler)
cmd := newCliWrapper(httpServer.URL)
return &Wrapper{
- db: db,
- store: db,
+ node: node,
cmd: cmd,
httpServer: httpServer,
handler: handler,
}
}
+func (w *Wrapper) PeerInfo() peer.AddrInfo {
+ args := []string{"client", "p2p", "info"}
+
+ data, err := w.cmd.execute(context.Background(), args)
+ if err != nil {
+ panic(fmt.Sprintf("failed to get peer info: %v", err))
+ }
+ var info peer.AddrInfo
+ if err := json.Unmarshal(data, &info); err != nil {
+		panic(fmt.Sprintf("failed to unmarshal peer info: %v", err))
+ }
+ return info
+}
+
func (w *Wrapper) SetReplicator(ctx context.Context, rep client.Replicator) error {
args := []string{"client", "p2p", "replicator", "set"}
args = append(args, "--collection", strings.Join(rep.Schemas, ","))
- addrs, err := peer.AddrInfoToP2pAddrs(&rep.Info)
+ info, err := json.Marshal(rep.Info)
if err != nil {
return err
}
- args = append(args, addrs[0].String())
+ args = append(args, string(info))
_, err = w.cmd.execute(ctx, args)
return err
@@ -69,12 +82,13 @@ func (w *Wrapper) SetReplicator(ctx context.Context, rep client.Replicator) erro
func (w *Wrapper) DeleteReplicator(ctx context.Context, rep client.Replicator) error {
args := []string{"client", "p2p", "replicator", "delete"}
+ args = append(args, "--collection", strings.Join(rep.Schemas, ","))
- addrs, err := peer.AddrInfoToP2pAddrs(&rep.Info)
+ info, err := json.Marshal(rep.Info)
if err != nil {
return err
}
- args = append(args, addrs[0].String())
+ args = append(args, string(info))
_, err = w.cmd.execute(ctx, args)
return err
@@ -386,34 +400,45 @@ func (w *Wrapper) NewConcurrentTxn(ctx context.Context, readOnly bool) (datastor
func (w *Wrapper) WithTxn(tx datastore.Txn) client.Store {
return &Wrapper{
- db: w.db,
- store: w.db.WithTxn(tx),
- cmd: w.cmd.withTxn(tx),
+ node: w.node,
+ cmd: w.cmd.withTxn(tx),
}
}
func (w *Wrapper) Root() datastore.RootStore {
- return w.db.Root()
+ return w.node.Root()
}
func (w *Wrapper) Blockstore() blockstore.Blockstore {
- return w.db.Blockstore()
+ return w.node.Blockstore()
}
-func (w *Wrapper) Close(ctx context.Context) {
+func (w *Wrapper) Close() {
w.httpServer.CloseClientConnections()
w.httpServer.Close()
- w.db.Close(ctx)
+ w.node.Close()
}
func (w *Wrapper) Events() events.Events {
- return w.db.Events()
+ return w.node.Events()
}
func (w *Wrapper) MaxTxnRetries() int {
- return w.db.MaxTxnRetries()
+ return w.node.MaxTxnRetries()
}
func (w *Wrapper) PrintDump(ctx context.Context) error {
- return w.db.PrintDump(ctx)
+ return w.node.PrintDump(ctx)
+}
+
+func (w *Wrapper) Bootstrap(addrs []peer.AddrInfo) {
+ w.node.Bootstrap(addrs)
+}
+
+func (w *Wrapper) WaitForPushLogByPeerEvent(id peer.ID) error {
+ return w.node.WaitForPushLogByPeerEvent(id)
+}
+
+func (w *Wrapper) WaitForPushLogFromPeerEvent(id peer.ID) error {
+ return w.node.WaitForPushLogFromPeerEvent(id)
}
diff --git a/tests/clients/clients.go b/tests/clients/clients.go
new file mode 100644
index 0000000000..10df14212f
--- /dev/null
+++ b/tests/clients/clients.go
@@ -0,0 +1,26 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package clients
+
+import (
+ "github.com/libp2p/go-libp2p/core/peer"
+
+ "github.com/sourcenetwork/defradb/client"
+)
+
+// Client implements the P2P interface along with a few other methods
+// required for testing.
+type Client interface {
+ client.P2P
+ Bootstrap([]peer.AddrInfo)
+ WaitForPushLogByPeerEvent(peer.ID) error
+ WaitForPushLogFromPeerEvent(peer.ID) error
+}
diff --git a/tests/clients/http/wrapper.go b/tests/clients/http/wrapper.go
index b5ef61c037..35cd55f466 100644
--- a/tests/clients/http/wrapper.go
+++ b/tests/clients/http/wrapper.go
@@ -15,26 +15,28 @@ import (
"net/http/httptest"
blockstore "github.com/ipfs/boxo/blockstore"
+ "github.com/libp2p/go-libp2p/core/peer"
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/datastore"
"github.com/sourcenetwork/defradb/events"
"github.com/sourcenetwork/defradb/http"
+ "github.com/sourcenetwork/defradb/net"
)
-var _ client.DB = (*Wrapper)(nil)
+var _ client.P2P = (*Wrapper)(nil)
// Wrapper combines an HTTP client and server into a
// single struct that implements the client.DB interface.
type Wrapper struct {
- db client.DB
+ node *net.Node
handler *http.Handler
client *http.Client
httpServer *httptest.Server
}
-func NewWrapper(db client.DB) (*Wrapper, error) {
- handler := http.NewHandler(db, http.ServerOptions{})
+func NewWrapper(node *net.Node) (*Wrapper, error) {
+ handler := http.NewHandler(node, http.ServerOptions{})
httpServer := httptest.NewServer(handler)
client, err := http.NewClient(httpServer.URL)
@@ -43,13 +45,17 @@ func NewWrapper(db client.DB) (*Wrapper, error) {
}
return &Wrapper{
- db,
+ node,
handler,
client,
httpServer,
}, nil
}
+func (w *Wrapper) PeerInfo() peer.AddrInfo {
+ return w.client.PeerInfo()
+}
+
func (w *Wrapper) SetReplicator(ctx context.Context, rep client.Replicator) error {
return w.client.SetReplicator(ctx, rep)
}
@@ -155,27 +161,39 @@ func (w *Wrapper) WithTxn(tx datastore.Txn) client.Store {
}
func (w *Wrapper) Root() datastore.RootStore {
- return w.db.Root()
+ return w.node.Root()
}
func (w *Wrapper) Blockstore() blockstore.Blockstore {
- return w.db.Blockstore()
+ return w.node.Blockstore()
}
-func (w *Wrapper) Close(ctx context.Context) {
+func (w *Wrapper) Close() {
w.httpServer.CloseClientConnections()
w.httpServer.Close()
- w.db.Close(ctx)
+ w.node.Close()
}
func (w *Wrapper) Events() events.Events {
- return w.db.Events()
+ return w.node.Events()
}
func (w *Wrapper) MaxTxnRetries() int {
- return w.db.MaxTxnRetries()
+ return w.node.MaxTxnRetries()
}
func (w *Wrapper) PrintDump(ctx context.Context) error {
- return w.db.PrintDump(ctx)
+ return w.node.PrintDump(ctx)
+}
+
+func (w *Wrapper) Bootstrap(addrs []peer.AddrInfo) {
+ w.node.Bootstrap(addrs)
+}
+
+func (w *Wrapper) WaitForPushLogByPeerEvent(id peer.ID) error {
+ return w.node.WaitForPushLogByPeerEvent(id)
+}
+
+func (w *Wrapper) WaitForPushLogFromPeerEvent(id peer.ID) error {
+ return w.node.WaitForPushLogFromPeerEvent(id)
}
diff --git a/tests/integration/client.go b/tests/integration/client.go
new file mode 100644
index 0000000000..a6159900cc
--- /dev/null
+++ b/tests/integration/client.go
@@ -0,0 +1,85 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package tests
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ "github.com/sourcenetwork/defradb/net"
+ "github.com/sourcenetwork/defradb/tests/clients"
+ "github.com/sourcenetwork/defradb/tests/clients/cli"
+ "github.com/sourcenetwork/defradb/tests/clients/http"
+)
+
+const (
+ clientGoEnvName = "DEFRA_CLIENT_GO"
+ clientHttpEnvName = "DEFRA_CLIENT_HTTP"
+ clientCliEnvName = "DEFRA_CLIENT_CLI"
+)
+
+type ClientType string
+
+const (
+	// GoClientType enables running the test suite using
+	// the Go implementation of the client.DB interface.
+	GoClientType ClientType = "go"
+	// HTTPClientType enables running the test suite using
+	// the HTTP implementation of the client.DB interface.
+	HTTPClientType ClientType = "http"
+	// CLIClientType enables running the test suite using
+	// the CLI implementation of the client.DB interface.
+	CLIClientType ClientType = "cli"
+)
+
+var (
+ httpClient bool
+ goClient bool
+ cliClient bool
+)
+
+func init() {
+	// We use environment variables instead of flags because `go test ./...` fails for all packages
+ // that don't have the flag defined
+ httpClient, _ = strconv.ParseBool(os.Getenv(clientHttpEnvName))
+ goClient, _ = strconv.ParseBool(os.Getenv(clientGoEnvName))
+ cliClient, _ = strconv.ParseBool(os.Getenv(clientCliEnvName))
+
+ if !goClient && !httpClient && !cliClient {
+ // Default is to test go client type.
+ goClient = true
+ }
+}
+
+// setupClient returns the client implementation for the current
+// testing state. The client type on the test state is used to
+// select the client implementation to use.
+func setupClient(s *state, node *net.Node) (impl clients.Client, err error) {
+ switch s.clientType {
+ case HTTPClientType:
+ impl, err = http.NewWrapper(node)
+
+ case CLIClientType:
+ impl = cli.NewWrapper(node)
+
+ case GoClientType:
+ impl = node
+
+ default:
+		err = fmt.Errorf("invalid client type: %v", s.clientType)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ return
+}
diff --git a/tests/integration/db.go b/tests/integration/db.go
new file mode 100644
index 0000000000..561546cfef
--- /dev/null
+++ b/tests/integration/db.go
@@ -0,0 +1,151 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package tests
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strconv"
+ "testing"
+
+ badger "github.com/dgraph-io/badger/v4"
+
+ "github.com/sourcenetwork/defradb/client"
+ badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4"
+ "github.com/sourcenetwork/defradb/datastore/memory"
+ "github.com/sourcenetwork/defradb/db"
+ changeDetector "github.com/sourcenetwork/defradb/tests/change_detector"
+)
+
+type DatabaseType string
+
+const (
+ memoryBadgerEnvName = "DEFRA_BADGER_MEMORY"
+ fileBadgerEnvName = "DEFRA_BADGER_FILE"
+ fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH"
+ inMemoryEnvName = "DEFRA_IN_MEMORY"
+)
+
+const (
+ badgerIMType DatabaseType = "badger-in-memory"
+ defraIMType DatabaseType = "defra-memory-datastore"
+ badgerFileType DatabaseType = "badger-file-system"
+)
+
+var (
+ badgerInMemory bool
+ badgerFile bool
+ inMemoryStore bool
+ databaseDir string
+)
+
+func init() {
+	// We use environment variables instead of flags because `go test ./...` fails for all packages
+ // that don't have the flag defined
+ badgerFile, _ = strconv.ParseBool(os.Getenv(fileBadgerEnvName))
+ badgerInMemory, _ = strconv.ParseBool(os.Getenv(memoryBadgerEnvName))
+ inMemoryStore, _ = strconv.ParseBool(os.Getenv(inMemoryEnvName))
+
+ if changeDetector.Enabled {
+ // Change detector only uses badger file db type.
+ badgerFile = true
+ badgerInMemory = false
+ inMemoryStore = false
+ } else if !badgerInMemory && !badgerFile && !inMemoryStore {
+ // Default is to test all but filesystem db types.
+ badgerFile = false
+ badgerInMemory = true
+ inMemoryStore = true
+ }
+}
+
+func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) {
+ opts := badgerds.Options{
+ Options: badger.DefaultOptions("").WithInMemory(true),
+ }
+ rootstore, err := badgerds.NewDatastore("", &opts)
+ if err != nil {
+ return nil, err
+ }
+ db, err := db.NewDB(ctx, rootstore, dbopts...)
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
+
+func NewInMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) {
+ db, err := db.NewDB(ctx, memory.NewDatastore(ctx), dbopts...)
+ if err != nil {
+ return nil, err
+ }
+ return db, nil
+}
+
+func NewBadgerFileDB(ctx context.Context, t testing.TB, dbopts ...db.Option) (client.DB, string, error) {
+ var dbPath string
+ switch {
+ case databaseDir != "":
+ // restarting database
+ dbPath = databaseDir
+
+ case changeDetector.Enabled:
+ // change detector
+ dbPath = changeDetector.DatabaseDir(t)
+
+ default:
+ // default test case
+ dbPath = t.TempDir()
+ }
+
+ opts := &badgerds.Options{
+ Options: badger.DefaultOptions(dbPath),
+ }
+ rootstore, err := badgerds.NewDatastore(dbPath, opts)
+ if err != nil {
+ return nil, "", err
+ }
+ db, err := db.NewDB(ctx, rootstore, dbopts...)
+ if err != nil {
+ return nil, "", err
+ }
+ return db, dbPath, err
+}
+
+// setupDatabase returns the database implementation for the current
+// testing state. The database type on the test state is used to
+// select the datastore implementation to use.
+func setupDatabase(s *state) (impl client.DB, path string, err error) {
+ dbopts := []db.Option{
+ db.WithUpdateEvents(),
+ db.WithLensPoolSize(lensPoolSize),
+ }
+
+ switch s.dbt {
+ case badgerIMType:
+ impl, err = NewBadgerMemoryDB(s.ctx, dbopts...)
+
+ case badgerFileType:
+ impl, path, err = NewBadgerFileDB(s.ctx, s.t, dbopts...)
+
+ case defraIMType:
+ impl, err = NewInMemoryDB(s.ctx, dbopts...)
+
+ default:
+ err = fmt.Errorf("invalid database type: %v", s.dbt)
+ }
+
+ if err != nil {
+ return nil, "", err
+ }
+ return
+}
diff --git a/tests/integration/explain.go b/tests/integration/explain.go
index 44c457c0f8..da7a1106e2 100644
--- a/tests/integration/explain.go
+++ b/tests/integration/explain.go
@@ -125,7 +125,7 @@ func executeExplainRequest(
}
for _, node := range getNodes(action.NodeID, s.nodes) {
- result := node.DB.ExecRequest(s.ctx, action.Request)
+ result := node.ExecRequest(s.ctx, action.Request)
assertExplainRequestResults(s, &result.GQL, action)
}
}
diff --git a/tests/integration/lens.go b/tests/integration/lens.go
index 317864ab3e..e69437d87b 100644
--- a/tests/integration/lens.go
+++ b/tests/integration/lens.go
@@ -57,7 +57,7 @@ func configureMigration(
action ConfigureMigration,
) {
for _, node := range getNodes(action.NodeID, s.nodes) {
- db := getStore(s, node.DB, action.TransactionID, action.ExpectedError)
+ db := getStore(s, node, action.TransactionID, action.ExpectedError)
err := db.SetMigration(s.ctx, action.LensConfig)
expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
@@ -71,7 +71,7 @@ func getMigrations(
action GetMigrations,
) {
for _, node := range getNodes(action.NodeID, s.nodes) {
- db := getStore(s, node.DB, action.TransactionID, "")
+ db := getStore(s, node, action.TransactionID, "")
configs, err := db.LensRegistry().Config(s.ctx)
require.NoError(s.t, err)
diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go
index 5470d8aee7..e01dd612cd 100644
--- a/tests/integration/net/order/utils.go
+++ b/tests/integration/net/order/utils.go
@@ -114,10 +114,7 @@ func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*net.Node
}
if err := n.Start(); err != nil {
- closeErr := n.Close()
- if closeErr != nil {
- return nil, nil, errors.Wrap(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr)
- }
+ n.Close()
return nil, nil, errors.Wrap("unable to start P2P listeners", err)
}
@@ -206,9 +203,10 @@ func executeTestCase(t *testing.T, test P2PTestCase) {
log.Info(ctx, "cannot set a peer that hasn't been started. Skipping to next peer")
continue
}
+ peerInfo := nodes[p].PeerInfo()
peerAddresses = append(
peerAddresses,
- fmt.Sprintf("%s/p2p/%s", test.NodeConfig[p].Net.P2PAddress, nodes[p].PeerID()),
+ fmt.Sprintf("%s/p2p/%s", peerInfo.Addrs[0], peerInfo.ID),
)
}
cfg.Net.Peers = strings.Join(peerAddresses, ",")
@@ -260,7 +258,7 @@ func executeTestCase(t *testing.T, test P2PTestCase) {
continue
}
log.Info(ctx, fmt.Sprintf("Waiting for node %d to sync with peer %d", n2, n))
- err := p.WaitForPushLogByPeerEvent(nodes[n].PeerID())
+ err := p.WaitForPushLogByPeerEvent(nodes[n].PeerInfo().ID)
require.NoError(t, err)
log.Info(ctx, fmt.Sprintf("Node %d synced", n2))
}
@@ -340,15 +338,14 @@ func executeTestCase(t *testing.T, test P2PTestCase) {
// clean up
for _, n := range nodes {
- if err := n.Close(); err != nil {
- log.Info(ctx, "node not closing as expected", logging.NewKV("Error", err.Error()))
- }
- n.DB.Close(ctx)
+ n.Close()
+ n.DB.Close()
}
}
func randomNetworkingConfig() *config.Config {
cfg := config.DefaultConfig()
cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/0"
+ cfg.Net.RelayEnabled = false
return cfg
}
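In these order tests, bootstrap addresses are now taken from each started node's `PeerInfo()` rather than assembled from the static config plus `PeerID()`, so the address actually being listened on is used even when the config asks for an ephemeral port. A small sketch of the construction used above, assuming the `fmt` and libp2p `peer` imports are available in this file:

```go
// Build the dialable multiaddress string the way the updated executeTestCase
// does: "<first listen address>/p2p/<peer ID>".
func bootstrapAddr(info peer.AddrInfo) string {
	return fmt.Sprintf("%s/p2p/%s", info.Addrs[0], info.ID)
}
```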
diff --git a/tests/integration/net/state/simple/replicator/with_create_test.go b/tests/integration/net/state/simple/replicator/with_create_test.go
index 65d6cfd6ce..d6285f9bd0 100644
--- a/tests/integration/net/state/simple/replicator/with_create_test.go
+++ b/tests/integration/net/state/simple/replicator/with_create_test.go
@@ -12,6 +12,7 @@ package replicator
import (
"testing"
+ "time"
"github.com/sourcenetwork/immutable"
@@ -150,6 +151,55 @@ func TestP2POneToOneReplicatorDoesNotSyncFromTargetToSource(t *testing.T) {
testUtils.ExecuteTestCase(t, test)
}
+func TestP2POneToOneReplicatorDoesNotSyncFromDeletedReplicator(t *testing.T) {
+ test := testUtils.TestCase{
+ Actions: []any{
+ testUtils.RandomNetworkingConfig(),
+ testUtils.RandomNetworkingConfig(),
+ testUtils.SchemaUpdate{
+ Schema: `
+ type Users {
+ Name: String
+ Age: Int
+ }
+ `,
+ },
+ testUtils.ConfigureReplicator{
+ SourceNodeID: 0,
+ TargetNodeID: 1,
+ },
+ testUtils.DeleteReplicator{
+ SourceNodeID: 0,
+ TargetNodeID: 1,
+ },
+ testUtils.CreateDoc{
+ // Create John on the first (source) node only
+ NodeID: immutable.Some(0),
+ Doc: `{
+ "Name": "John",
+ "Age": 21
+ }`,
+ },
+ testUtils.WaitForSync{
+ // No documents should be synced
+ ExpectedTimeout: 100 * time.Millisecond,
+ },
+ testUtils.Request{
+ // Assert that John has not been synced to the second (target) node
+ NodeID: immutable.Some(1),
+ Request: `query {
+ Users {
+ Age
+ }
+ }`,
+ Results: []map[string]any{},
+ },
+ },
+ }
+
+ testUtils.ExecuteTestCase(t, test)
+}
+
func TestP2POneToManyReplicator(t *testing.T) {
test := testUtils.TestCase{
Actions: []any{
diff --git a/tests/integration/p2p.go b/tests/integration/p2p.go
index e04e16bb0f..de02c0806c 100644
--- a/tests/integration/p2p.go
+++ b/tests/integration/p2p.go
@@ -11,15 +11,14 @@
package tests
import (
- "fmt"
"time"
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/config"
"github.com/sourcenetwork/defradb/logging"
- "github.com/sourcenetwork/defradb/net"
- netutils "github.com/sourcenetwork/defradb/net/utils"
+ "github.com/sourcenetwork/defradb/tests/clients"
+ "github.com/libp2p/go-libp2p/core/peer"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -58,11 +57,20 @@ type ConfigureReplicator struct {
TargetNodeID int
}
+// DeleteReplicator deletes a directional replicator relationship between two nodes.
+type DeleteReplicator struct {
+ // SourceNodeID is the node ID (index) of the node from which the replicator should be deleted.
+ SourceNodeID int
+
+ // TargetNodeID is the node ID (index) of the target node of the replicator to be deleted.
+ TargetNodeID int
+}
+
const (
// NonExistentCollectionID can be used to represent a non-existent collection ID, it will be substituted
// for a non-existent collection ID when used in actions that support this.
- NonExistentCollectionID int = -1
- NonExistentCollectionSchemaID = "NonExistentCollectionID"
+ NonExistentCollectionID int = -1
+ NonExistentCollectionSchemaID string = "NonExistentCollectionID"
)
// SubscribeToCollection sets up a subscription on the given node to the given collection.
@@ -121,7 +129,10 @@ type GetAllP2PCollections struct {
//
// For example you will likely wish to `WaitForSync` after creating a document in node 0 before querying
// node 1 to see if it has been replicated.
-type WaitForSync struct{}
+type WaitForSync struct {
+ // ExpectedTimeout, if non-zero, is how long to wait while asserting that no sync event arrives; if zero, the action waits for the sync to complete as usual.
+ ExpectedTimeout time.Duration
+}
// connectPeers connects two existing, started, nodes as peers. It returns a channel
// that will receive an empty struct upon sync completion of all expected peer-sync events.
@@ -136,13 +147,8 @@ func connectPeers(
time.Sleep(100 * time.Millisecond)
sourceNode := s.nodes[cfg.SourceNodeID]
targetNode := s.nodes[cfg.TargetNodeID]
- targetAddress := s.nodeAddresses[cfg.TargetNodeID]
- log.Info(s.ctx, "Parsing bootstrap peers", logging.NewKV("Peers", targetAddress))
- addrs, err := netutils.ParsePeers([]string{targetAddress})
- if err != nil {
- s.t.Fatal(fmt.Sprintf("failed to parse bootstrap peers %v", targetAddress), err)
- }
+ addrs := []peer.AddrInfo{targetNode.PeerInfo()}
log.Info(s.ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs))
sourceNode.Bootstrap(addrs)
@@ -157,12 +163,16 @@ func setupPeerWaitSync(
s *state,
startIndex int,
cfg ConnectPeers,
- sourceNode *net.Node,
- targetNode *net.Node,
+ sourceNode clients.Client,
+ targetNode clients.Client,
) {
- nodeCollections := map[int][]int{}
sourceToTargetEvents := []int{0}
targetToSourceEvents := []int{0}
+
+ sourcePeerInfo := sourceNode.PeerInfo()
+ targetPeerInfo := targetNode.PeerInfo()
+
+ nodeCollections := map[int][]int{}
waitIndex := 0
for i := startIndex; i < len(s.testCase.Actions); i++ {
switch action := s.testCase.Actions[i].(type) {
@@ -247,11 +257,11 @@ func setupPeerWaitSync(
ready <- struct{}{}
for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ {
for i := 0; i < targetToSourceEvents[waitIndex]; i++ {
- err := sourceNode.WaitForPushLogByPeerEvent(targetNode.PeerID())
+ err := sourceNode.WaitForPushLogByPeerEvent(targetPeerInfo.ID)
require.NoError(s.t, err)
}
for i := 0; i < sourceToTargetEvents[waitIndex]; i++ {
- err := targetNode.WaitForPushLogByPeerEvent(sourceNode.PeerID())
+ err := targetNode.WaitForPushLogByPeerEvent(sourcePeerInfo.ID)
require.NoError(s.t, err)
}
nodeSynced <- struct{}{}
@@ -294,22 +304,39 @@ func configureReplicator(
sourceNode := s.nodes[cfg.SourceNodeID]
targetNode := s.nodes[cfg.TargetNodeID]
- err := sourceNode.Peer.SetReplicator(s.ctx, client.Replicator{
+ err := sourceNode.SetReplicator(s.ctx, client.Replicator{
Info: targetNode.PeerInfo(),
})
require.NoError(s.t, err)
setupReplicatorWaitSync(s, 0, cfg, sourceNode, targetNode)
}
+func deleteReplicator(
+ s *state,
+ cfg DeleteReplicator,
+) {
+ sourceNode := s.nodes[cfg.SourceNodeID]
+ targetNode := s.nodes[cfg.TargetNodeID]
+
+ err := sourceNode.DeleteReplicator(s.ctx, client.Replicator{
+ Info: targetNode.PeerInfo(),
+ })
+ require.NoError(s.t, err)
+}
+
func setupReplicatorWaitSync(
s *state,
startIndex int,
cfg ConfigureReplicator,
- sourceNode *net.Node,
- targetNode *net.Node,
+ sourceNode clients.Client,
+ targetNode clients.Client,
) {
sourceToTargetEvents := []int{0}
targetToSourceEvents := []int{0}
+
+ sourcePeerInfo := sourceNode.PeerInfo()
+ targetPeerInfo := targetNode.PeerInfo()
+
docIDsSyncedToSource := map[int]struct{}{}
waitIndex := 0
currentDocID := 0
@@ -361,11 +388,11 @@ func setupReplicatorWaitSync(
ready <- struct{}{}
for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ {
for i := 0; i < targetToSourceEvents[waitIndex]; i++ {
- err := sourceNode.WaitForPushLogByPeerEvent(targetNode.PeerID())
+ err := sourceNode.WaitForPushLogByPeerEvent(targetPeerInfo.ID)
require.NoError(s.t, err)
}
for i := 0; i < sourceToTargetEvents[waitIndex]; i++ {
- err := targetNode.WaitForPushLogByPeerEvent(sourceNode.PeerID())
+ err := targetNode.WaitForPushLogByPeerEvent(sourcePeerInfo.ID)
require.NoError(s.t, err)
}
nodeSynced <- struct{}{}
@@ -466,14 +493,31 @@ func waitForSync(
s *state,
action WaitForSync,
) {
+ var timeout time.Duration
+ if action.ExpectedTimeout != 0 {
+ timeout = action.ExpectedTimeout
+ } else {
+ timeout = subscriptionTimeout * 10
+ }
+
for _, resultsChan := range s.syncChans {
select {
case <-resultsChan:
- continue
+ assert.True(
+ s.t,
+ action.ExpectedTimeout == 0,
+ "unexpected document has been synced",
+ s.testCase.Description,
+ )
// a safety in case the stream hangs - we don't want the tests to run forever.
- case <-time.After(subscriptionTimeout * 10):
- assert.Fail(s.t, "timeout occurred while waiting for data stream", s.testCase.Description)
+ case <-time.After(timeout):
+ assert.True(
+ s.t,
+ action.ExpectedTimeout != 0,
+ "timeout occurred while waiting for data stream",
+ s.testCase.Description,
+ )
}
}
}
@@ -482,6 +526,7 @@ func RandomNetworkingConfig() ConfigureNode {
return func() config.Config {
cfg := config.DefaultConfig()
cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/0"
+ cfg.Net.RelayEnabled = false
return *cfg
}
}
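The two additions to `p2p.go` work together: `DeleteReplicator` removes a previously configured replicator on the source node, and a non-zero `ExpectedTimeout` on `WaitForSync` inverts the usual assertion so the action passes only if no sync event arrives within that window (with a zero value it waits for sync as before). The replicator test added earlier in this diff exercises exactly this pattern; the relevant fragment of its action list is:

```go
// Excerpt from TestP2POneToOneReplicatorDoesNotSyncFromDeletedReplicator:
// once the replicator is deleted, the document created on node 0 must not
// reach node 1, so WaitForSync is told to expect a timeout instead.
testUtils.DeleteReplicator{
	SourceNodeID: 0,
	TargetNodeID: 1,
},
testUtils.WaitForSync{
	ExpectedTimeout: 100 * time.Millisecond,
},
```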
diff --git a/tests/integration/state.go b/tests/integration/state.go
index 5e47e0adfe..ca795a2492 100644
--- a/tests/integration/state.go
+++ b/tests/integration/state.go
@@ -15,11 +15,12 @@ import (
"testing"
"github.com/libp2p/go-libp2p/core/crypto"
+ "github.com/libp2p/go-libp2p/core/peer"
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/config"
"github.com/sourcenetwork/defradb/datastore"
- "github.com/sourcenetwork/defradb/net"
+ "github.com/sourcenetwork/defradb/tests/clients"
)
type state struct {
@@ -56,13 +57,13 @@ type state struct {
nodePrivateKeys []crypto.PrivKey
// The addresses of any nodes configured.
- nodeAddresses []string
+ nodeAddresses []peer.AddrInfo
// The configurations for any nodes
nodeConfigs []config.Config
// The nodes active in this test.
- nodes []*net.Node
+ nodes []clients.Client
// The paths to any file-based databases active in this test.
dbPaths []string
@@ -108,9 +109,9 @@ func newState(
subscriptionResultsChans: []chan func(){},
syncChans: []chan struct{}{},
nodePrivateKeys: []crypto.PrivKey{},
- nodeAddresses: []string{},
+ nodeAddresses: []peer.AddrInfo{},
nodeConfigs: []config.Config{},
- nodes: []*net.Node{},
+ nodes: []clients.Client{},
dbPaths: []string{},
collections: [][]client.Collection{},
collectionNames: collectionNames,
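The test state now stores nodes as `clients.Client` values instead of `*net.Node`, and peer addresses as `peer.AddrInfo` instead of raw multiaddress strings. The `tests/clients` package itself is not part of this diff; judging only from the call sites above, its `Client` type needs at least the following shape. This is a hedged sketch of what the usage implies, not the actual definition:

```go
// Rough shape implied by the call sites in this diff; the real interface in
// tests/clients may differ. client.P2P is assumed to cover the DB surface
// (AddSchema, ExecRequest, Close, ...) plus SetReplicator, DeleteReplicator,
// and PeerInfo.
type Client interface {
	client.P2P

	// Test-only helpers used by the peer/replicator sync machinery in p2p.go.
	WaitForPushLogByPeerEvent(peer.ID) error
	Bootstrap(addrs []peer.AddrInfo)
}
```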
diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go
index 9e8c71792e..01c6c7c69f 100644
--- a/tests/integration/utils2.go
+++ b/tests/integration/utils2.go
@@ -16,12 +16,10 @@ import (
"fmt"
"os"
"reflect"
- "strconv"
"strings"
"testing"
"time"
- badger "github.com/dgraph-io/badger/v4"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/sourcenetwork/immutable"
"github.com/stretchr/testify/assert"
@@ -30,48 +28,14 @@ import (
"github.com/sourcenetwork/defradb/client"
"github.com/sourcenetwork/defradb/datastore"
badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4"
- "github.com/sourcenetwork/defradb/datastore/memory"
- "github.com/sourcenetwork/defradb/db"
"github.com/sourcenetwork/defradb/errors"
"github.com/sourcenetwork/defradb/logging"
"github.com/sourcenetwork/defradb/net"
changeDetector "github.com/sourcenetwork/defradb/tests/change_detector"
- "github.com/sourcenetwork/defradb/tests/clients/cli"
- "github.com/sourcenetwork/defradb/tests/clients/http"
+ "github.com/sourcenetwork/defradb/tests/clients"
)
-const (
- clientGoEnvName = "DEFRA_CLIENT_GO"
- clientHttpEnvName = "DEFRA_CLIENT_HTTP"
- clientCliEnvName = "DEFRA_CLIENT_CLI"
- memoryBadgerEnvName = "DEFRA_BADGER_MEMORY"
- fileBadgerEnvName = "DEFRA_BADGER_FILE"
- fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH"
- inMemoryEnvName = "DEFRA_IN_MEMORY"
- mutationTypeEnvName = "DEFRA_MUTATION_TYPE"
-)
-
-type DatabaseType string
-
-const (
- badgerIMType DatabaseType = "badger-in-memory"
- defraIMType DatabaseType = "defra-memory-datastore"
- badgerFileType DatabaseType = "badger-file-system"
-)
-
-type ClientType string
-
-const (
- // GoClientType enables running the test suite using
- // the go implementation of the client.DB interface.
- GoClientType ClientType = "go"
- // HTTPClientType enables running the test suite using
- // the http implementation of the client.DB interface.
- HTTPClientType ClientType = "http"
- // CLIClientType enables running the test suite using
- // the cli implementation of the client.DB interface.
- CLIClientType ClientType = "cli"
-)
+const mutationTypeEnvName = "DEFRA_MUTATION_TYPE"
// The MutationType that tests will run using.
//
@@ -101,15 +65,8 @@ const (
)
var (
- log = logging.MustNewLogger("tests.integration")
- badgerInMemory bool
- badgerFile bool
- inMemoryStore bool
- httpClient bool
- goClient bool
- cliClient bool
- mutationType MutationType
- databaseDir string
+ log = logging.MustNewLogger("tests.integration")
+ mutationType MutationType
)
const (
@@ -122,14 +79,7 @@ const (
func init() {
// We use environment variables instead of flags because `go test ./...` throws for all packages
- // that don't have the flag defined
- httpClient, _ = strconv.ParseBool(os.Getenv(clientHttpEnvName))
- goClient, _ = strconv.ParseBool(os.Getenv(clientGoEnvName))
- cliClient, _ = strconv.ParseBool(os.Getenv(clientCliEnvName))
- badgerFile, _ = strconv.ParseBool(os.Getenv(fileBadgerEnvName))
- badgerInMemory, _ = strconv.ParseBool(os.Getenv(memoryBadgerEnvName))
- inMemoryStore, _ = strconv.ParseBool(os.Getenv(inMemoryEnvName))
-
+ // that don't have the flag defined
if value, ok := os.LookupEnv(mutationTypeEnvName); ok {
mutationType = MutationType(value)
} else {
@@ -138,23 +88,6 @@ func init() {
// mutation type.
mutationType = CollectionSaveMutationType
}
-
- if !goClient && !httpClient && !cliClient {
- // Default is to test go client type.
- goClient = true
- }
-
- if changeDetector.Enabled {
- // Change detector only uses badger file db type.
- badgerFile = true
- badgerInMemory = false
- inMemoryStore = false
- } else if !badgerInMemory && !badgerFile && !inMemoryStore {
- // Default is to test all but filesystem db types.
- badgerFile = false
- badgerInMemory = true
- inMemoryStore = true
- }
}
// AssertPanic asserts that the code inside the specified PanicTestFunc panics.
@@ -178,107 +111,6 @@ func AssertPanic(t *testing.T, f assert.PanicTestFunc) bool {
return assert.Panics(t, f, "expected a panic, but none found.")
}
-func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) {
- opts := badgerds.Options{
- Options: badger.DefaultOptions("").WithInMemory(true),
- }
- rootstore, err := badgerds.NewDatastore("", &opts)
- if err != nil {
- return nil, err
- }
- db, err := db.NewDB(ctx, rootstore, dbopts...)
- if err != nil {
- return nil, err
- }
- return db, nil
-}
-
-func NewInMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) {
- db, err := db.NewDB(ctx, memory.NewDatastore(ctx), dbopts...)
- if err != nil {
- return nil, err
- }
- return db, nil
-}
-
-func NewBadgerFileDB(ctx context.Context, t testing.TB, dbopts ...db.Option) (client.DB, string, error) {
- var dbPath string
- switch {
- case databaseDir != "":
- // restarting database
- dbPath = databaseDir
-
- case changeDetector.Enabled:
- // change detector
- dbPath = changeDetector.DatabaseDir(t)
-
- default:
- // default test case
- dbPath = t.TempDir()
- }
-
- opts := &badgerds.Options{
- Options: badger.DefaultOptions(dbPath),
- }
- rootstore, err := badgerds.NewDatastore(dbPath, opts)
- if err != nil {
- return nil, "", err
- }
- db, err := db.NewDB(ctx, rootstore, dbopts...)
- if err != nil {
- return nil, "", err
- }
- return db, dbPath, err
-}
-
-// GetDatabase returns the database implementation for the current
-// testing state. The database type and client type on the test state
-// are used to select the datastore and client implementation to use.
-func GetDatabase(s *state) (cdb client.DB, path string, err error) {
- dbopts := []db.Option{
- db.WithUpdateEvents(),
- db.WithLensPoolSize(lensPoolSize),
- }
-
- switch s.dbt {
- case badgerIMType:
- cdb, err = NewBadgerMemoryDB(s.ctx, dbopts...)
-
- case badgerFileType:
- cdb, path, err = NewBadgerFileDB(s.ctx, s.t, dbopts...)
-
- case defraIMType:
- cdb, err = NewInMemoryDB(s.ctx, dbopts...)
-
- default:
- err = fmt.Errorf("invalid database type: %v", s.dbt)
- }
-
- if err != nil {
- return nil, "", err
- }
-
- switch s.clientType {
- case HTTPClientType:
- cdb, err = http.NewWrapper(cdb)
-
- case CLIClientType:
- cdb = cli.NewWrapper(cdb)
-
- case GoClientType:
- return
-
- default:
- err = fmt.Errorf("invalid client type: %v", s.dbt)
- }
-
- if err != nil {
- return nil, "", err
- }
-
- return
-}
-
// ExecuteTestCase executes the given TestCase against the configured database
// instances.
//
@@ -404,6 +236,9 @@ func performAction(
case ConfigureReplicator:
configureReplicator(s, action)
+ case DeleteReplicator:
+ deleteReplicator(s, action)
+
case SubscribeToCollection:
subscribeToCollection(s, action)
@@ -585,23 +420,19 @@ func closeNodes(
s *state,
) {
for _, node := range s.nodes {
- if node.Peer != nil {
- err := node.Close()
- require.NoError(s.t, err)
- }
- node.DB.Close(s.ctx)
+ node.Close()
}
}
// getNodes gets the set of applicable nodes for the given nodeID.
//
// If nodeID has a value it will return that node only, otherwise all nodes will be returned.
-func getNodes(nodeID immutable.Option[int], nodes []*net.Node) []*net.Node {
+func getNodes(nodeID immutable.Option[int], nodes []clients.Client) []clients.Client {
if !nodeID.HasValue() {
return nodes
}
- return []*net.Node{nodes[nodeID.Value()]}
+ return []clients.Client{nodes[nodeID.Value()]}
}
// getNodeCollections gets the set of applicable collections for the given nodeID.
@@ -729,12 +560,13 @@ func setStartingNodes(
// If nodes have not been explicitly configured via actions, setup a default one.
if !hasExplicitNode {
- db, path, err := GetDatabase(s)
+ db, path, err := setupDatabase(s)
require.Nil(s.t, err)
- s.nodes = append(s.nodes, &net.Node{
- DB: db,
- })
+ c, err := setupClient(s, &net.Node{DB: db})
+ require.Nil(s.t, err)
+
+ s.nodes = append(s.nodes, c)
s.dbPaths = append(s.dbPaths, path)
}
}
@@ -752,16 +584,16 @@ func restartNodes(
for i := len(s.nodes) - 1; i >= 0; i-- {
originalPath := databaseDir
databaseDir = s.dbPaths[i]
- db, _, err := GetDatabase(s)
+ db, _, err := setupDatabase(s)
require.Nil(s.t, err)
databaseDir = originalPath
if len(s.nodeConfigs) == 0 {
// If there are no explicit node configuration actions the node will be
// basic (i.e. no P2P stuff) and can be yielded now.
- s.nodes[i] = &net.Node{
- DB: db,
- }
+ c, err := setupClient(s, &net.Node{DB: db})
+ require.NoError(s.t, err)
+ s.nodes[i] = c
continue
}
@@ -769,7 +601,8 @@ func restartNodes(
cfg := s.nodeConfigs[i]
// We need to make sure the node is configured with its old address, otherwise
// a new one may be selected and reconnection to it will fail.
- cfg.Net.P2PAddress = strings.Split(s.nodeAddresses[i], "/p2p/")[0]
+ cfg.Net.P2PAddress = s.nodeAddresses[i].Addrs[0].String()
+
var n *net.Node
n, err = net.NewNode(
s.ctx,
@@ -780,14 +613,13 @@ func restartNodes(
require.NoError(s.t, err)
if err := n.Start(); err != nil {
- closeErr := n.Close()
- if closeErr != nil {
- s.t.Fatal(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr)
- }
+ n.Close()
require.NoError(s.t, err)
}
- s.nodes[i] = n
+ c, err := setupClient(s, n)
+ require.NoError(s.t, err)
+ s.nodes[i] = c
}
// The index of the action after the last wait action before the current restart action.
@@ -838,7 +670,7 @@ func refreshCollections(
for nodeID, node := range s.nodes {
s.collections[nodeID] = make([]client.Collection, len(s.collectionNames))
- allCollections, err := node.DB.GetAllCollections(s.ctx)
+ allCollections, err := node.GetAllCollections(s.ctx)
require.Nil(s.t, err)
for i, collectionName := range s.collectionNames {
@@ -867,7 +699,7 @@ func configureNode(
}
cfg := action()
- db, path, err := GetDatabase(s) //disable change dector, or allow it?
+ db, path, err := setupDatabase(s) // disable change detector, or allow it?
require.NoError(s.t, err)
privateKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
@@ -883,20 +715,20 @@ func configureNode(
)
require.NoError(s.t, err)
+ log.Info(s.ctx, "Starting P2P node", logging.NewKV("P2P address", n.PeerInfo()))
if err := n.Start(); err != nil {
- closeErr := n.Close()
- if closeErr != nil {
- s.t.Fatal(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr)
- }
+ n.Close()
require.NoError(s.t, err)
}
- address := fmt.Sprintf("%s/p2p/%s", n.ListenAddrs()[0].String(), n.PeerID())
- s.nodeAddresses = append(s.nodeAddresses, address)
+ s.nodeAddresses = append(s.nodeAddresses, n.PeerInfo())
s.nodeConfigs = append(s.nodeConfigs, cfg)
s.nodePrivateKeys = append(s.nodePrivateKeys, privateKey)
- s.nodes = append(s.nodes, n)
+ c, err := setupClient(s, n)
+ require.NoError(s.t, err)
+
+ s.nodes = append(s.nodes, c)
s.dbPaths = append(s.dbPaths, path)
}
@@ -1083,7 +915,7 @@ func updateSchema(
action SchemaUpdate,
) {
for _, node := range getNodes(action.NodeID, s.nodes) {
- _, err := node.DB.AddSchema(s.ctx, action.Schema)
+ _, err := node.AddSchema(s.ctx, action.Schema)
expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised)
@@ -1106,7 +938,7 @@ func patchSchema(
setAsDefaultVersion = true
}
- err := node.DB.PatchSchema(s.ctx, action.Patch, setAsDefaultVersion)
+ err := node.PatchSchema(s.ctx, action.Patch, setAsDefaultVersion)
expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised)
@@ -1122,7 +954,7 @@ func setDefaultSchemaVersion(
action SetDefaultSchemaVersion,
) {
for _, node := range getNodes(action.NodeID, s.nodes) {
- err := node.DB.SetDefaultSchemaVersion(s.ctx, action.SchemaVersionID)
+ err := node.SetDefaultSchemaVersion(s.ctx, action.SchemaVersionID)
expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised)
@@ -1138,7 +970,7 @@ func createDoc(
s *state,
action CreateDoc,
) {
- var mutation func(*state, CreateDoc, *net.Node, []client.Collection) (*client.Document, error)
+ var mutation func(*state, CreateDoc, client.P2P, []client.Collection) (*client.Document, error)
switch mutationType {
case CollectionSaveMutationType:
@@ -1179,7 +1011,7 @@ func createDoc(
func createDocViaColSave(
s *state,
action CreateDoc,
- node *net.Node,
+ node client.P2P,
collections []client.Collection,
) (*client.Document, error) {
var err error
@@ -1194,7 +1026,7 @@ func createDocViaColSave(
func createDocViaColCreate(
s *state,
action CreateDoc,
- node *net.Node,
+ node client.P2P,
collections []client.Collection,
) (*client.Document, error) {
var err error
@@ -1209,7 +1041,7 @@ func createDocViaColCreate(
func createDocViaGQL(
s *state,
action CreateDoc,
- node *net.Node,
+ node client.P2P,
collections []client.Collection,
) (*client.Document, error) {
collection := collections[action.CollectionID]
@@ -1227,7 +1059,7 @@ func createDocViaGQL(
escapedJson,
)
- db := getStore(s, node.DB, immutable.None[int](), action.ExpectedError)
+ db := getStore(s, node, immutable.None[int](), action.ExpectedError)
result := db.ExecRequest(s.ctx, request)
if len(result.GQL.Errors) > 0 {
@@ -1279,7 +1111,7 @@ func updateDoc(
s *state,
action UpdateDoc,
) {
- var mutation func(*state, UpdateDoc, *net.Node, []client.Collection) error
+ var mutation func(*state, UpdateDoc, client.P2P, []client.Collection) error
switch mutationType {
case CollectionSaveMutationType:
@@ -1309,7 +1141,7 @@ func updateDoc(
func updateDocViaColSave(
s *state,
action UpdateDoc,
- node *net.Node,
+ node client.P2P,
collections []client.Collection,
) error {
doc := s.documents[action.CollectionID][action.DocID]
@@ -1325,7 +1157,7 @@ func updateDocViaColSave(
func updateDocViaColUpdate(
s *state,
action UpdateDoc,
- node *net.Node,
+ node client.P2P,
collections []client.Collection,
) error {
doc := s.documents[action.CollectionID][action.DocID]
@@ -1341,7 +1173,7 @@ func updateDocViaColUpdate(
func updateDocViaGQL(
s *state,
action UpdateDoc,
- node *net.Node,
+ node client.P2P,
collections []client.Collection,
) error {
doc := s.documents[action.CollectionID][action.DocID]
@@ -1361,7 +1193,7 @@ func updateDocViaGQL(
escapedJson,
)
- db := getStore(s, node.DB, immutable.None[int](), action.ExpectedError)
+ db := getStore(s, node, immutable.None[int](), action.ExpectedError)
result := db.ExecRequest(s.ctx, request)
if len(result.GQL.Errors) > 0 {
@@ -1461,7 +1293,7 @@ func backupExport(
err := withRetry(
actionNodes,
nodeID,
- func() error { return node.DB.BasicExport(s.ctx, &action.Config) },
+ func() error { return node.BasicExport(s.ctx, &action.Config) },
)
expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
@@ -1491,7 +1323,7 @@ func backupImport(
err := withRetry(
actionNodes,
nodeID,
- func() error { return node.DB.BasicImport(s.ctx, action.Filepath) },
+ func() error { return node.BasicImport(s.ctx, action.Filepath) },
)
expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError)
}
@@ -1506,11 +1338,11 @@ func backupImport(
// about this in our tests so we just retry a few times until it works (or the
// retry limit is breached - important in case this is a different error)
func withRetry(
- nodes []*net.Node,
+ nodes []clients.Client,
nodeID int,
action func() error,
) error {
- for i := 0; i < nodes[nodeID].DB.MaxTxnRetries(); i++ {
+ for i := 0; i < nodes[nodeID].MaxTxnRetries(); i++ {
err := action()
if err != nil && errors.Is(err, badgerds.ErrTxnConflict) {
time.Sleep(100 * time.Millisecond)
@@ -1577,7 +1409,7 @@ func executeRequest(
) {
var expectedErrorRaised bool
for nodeID, node := range getNodes(action.NodeID, s.nodes) {
- db := getStore(s, node.DB, action.TransactionID, action.ExpectedError)
+ db := getStore(s, node, action.TransactionID, action.ExpectedError)
result := db.ExecRequest(s.ctx, action.Request)
anyOfByFieldKey := map[docFieldKey][]any{}
@@ -1610,7 +1442,7 @@ func executeSubscriptionRequest(
subscriptionAssert := make(chan func())
for _, node := range getNodes(action.NodeID, s.nodes) {
- result := node.DB.ExecRequest(s.ctx, action.Request)
+ result := node.ExecRequest(s.ctx, action.Request)
if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) {
return
}
@@ -1791,7 +1623,7 @@ func assertIntrospectionResults(
action IntrospectionRequest,
) bool {
for _, node := range getNodes(action.NodeID, s.nodes) {
- result := node.DB.ExecRequest(s.ctx, action.Request)
+ result := node.ExecRequest(s.ctx, action.Request)
if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) {
return true
@@ -1822,7 +1654,7 @@ func assertClientIntrospectionResults(
action ClientIntrospectionRequest,
) bool {
for _, node := range getNodes(action.NodeID, s.nodes) {
- result := node.DB.ExecRequest(s.ctx, action.Request)
+ result := node.ExecRequest(s.ctx, action.Request)
if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) {
return true