From 1f5b81fb614a6095baa1193f8e4140306911f02c Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 11 Jul 2024 10:32:18 +0000 Subject: [PATCH 01/21] test: use the regular libp2p host (#565) This removes dependencies on swarm/testing and the blank host. 1. swarm/testing really shouldn't be used at all except for internal libp2p stuff. 2. The blank host should only be used in _very_ special cases (autonat, mostly). --- blacklist_test.go | 6 +-- discovery_test.go | 4 +- floodsub_test.go | 85 ++++++++++++++++------------------- gossipsub_connmgr_test.go | 17 ++++--- gossipsub_feat_test.go | 2 +- gossipsub_matchfn_test.go | 2 +- gossipsub_spam_test.go | 12 ++--- gossipsub_test.go | 88 ++++++++++++++----------------------- pubsub_test.go | 21 ++++++++- randomsub_test.go | 8 ++-- subscription_filter_test.go | 2 +- topic_test.go | 32 +++++++------- trace_test.go | 12 ++--- validation_builtin_test.go | 4 +- validation_test.go | 12 ++--- 15 files changed, 151 insertions(+), 156 deletions(-) diff --git a/blacklist_test.go b/blacklist_test.go index 045a9c85..a19c46e4 100644 --- a/blacklist_test.go +++ b/blacklist_test.go @@ -38,7 +38,7 @@ func TestBlacklist(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -66,7 +66,7 @@ func TestBlacklist2(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -99,7 +99,7 @@ func TestBlacklist3(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) psubs[1].BlacklistPeer(hosts[0].ID()) diff --git a/discovery_test.go b/discovery_test.go index 66c9c80e..f539e69d 100644 --- a/discovery_test.go +++ b/discovery_test.go @@ -134,7 +134,7 @@ func TestSimpleDiscovery(t *testing.T) { server := newDiscoveryServer() discOpts := []discovery.Option{discovery.Limit(numHosts), discovery.TTL(1 * time.Minute)} - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) psubs := make([]*PubSub, numHosts) topicHandlers := make([]*Topic, numHosts) @@ -234,7 +234,7 @@ func TestGossipSubDiscoveryAfterBootstrap(t *testing.T) { discOpts := []discovery.Option{discovery.Limit(numHosts), discovery.TTL(ttl)} // Put the pubsub clients into two partitions - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) psubs := make([]*PubSub, numHosts) topicHandlers := make([]*Topic, numHosts) diff --git a/floodsub_test.go b/floodsub_test.go index 35dd0d53..0168b15f 100644 --- a/floodsub_test.go +++ b/floodsub_test.go @@ -20,9 +20,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" - bhost "github.com/libp2p/go-libp2p/p2p/host/blank" - swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" - "github.com/libp2p/go-msgio/protoio" ) @@ -42,19 +39,6 @@ func checkMessageRouting(t *testing.T, topic string, pubs []*PubSub, subs []*Sub } } -func getNetHosts(t *testing.T, ctx context.Context, n int) []host.Host { - var out []host.Host - - for i := 0; i < n; i++ { - netw := swarmt.GenSwarm(t) - h := bhost.NewBlankHost(netw) - t.Cleanup(func() { h.Close() }) - out = append(out, h) - } - - return out -} - func connect(t *testing.T, a, b host.Host) { pinfo := 
a.Peerstore().PeerInfo(a.ID()) err := b.Connect(context.Background(), pinfo) @@ -151,7 +135,7 @@ func assertNeverReceives(t *testing.T, ch *Subscription, timeout time.Duration) func TestBasicFloodsub(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getPubsubs(ctx, hosts) @@ -193,7 +177,7 @@ func TestMultihops(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 6) + hosts := getDefaultHosts(t, 6) psubs := getPubsubs(ctx, hosts) @@ -235,7 +219,7 @@ func TestReconnects(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) psubs := getPubsubs(ctx, hosts) @@ -309,7 +293,7 @@ func TestNoConnection(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getPubsubs(ctx, hosts) @@ -334,7 +318,7 @@ func TestSelfReceive(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - host := getNetHosts(t, ctx, 1)[0] + host := getDefaultHosts(t, 1)[0] psub, err := NewFloodSub(ctx, host) if err != nil { @@ -368,7 +352,7 @@ func TestOneToOne(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -401,7 +385,7 @@ func TestTreeTopology(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -464,7 +448,7 @@ func TestFloodSubPluggableProtocol(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) psubA := mustCreatePubSub(ctx, t, hosts[0], "/esh/floodsub", "/lsr/floodsub") psubB := mustCreatePubSub(ctx, t, hosts[1], "/esh/floodsub") @@ -496,7 +480,7 @@ func TestFloodSubPluggableProtocol(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubA := mustCreatePubSub(ctx, t, hosts[0], "/esh/floodsub") psubB := mustCreatePubSub(ctx, t, hosts[1], "/lsr/floodsub") @@ -551,7 +535,7 @@ func TestSubReporting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - host := getNetHosts(t, ctx, 1)[0] + host := getDefaultHosts(t, 1)[0] psub, err := NewFloodSub(ctx, host) if err != nil { t.Fatal(err) @@ -593,7 +577,7 @@ func TestPeerTopicReporting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 4) + hosts := getDefaultHosts(t, 4) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -650,7 +634,7 @@ func TestSubscribeMultipleTimes(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -695,7 +679,7 @@ func TestPeerDisconnect(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, 
hosts[0], hosts[1]) @@ -743,7 +727,7 @@ func TestWithNoSigning(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts, WithNoAuthor(), WithMessageIdFn(func(pmsg *pb.Message) string { // silly content-based test message-ID: just use the data as whole return base64.URLEncoding.EncodeToString(pmsg.Data) @@ -788,7 +772,7 @@ func TestWithSigning(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts, WithStrictSignatureVerification(true)) connect(t, hosts[0], hosts[1]) @@ -830,7 +814,7 @@ func TestImproperlySignedMessageRejected(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) adversary := hosts[0] honestPeer := hosts[1] @@ -948,7 +932,7 @@ func TestMessageSender(t *testing.T) { const topic = "foobar" - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) psubs := getPubsubs(ctx, hosts) var msgs []*Subscription @@ -1002,7 +986,7 @@ func TestConfigurableMaxMessageSize(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) // use a 4mb limit; default is 1mb; we'll test with a 2mb payload. psubs := getPubsubs(ctx, hosts, WithMaxMessageSize(1<<22)) @@ -1045,7 +1029,7 @@ func TestAnnounceRetry(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) ps := getPubsub(ctx, hosts[0]) watcher := &announceWatcher{} hosts[1].SetStreamHandler(FloodSubID, watcher.handleStream) @@ -1117,7 +1101,7 @@ func TestPubsubWithAssortedOptions(t *testing.T) { return string(hash[:]) } - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts, WithMessageIdFn(hashMsgID), WithPeerOutboundQueueSize(10), @@ -1152,8 +1136,7 @@ func TestWithInvalidMessageAuthor(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := bhost.NewBlankHost(swarmt.GenSwarm(t)) - defer h.Close() + h := getDefaultHosts(t, 1)[0] _, err := NewFloodSub(ctx, h, WithMessageAuthor("bogotr0n")) if err == nil { t.Fatal("expected error") @@ -1168,10 +1151,9 @@ func TestPreconnectedNodes(t *testing.T) { defer cancel() // Create hosts - h1 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - h2 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - defer h1.Close() - defer h2.Close() + hosts := getDefaultHosts(t, 2) + h1 := hosts[0] + h2 := hosts[1] opts := []Option{WithDiscovery(&dummyDiscovery{})} // Setup first PubSub @@ -1229,10 +1211,9 @@ func TestDedupInboundStreams(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h1 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - h2 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - defer h1.Close() - defer h2.Close() + hosts := getDefaultHosts(t, 2) + h1 := hosts[0] + h2 := hosts[1] _, err := NewFloodSub(ctx, h1) if err != nil { @@ -1247,18 +1228,30 @@ func TestDedupInboundStreams(t *testing.T) { if err != nil { t.Fatal(err) } + _, err = s1.Read(nil) // force protocol negotiation to complete + if err != nil { + t.Fatal(err) + } time.Sleep(100 * time.Millisecond) s2, err := h2.NewStream(ctx, h1.ID(), FloodSubID) if err != nil { t.Fatal(err) } + _, err = s2.Read(nil) // 
force protocol negotiation to complete + if err != nil { + t.Fatal(err) + } time.Sleep(100 * time.Millisecond) s3, err := h2.NewStream(ctx, h1.ID(), FloodSubID) if err != nil { t.Fatal(err) } + _, err = s3.Read(nil) // force protocol negotiation to complete + if err != nil { + t.Fatal(err) + } time.Sleep(100 * time.Millisecond) // check that s1 and s2 have been reset diff --git a/gossipsub_connmgr_test.go b/gossipsub_connmgr_test.go index 0a97312c..accf57dd 100644 --- a/gossipsub_connmgr_test.go +++ b/gossipsub_connmgr_test.go @@ -7,10 +7,10 @@ import ( "github.com/benbjohnson/clock" "github.com/libp2p/go-libp2p/core/host" - swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" - bhost "github.com/libp2p/go-libp2p/p2p/host/blank" "github.com/libp2p/go-libp2p/p2p/net/connmgr" ) @@ -70,9 +70,14 @@ func TestGossipsubConnTagMessageDeliveries(t *testing.T) { t.Fatal(err) } - netw := swarmt.GenSwarm(t) - defer netw.Close() - h := bhost.NewBlankHost(netw, bhost.WithConnectionManager(connmgrs[i])) + h, err := libp2p.New( + libp2p.ResourceManager(&network.NullResourceManager{}), + libp2p.ConnectionManager(connmgrs[i]), + ) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { h.Close() }) honestHosts[i] = h honestPeers[h.ID()] = struct{}{} } @@ -83,7 +88,7 @@ func TestGossipsubConnTagMessageDeliveries(t *testing.T) { WithFloodPublish(true)) // sybil squatters to be connected later - sybilHosts := getNetHosts(t, ctx, nSquatter) + sybilHosts := getDefaultHosts(t, nSquatter) for _, h := range sybilHosts { squatter := &sybilSquatter{h: h} h.SetStreamHandler(GossipSubID_v10, squatter.handleStream) diff --git a/gossipsub_feat_test.go b/gossipsub_feat_test.go index 712f16df..93cfb4c3 100644 --- a/gossipsub_feat_test.go +++ b/gossipsub_feat_test.go @@ -42,7 +42,7 @@ func TestGossipSubCustomProtocols(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) gsubs := getGossipsubs(ctx, hosts[:2], WithGossipSubProtocols(protos, features)) fsub := getPubsub(ctx, hosts[2]) diff --git a/gossipsub_matchfn_test.go b/gossipsub_matchfn_test.go index 279f0d34..4d688d25 100644 --- a/gossipsub_matchfn_test.go +++ b/gossipsub_matchfn_test.go @@ -17,7 +17,7 @@ func TestGossipSubMatchingFn(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 4) + h := getDefaultHosts(t, 4) psubs := []*PubSub{ getGossipsub(ctx, h[0], WithProtocolMatchFn(protocolNameMatch), WithGossipSubProtocols([]protocol.ID{customsubA100, GossipSubID_v11}, GossipSubDefaultFeatures)), getGossipsub(ctx, h[1], WithProtocolMatchFn(protocolNameMatch), WithGossipSubProtocols([]protocol.ID{customsubA101Beta}, GossipSubDefaultFeatures)), diff --git a/gossipsub_spam_test.go b/gossipsub_spam_test.go index f31daaab..3ccb1ab4 100644 --- a/gossipsub_spam_test.go +++ b/gossipsub_spam_test.go @@ -25,7 +25,7 @@ func TestGossipsubAttackSpamIWANT(t *testing.T) { defer cancel() // Create legitimate and attacker hosts - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) legit := hosts[0] attacker := hosts[1] @@ -142,7 +142,7 @@ func TestGossipsubAttackSpamIHAVE(t *testing.T) { defer cancel() // Create legitimate and attacker hosts - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) legit := hosts[0] attacker := hosts[1] @@ -195,6 +195,7 @@ func 
TestGossipsubAttackSpamIHAVE(t *testing.T) { Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, }) + sub := sub go func() { defer cancel() @@ -292,7 +293,7 @@ func TestGossipsubAttackGRAFTNonExistentTopic(t *testing.T) { defer cancel() // Create legitimate and attacker hosts - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) legit := hosts[0] attacker := hosts[1] @@ -376,7 +377,7 @@ func TestGossipsubAttackGRAFTDuringBackoff(t *testing.T) { defer cancel() // Create legitimate and attacker hosts - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) legit := hosts[0] attacker := hosts[1] @@ -430,6 +431,7 @@ func TestGossipsubAttackGRAFTDuringBackoff(t *testing.T) { Control: &pb.ControlMessage{Graft: graft}, }) + sub := sub go func() { defer cancel() @@ -617,7 +619,7 @@ func TestGossipsubAttackInvalidMessageSpam(t *testing.T) { defer cancel() // Create legitimate and attacker hosts - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) legit := hosts[0] attacker := hosts[1] diff --git a/gossipsub_test.go b/gossipsub_test.go index 5933f4b5..8c9419d8 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -13,16 +13,12 @@ import ( pb "github.com/libp2p/go-libp2p-pubsub/pb" - "github.com/libp2p/go-libp2p" "github.com/libp2p/go-libp2p/core/host" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/record" - bhost "github.com/libp2p/go-libp2p/p2p/host/blank" - swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" - "github.com/libp2p/go-msgio/protoio" ) @@ -45,7 +41,7 @@ func getGossipsubs(ctx context.Context, hs []host.Host, opts ...Option) []*PubSu func TestSparseGossipsub(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -86,7 +82,7 @@ func TestSparseGossipsub(t *testing.T) { func TestDenseGossipsub(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -127,7 +123,7 @@ func TestDenseGossipsub(t *testing.T) { func TestGossipsubFanout(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -196,7 +192,7 @@ func TestGossipsubFanout(t *testing.T) { func TestGossipsubFanoutMaintenance(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -281,7 +277,7 @@ func TestGossipsubFanoutExpiry(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getGossipsubs(ctx, hosts) @@ -340,7 +336,7 @@ func TestGossipsubFanoutExpiry(t *testing.T) { func TestGossipsubGossip(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -388,7 +384,7 @@ func TestGossipsubGossipPiggyback(t *testing.T) { t.Skip("test no longer relevant; gossip propagation has become eager") ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts 
:= getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -457,7 +453,7 @@ func TestGossipsubGossipPropagation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) hosts1 := hosts[:GossipSubD+1] @@ -537,7 +533,7 @@ func TestGossipsubGossipPropagation(t *testing.T) { func TestGossipsubPrune(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -586,7 +582,7 @@ func TestGossipsubPrune(t *testing.T) { func TestGossipsubPruneBackoffTime(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) // App specific score that we'll change later. currentScoreForHost0 := int32(0) @@ -684,7 +680,7 @@ func TestGossipsubPruneBackoffTime(t *testing.T) { func TestGossipsubGraft(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -729,7 +725,7 @@ func TestGossipsubGraft(t *testing.T) { func TestGossipsubRemovePeer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) @@ -779,7 +775,7 @@ func TestGossipsubGraftPruneRetry(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getGossipsubs(ctx, hosts) denseConnect(t, hosts) @@ -829,7 +825,7 @@ func TestGossipsubControlPiggyback(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getGossipsubs(ctx, hosts) denseConnect(t, hosts) @@ -910,7 +906,7 @@ func TestGossipsubControlPiggyback(t *testing.T) { func TestMixedGossipsub(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 30) + hosts := getDefaultHosts(t, 30) gsubs := getGossipsubs(ctx, hosts[:20]) fsubs := getPubsubs(ctx, hosts[20:]) @@ -954,7 +950,7 @@ func TestGossipsubMultihops(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 6) + hosts := getDefaultHosts(t, 6) psubs := getGossipsubs(ctx, hosts) @@ -997,7 +993,7 @@ func TestGossipsubTreeTopology(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getGossipsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -1061,7 +1057,7 @@ func TestGossipsubStarTopology(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts, WithPeerExchange(true), WithFloodPublish(true)) // configure the center of the star with a very low D @@ -1223,7 +1219,7 @@ func TestGossipsubDirectPeers(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 3) + h := getDefaultHosts(t, 3) psubs := []*PubSub{ getGossipsub(ctx, h[0], WithDirectConnectTicks(2)), 
getGossipsub(ctx, h[1], WithDirectPeers([]peer.AddrInfo{{ID: h[2].ID(), Addrs: h[2].Addrs()}}), WithDirectConnectTicks(2)), @@ -1287,7 +1283,7 @@ func TestGossipSubPeerFilter(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 3) + h := getDefaultHosts(t, 3) psubs := []*PubSub{ getGossipsub(ctx, h[0], WithPeerFilter(func(pid peer.ID, topic string) bool { return pid == h[1].ID() @@ -1329,7 +1325,7 @@ func TestGossipsubDirectPeersFanout(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 3) + h := getDefaultHosts(t, 3) psubs := []*PubSub{ getGossipsub(ctx, h[0]), getGossipsub(ctx, h[1], WithDirectPeers([]peer.AddrInfo{{ID: h[2].ID(), Addrs: h[2].Addrs()}})), @@ -1416,7 +1412,7 @@ func TestGossipsubFloodPublish(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts, WithFloodPublish(true)) // build the star @@ -1451,7 +1447,7 @@ func TestGossipsubEnoughPeers(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts) for _, ps := range psubs { @@ -1500,7 +1496,7 @@ func TestGossipsubCustomParams(t *testing.T) { wantedMaxPendingConns := 23 params.MaxPendingConnections = wantedMaxPendingConns - hosts := getNetHosts(t, ctx, 1) + hosts := getDefaultHosts(t, 1) psubs := getGossipsubs(ctx, hosts, WithGossipSubParams(params)) @@ -1529,7 +1525,7 @@ func TestGossipsubNegativeScore(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts, WithPeerScore( &PeerScoreParams{ @@ -1613,7 +1609,7 @@ func TestGossipsubScoreValidatorEx(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) psubs := getGossipsubs(ctx, hosts, WithPeerScore( &PeerScoreParams{ @@ -1701,8 +1697,7 @@ func TestGossipsubPiggybackControl(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := bhost.NewBlankHost(swarmt.GenSwarm(t)) - defer h.Close() + h := getDefaultHosts(t, 1)[0] ps := getGossipsub(ctx, h) blah := peer.ID("bogotr0n") @@ -1750,7 +1745,7 @@ func TestGossipsubMultipleGraftTopics(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getGossipsubs(ctx, hosts) sparseConnect(t, hosts) @@ -1818,7 +1813,7 @@ func TestGossipsubOpportunisticGrafting(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 50) + hosts := getDefaultHosts(t, 50) // pubsubs for the first 10 hosts psubs := getGossipsubs(ctx, hosts[:10], WithFloodPublish(true), @@ -1919,7 +1914,7 @@ func TestGossipSubLeaveTopic(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 2) + h := getDefaultHosts(t, 2) psubs := []*PubSub{ getGossipsub(ctx, h[0]), getGossipsub(ctx, h[1]), @@ -1990,7 +1985,7 @@ func TestGossipSubJoinTopic(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h := getNetHosts(t, ctx, 3) + h := getDefaultHosts(t, 3) psubs := []*PubSub{ 
getGossipsub(ctx, h[0]), getGossipsub(ctx, h[1]), @@ -2072,7 +2067,7 @@ func TestGossipsubPeerScoreInspect(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) inspector := &mockPeerScoreInspector{} psub1 := getGossipsub(ctx, hosts[0], @@ -2132,7 +2127,7 @@ func TestGossipsubPeerScoreResetTopicParams(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 1) + hosts := getDefaultHosts(t, 1) ps := getGossipsub(ctx, hosts[0], WithPeerScore( @@ -2199,7 +2194,7 @@ func TestGossipsubRPCFragmentation(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) ps := getGossipsub(ctx, hosts[0]) // make a fake peer that requests everything through IWANT gossip @@ -2553,21 +2548,6 @@ func FuzzAppendOrMergeRPC(f *testing.F) { }) } -func getDefaultHosts(t *testing.T, n int) []host.Host { - var out []host.Host - - for i := 0; i < n; i++ { - h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{})) - if err != nil { - t.Fatal(err) - } - t.Cleanup(func() { h.Close() }) - out = append(out, h) - } - - return out -} - func TestGossipsubManagesAnAddressBook(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/pubsub_test.go b/pubsub_test.go index 4a033159..245a69df 100644 --- a/pubsub_test.go +++ b/pubsub_test.go @@ -4,13 +4,32 @@ import ( "context" "testing" "time" + + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p/core/host" + "github.com/libp2p/go-libp2p/core/network" ) +func getDefaultHosts(t *testing.T, n int) []host.Host { + var out []host.Host + + for i := 0; i < n; i++ { + h, err := libp2p.New(libp2p.ResourceManager(&network.NullResourceManager{})) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { h.Close() }) + out = append(out, h) + } + + return out +} + // See https://github.com/libp2p/go-libp2p-pubsub/issues/426 func TestPubSubRemovesBlacklistedPeer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) bl := NewMapBlacklist() diff --git a/randomsub_test.go b/randomsub_test.go index 8eb640ea..5c817b7c 100644 --- a/randomsub_test.go +++ b/randomsub_test.go @@ -40,7 +40,7 @@ func TestRandomsubSmall(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getRandomsubs(ctx, hosts, 10) connectAll(t, hosts) @@ -77,7 +77,7 @@ func TestRandomsubBig(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 50) + hosts := getDefaultHosts(t, 50) psubs := getRandomsubs(ctx, hosts, 50) connectSome(t, hosts, 12) @@ -114,7 +114,7 @@ func TestRandomsubMixed(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 40) + hosts := getDefaultHosts(t, 40) fsubs := getPubsubs(ctx, hosts[:10]) rsubs := getRandomsubs(ctx, hosts[10:], 30) psubs := append(fsubs, rsubs...) 
@@ -153,7 +153,7 @@ func TestRandomsubEnoughPeers(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 40) + hosts := getDefaultHosts(t, 40) fsubs := getPubsubs(ctx, hosts[:10]) rsubs := getRandomsubs(ctx, hosts[10:], 30) psubs := append(fsubs, rsubs...) diff --git a/subscription_filter_test.go b/subscription_filter_test.go index 8a4fe4db..7ee54a86 100644 --- a/subscription_filter_test.go +++ b/subscription_filter_test.go @@ -150,7 +150,7 @@ func TestSubscriptionFilterRPC(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) ps1 := getPubsub(ctx, hosts[0], WithSubscriptionFilter(NewAllowlistSubscriptionFilter("test1", "test2"))) ps2 := getPubsub(ctx, hosts[1], WithSubscriptionFilter(NewAllowlistSubscriptionFilter("test2", "test3"))) diff --git a/topic_test.go b/topic_test.go index 9ad3146d..a27113b2 100644 --- a/topic_test.go +++ b/topic_test.go @@ -99,7 +99,7 @@ func testTopicCloseWithOpenResource(t *testing.T, openResource func(topic *Topic const numHosts = 1 topicID := "foobar" - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) ps := getPubsub(ctx, hosts[0]) // Try create and cancel topic @@ -139,7 +139,7 @@ func TestTopicReuse(t *testing.T) { const numHosts = 2 topicID := "foobar" - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) sender := getPubsub(ctx, hosts[0], WithDiscovery(&dummyDiscovery{})) receiver := getPubsub(ctx, hosts[1]) @@ -233,7 +233,7 @@ func TestTopicEventHandlerCancel(t *testing.T) { const numHosts = 5 topicID := "foobar" - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) ps := getPubsub(ctx, hosts[0]) // Try create and cancel topic @@ -265,7 +265,7 @@ func TestSubscriptionJoinNotification(t *testing.T) { const numLateSubscribers = 10 const numHosts = 20 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), "foobar") evts := getTopicEvts(topics) @@ -331,7 +331,7 @@ func TestSubscriptionLeaveNotification(t *testing.T) { defer cancel() const numHosts = 20 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) psubs := getPubsubs(ctx, hosts) topics := getTopics(psubs, "foobar") evts := getTopicEvts(topics) @@ -416,7 +416,7 @@ func TestSubscriptionManyNotifications(t *testing.T) { const topic = "foobar" const numHosts = 33 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) evts := getTopicEvts(topics) @@ -521,7 +521,7 @@ func TestSubscriptionNotificationSubUnSub(t *testing.T) { const topic = "foobar" const numHosts = 35 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) for i := 1; i < numHosts; i++ { @@ -539,7 +539,7 @@ func TestTopicRelay(t *testing.T) { const topic = "foobar" const numHosts = 5 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) // [0.Rel] - [1.Rel] - [2.Sub] @@ -603,7 +603,7 @@ func TestTopicRelayReuse(t *testing.T) { const topic = "foobar" const numHosts = 1 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) pubsubs := getPubsubs(ctx, hosts) topics := getTopics(pubsubs, topic) @@ -670,7 +670,7 @@ func TestTopicRelayOnClosedTopic(t 
*testing.T) { const topic = "foobar" const numHosts = 1 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) err := topics[0].Close() @@ -690,7 +690,7 @@ func TestProducePanic(t *testing.T) { const numHosts = 5 topicID := "foobar" - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) ps := getPubsub(ctx, hosts[0]) // Create topic @@ -792,7 +792,7 @@ func TestMinTopicSizeNoDiscovery(t *testing.T) { const numHosts = 3 topicID := "foobar" - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) sender := getPubsub(ctx, hosts[0]) receiver1 := getPubsub(ctx, hosts[1]) @@ -872,7 +872,7 @@ func TestWithTopicMsgIdFunction(t *testing.T) { const topicA, topicB = "foobarA", "foobarB" const numHosts = 2 - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) pubsubs := getPubsubs(ctx, hosts, WithMessageIdFn(func(pmsg *pb.Message) string { hash := sha256.Sum256(pmsg.Data) return string(hash[:]) @@ -932,7 +932,7 @@ func TestTopicPublishWithKeyInvalidParameters(t *testing.T) { const numHosts = 5 virtualPeer := tnet.RandPeerNetParamsOrFatal(t) - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) t.Run("nil sign private key should error", func(t *testing.T) { @@ -959,7 +959,7 @@ func TestTopicRelayPublishWithKey(t *testing.T) { const numHosts = 5 virtualPeer := tnet.RandPeerNetParamsOrFatal(t) - hosts := getNetHosts(t, ctx, numHosts) + hosts := getDefaultHosts(t, numHosts) topics := getTopics(getPubsubs(ctx, hosts), topic) // [0.Rel] - [1.Rel] - [2.Sub] @@ -1026,7 +1026,7 @@ func TestWithLocalPublication(t *testing.T) { const topic = "test" - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) pubsubs := getPubsubs(ctx, hosts) topics := getTopics(pubsubs, topic) connectAll(t, hosts) diff --git a/trace_test.go b/trace_test.go index fb8cb56d..7717a7e2 100644 --- a/trace_test.go +++ b/trace_test.go @@ -17,9 +17,6 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" - bhost "github.com/libp2p/go-libp2p/p2p/host/blank" - swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" - "github.com/libp2p/go-msgio/protoio" ) @@ -27,7 +24,7 @@ func testWithTracer(t *testing.T, tracer EventTracer) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getGossipsubs(ctx, hosts, WithEventTracer(tracer), // to bootstrap from star topology @@ -302,10 +299,9 @@ func TestRemoteTracer(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - h1 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - h2 := bhost.NewBlankHost(swarmt.GenSwarm(t)) - defer h1.Close() - defer h2.Close() + hosts := getDefaultHosts(t, 2) + h1 := hosts[0] + h2 := hosts[1] mrt := &mockRemoteTracer{} h1.SetStreamHandler(RemoteTracerProtoID, mrt.handleStream) diff --git a/validation_builtin_test.go b/validation_builtin_test.go index df406f26..267cc6be 100644 --- a/validation_builtin_test.go +++ b/validation_builtin_test.go @@ -38,7 +38,7 @@ func testBasicSeqnoValidator(t *testing.T, ttl time.Duration) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getPubsubsWithOptionC(ctx, hosts, func(i int) Option { return 
WithDefaultValidator(NewBasicSeqnoValidator(newMockPeerMetadataStore())) @@ -86,7 +86,7 @@ func TestBasicSeqnoValidatorReplay(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 20) + hosts := getDefaultHosts(t, 20) psubs := getPubsubsWithOptionC(ctx, hosts[:19], func(i int) Option { return WithDefaultValidator(NewBasicSeqnoValidator(newMockPeerMetadataStore())) diff --git a/validation_test.go b/validation_test.go index b56e7677..0a09f70b 100644 --- a/validation_test.go +++ b/validation_test.go @@ -15,7 +15,7 @@ func TestRegisterUnregisterValidator(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 1) + hosts := getDefaultHosts(t, 1) psubs := getPubsubs(ctx, hosts) err := psubs[0].RegisterTopicValidator("foo", func(context.Context, peer.ID, *Message) bool { @@ -40,7 +40,7 @@ func TestRegisterValidatorEx(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 3) + hosts := getDefaultHosts(t, 3) psubs := getPubsubs(ctx, hosts) err := psubs[0].RegisterTopicValidator("test", @@ -69,7 +69,7 @@ func TestValidate(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -123,7 +123,7 @@ func TestValidate2(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 1) + hosts := getDefaultHosts(t, 1) psubs := getPubsubs(ctx, hosts) topic := "foobar" @@ -201,7 +201,7 @@ func TestValidateOverload(t *testing.T) { for tci, tc := range tcs { t.Run(fmt.Sprintf("%d", tci), func(t *testing.T) { - hosts := getNetHosts(t, ctx, 2) + hosts := getDefaultHosts(t, 2) psubs := getPubsubs(ctx, hosts) connect(t, hosts[0], hosts[1]) @@ -273,7 +273,7 @@ func TestValidateAssortedOptions(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - hosts := getNetHosts(t, ctx, 10) + hosts := getDefaultHosts(t, 10) psubs := getPubsubs(ctx, hosts, WithValidateQueueSize(10), WithValidateThrottle(10), From b23b3ee559c5989b9b98aa94f95ed53fcf9033d4 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 11 Jul 2024 11:46:35 +0000 Subject: [PATCH 02/21] Switch to the new peer notify mechanism (#564) 1. Only listen for peers added and identify events. 2. Remove the old "Limited" check. Peers only show up as "Connected" if they have non-limited connections. 3. Don't bother listening for new connections directly and/or connectivity changes. We'll get a new identify event per new connection regardless. 
fixes #546 --- notify.go | 75 --------------------------------- peer_notify.go | 112 +++++++++++++++++++++++++++++++++++++++++++++++++ pubsub.go | 6 +-- 3 files changed, 115 insertions(+), 78 deletions(-) delete mode 100644 notify.go create mode 100644 peer_notify.go diff --git a/notify.go b/notify.go deleted file mode 100644 index f560d398..00000000 --- a/notify.go +++ /dev/null @@ -1,75 +0,0 @@ -package pubsub - -import ( - "github.com/libp2p/go-libp2p/core/network" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -var _ network.Notifiee = (*PubSubNotif)(nil) - -type PubSubNotif PubSub - -func (p *PubSubNotif) OpenedStream(n network.Network, s network.Stream) { -} - -func (p *PubSubNotif) ClosedStream(n network.Network, s network.Stream) { -} - -func (p *PubSubNotif) Connected(n network.Network, c network.Conn) { - // ignore transient connections - if c.Stat().Limited { - return - } - - go func() { - p.newPeersPrioLk.RLock() - p.newPeersMx.Lock() - p.newPeersPend[c.RemotePeer()] = struct{}{} - p.newPeersMx.Unlock() - p.newPeersPrioLk.RUnlock() - - select { - case p.newPeers <- struct{}{}: - default: - } - }() -} - -func (p *PubSubNotif) Disconnected(n network.Network, c network.Conn) { -} - -func (p *PubSubNotif) Listen(n network.Network, _ ma.Multiaddr) { -} - -func (p *PubSubNotif) ListenClose(n network.Network, _ ma.Multiaddr) { -} - -func (p *PubSubNotif) Initialize() { - isTransient := func(pid peer.ID) bool { - for _, c := range p.host.Network().ConnsToPeer(pid) { - if !c.Stat().Limited { - return false - } - } - - return true - } - - p.newPeersPrioLk.RLock() - p.newPeersMx.Lock() - for _, pid := range p.host.Network().Peers() { - if isTransient(pid) { - continue - } - - p.newPeersPend[pid] = struct{}{} - } - p.newPeersMx.Unlock() - p.newPeersPrioLk.RUnlock() - - select { - case p.newPeers <- struct{}{}: - default: - } -} diff --git a/peer_notify.go b/peer_notify.go new file mode 100644 index 00000000..44aceeef --- /dev/null +++ b/peer_notify.go @@ -0,0 +1,112 @@ +package pubsub + +import ( + "context" + + "github.com/libp2p/go-libp2p/core/event" + "github.com/libp2p/go-libp2p/core/network" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/protocol" +) + +func (ps *PubSub) watchForNewPeers(ctx context.Context) { + // We don't bother subscribing to "connectivity" events because we always run identify after + // every new connection. 
+ sub, err := ps.host.EventBus().Subscribe([]interface{}{ + &event.EvtPeerIdentificationCompleted{}, + &event.EvtPeerProtocolsUpdated{}, + }) + if err != nil { + log.Errorf("failed to subscribe to peer identification events: %v", err) + return + } + defer sub.Close() + + ps.newPeersPrioLk.RLock() + ps.newPeersMx.Lock() + for _, pid := range ps.host.Network().Peers() { + if ps.host.Network().Connectedness(pid) != network.Connected { + continue + } + ps.newPeersPend[pid] = struct{}{} + } + ps.newPeersMx.Unlock() + ps.newPeersPrioLk.RUnlock() + + select { + case ps.newPeers <- struct{}{}: + default: + } + + var supportsProtocol func(protocol.ID) bool + if ps.protoMatchFunc != nil { + var supportedProtocols []func(protocol.ID) bool + for _, proto := range ps.rt.Protocols() { + + supportedProtocols = append(supportedProtocols, ps.protoMatchFunc(proto)) + } + supportsProtocol = func(proto protocol.ID) bool { + for _, fn := range supportedProtocols { + if (fn)(proto) { + return true + } + } + return false + } + } else { + supportedProtocols := make(map[protocol.ID]struct{}) + for _, proto := range ps.rt.Protocols() { + supportedProtocols[proto] = struct{}{} + } + supportsProtocol = func(proto protocol.ID) bool { + _, ok := supportedProtocols[proto] + return ok + } + } + + for ctx.Err() == nil { + var ev any + select { + case <-ctx.Done(): + return + case ev = <-sub.Out(): + } + + var protos []protocol.ID + var peer peer.ID + switch ev := ev.(type) { + case event.EvtPeerIdentificationCompleted: + peer = ev.Peer + protos = ev.Protocols + case event.EvtPeerProtocolsUpdated: + peer = ev.Peer + protos = ev.Added + default: + continue + } + + // We don't bother checking connectivity (connected and non-"limited") here because + // we'll check when actually handling the new peer. + + for _, p := range protos { + if supportsProtocol(p) { + ps.notifyNewPeer(peer) + break + } + } + } + +} + +func (ps *PubSub) notifyNewPeer(peer peer.ID) { + ps.newPeersPrioLk.RLock() + ps.newPeersMx.Lock() + ps.newPeersPend[peer] = struct{}{} + ps.newPeersMx.Unlock() + ps.newPeersPrioLk.RUnlock() + + select { + case ps.newPeers <- struct{}{}: + default: + } +} diff --git a/pubsub.go b/pubsub.go index c4ecae65..24c297dd 100644 --- a/pubsub.go +++ b/pubsub.go @@ -327,14 +327,12 @@ func NewPubSub(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option h.SetStreamHandler(id, ps.handleNewStream) } } - h.Network().Notify((*PubSubNotif)(ps)) + go ps.watchForNewPeers(ctx) ps.val.Start(ps) go ps.processLoop(ctx) - (*PubSubNotif)(ps).Initialize() - return ps, nil } @@ -687,6 +685,8 @@ func (p *PubSub) handlePendingPeers() { p.newPeersPrioLk.Unlock() for pid := range newPeers { + // Make sure we have a non-limited connection. We do this late because we may have + // disconnected in the meantime. 
if p.host.Network().Connectedness(pid) != network.Connected { continue } From 093f13ce165f007a2375cffb93205b1b21701d62 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Thu, 11 Jul 2024 12:50:34 +0000 Subject: [PATCH 03/21] test: test notify protocols updated (#567) Signed-off-by: gfanton <8671905+gfanton@users.noreply.github.com> Co-authored-by: gfanton <8671905+gfanton@users.noreply.github.com> --- notify_test.go | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 notify_test.go diff --git a/notify_test.go b/notify_test.go new file mode 100644 index 00000000..fa5b755a --- /dev/null +++ b/notify_test.go @@ -0,0 +1,76 @@ +package pubsub + +import ( + "context" + "testing" + "time" + + "github.com/libp2p/go-libp2p/p2p/protocol/identify" +) + +func TestNotifyPeerProtocolsUpdated(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + hosts := getDefaultHosts(t, 2) + + // Initialize id services. + { + ids1, err := identify.NewIDService(hosts[0]) + if err != nil { + t.Fatal(err) + } + ids1.Start() + defer ids1.Close() + + ids2, err := identify.NewIDService(hosts[1]) + if err != nil { + t.Fatal(err) + } + ids2.Start() + defer ids2.Close() + } + + psubs0 := getPubsub(ctx, hosts[0]) + connect(t, hosts[0], hosts[1]) + // Delay to make sure that peers are connected. + <-time.After(time.Millisecond * 100) + psubs1 := getPubsub(ctx, hosts[1]) + + // Pubsub 0 joins topic "test". + topic0, err := psubs0.Join("test") + if err != nil { + t.Fatal(err) + } + defer topic0.Close() + + sub0, err := topic0.Subscribe() + if err != nil { + t.Fatal(err) + } + defer sub0.Cancel() + + // Pubsub 1 joins topic "test". + topic1, err := psubs1.Join("test") + if err != nil { + t.Fatal(err) + } + defer topic1.Close() + + sub1, err := topic1.Subscribe() + if err != nil { + t.Fatal(err) + } + defer sub1.Cancel() + + // Delay before checking results (similar to most tests). 
+ <-time.After(time.Millisecond * 100) + + if len(topic0.ListPeers()) == 0 { + t.Fatalf("topic0 should at least have 1 peer") + } + + if len(topic1.ListPeers()) == 0 { + t.Fatalf("topic1 should at least have 1 peer") + } +} From e508d8643ddb0b9557dd97a827380f267a18082e Mon Sep 17 00:00:00 2001 From: Sorin Stanculeanu <34831323+sstanculeanu@users.noreply.github.com> Date: Thu, 11 Jul 2024 18:25:52 +0300 Subject: [PATCH 04/21] added missing Close call on the AddrBook member of GossipSubRouter (#568) --- gossipsub.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/gossipsub.go b/gossipsub.go index a36049f7..3121a210 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -3,6 +3,7 @@ package pubsub import ( "context" "fmt" + "io" "math/rand" "sort" "time" @@ -543,6 +544,13 @@ func (gs *GossipSubRouter) manageAddrBook() { for { select { case <-gs.p.ctx.Done(): + cabCloser, ok := gs.cab.(io.Closer) + if ok { + errClose := cabCloser.Close() + if errClose != nil { + log.Warnf("failed to close addr book: %v", errClose) + } + } return case ev := <-sub.Out(): switch ev := ev.(type) { From 88c73f4a89bbf7bb1ca04af386577d5ea589e7f8 Mon Sep 17 00:00:00 2001 From: web3-bot Date: Mon, 5 Aug 2024 17:25:23 +0000 Subject: [PATCH 05/21] chore: add or force update .github/workflows/go-test.yml --- .github/workflows/go-test.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/go-test.yml diff --git a/.github/workflows/go-test.yml b/.github/workflows/go-test.yml new file mode 100644 index 00000000..505ece58 --- /dev/null +++ b/.github/workflows/go-test.yml @@ -0,0 +1,21 @@ +name: Go Test + +on: + pull_request: + push: + branches: ["master"] + workflow_dispatch: + merge_group: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + +jobs: + go-test: + uses: ipdxco/unified-github-workflows/.github/workflows/go-test.yml@v1.0 + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} From db70c1d6784547fbdf3ed5195088f1eafc29c586 Mon Sep 17 00:00:00 2001 From: web3-bot Date: Mon, 5 Aug 2024 17:25:23 +0000 Subject: [PATCH 06/21] chore: add or force update .github/workflows/go-check.yml --- .github/workflows/go-check.yml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .github/workflows/go-check.yml diff --git a/.github/workflows/go-check.yml b/.github/workflows/go-check.yml new file mode 100644 index 00000000..724ef8e3 --- /dev/null +++ b/.github/workflows/go-check.yml @@ -0,0 +1,19 @@ +name: Go Checks + +on: + pull_request: + push: + branches: ["master"] + workflow_dispatch: + merge_group: + +permissions: + contents: read + +concurrency: + group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name == 'push' && github.sha || github.ref }} + cancel-in-progress: true + +jobs: + go-check: + uses: ipdxco/unified-github-workflows/.github/workflows/go-check.yml@v1.0 From 7c54be0278015a172a520232d698ade3e05c8f57 Mon Sep 17 00:00:00 2001 From: web3-bot Date: Mon, 5 Aug 2024 17:25:23 +0000 Subject: [PATCH 07/21] chore: add or force update .github/workflows/releaser.yml --- .github/workflows/releaser.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/releaser.yml diff --git a/.github/workflows/releaser.yml b/.github/workflows/releaser.yml new file mode 100644 index 00000000..a2b2a044 --- /dev/null +++ b/.github/workflows/releaser.yml @@ -0,0 +1,21 
@@ +name: Releaser + +on: + push: + paths: ["version.json"] + workflow_dispatch: + +permissions: + contents: write + +concurrency: + group: ${{ github.workflow }}-${{ github.sha }} + cancel-in-progress: true + +jobs: + releaser: + uses: ipdxco/unified-github-workflows/.github/workflows/releaser.yml@v1.0 + with: + sources: '["version.json"]' + secrets: + UCI_GITHUB_TOKEN: ${{ secrets.UCI_GITHUB_TOKEN }} From b32ed641c0da8bee16dc5956ddaae9ba98878242 Mon Sep 17 00:00:00 2001 From: web3-bot Date: Mon, 5 Aug 2024 17:25:23 +0000 Subject: [PATCH 08/21] chore: add or force update .github/workflows/release-check.yml --- .github/workflows/release-check.yml | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/workflows/release-check.yml diff --git a/.github/workflows/release-check.yml b/.github/workflows/release-check.yml new file mode 100644 index 00000000..681b5ef1 --- /dev/null +++ b/.github/workflows/release-check.yml @@ -0,0 +1,21 @@ +name: Release Checker + +on: + pull_request_target: + paths: ["version.json"] + types: [ opened, synchronize, reopened, labeled, unlabeled ] + workflow_dispatch: + +permissions: + contents: write + pull-requests: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + release-check: + uses: ipdxco/unified-github-workflows/.github/workflows/release-check.yml@v1.0 + with: + sources: '["version.json"]' From ca1b3dabb768821a48d93b3db36be3f8ef1b7d25 Mon Sep 17 00:00:00 2001 From: web3-bot Date: Mon, 5 Aug 2024 17:25:23 +0000 Subject: [PATCH 09/21] chore: add or force update .github/workflows/tagpush.yml --- .github/workflows/tagpush.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 .github/workflows/tagpush.yml diff --git a/.github/workflows/tagpush.yml b/.github/workflows/tagpush.yml new file mode 100644 index 00000000..5ef3fb9e --- /dev/null +++ b/.github/workflows/tagpush.yml @@ -0,0 +1,18 @@ +name: Tag Push Checker + +on: + push: + tags: + - v* + +permissions: + contents: read + issues: write + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + releaser: + uses: ipdxco/unified-github-workflows/.github/workflows/tagpush.yml@v1.0 From 03952ea658cf46d66f3224e51fa4960c02092de2 Mon Sep 17 00:00:00 2001 From: web3-bot Date: Mon, 5 Aug 2024 17:25:23 +0000 Subject: [PATCH 10/21] chore: add or force update version.json --- version.json | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 version.json diff --git a/version.json b/version.json new file mode 100644 index 00000000..ea22ea59 --- /dev/null +++ b/version.json @@ -0,0 +1,3 @@ +{ + "version": "v0.11.0" +} From 435b99e317fabd8b495a3e8ef648253a546f0d21 Mon Sep 17 00:00:00 2001 From: galargh Date: Mon, 5 Aug 2024 19:38:07 +0200 Subject: [PATCH 11/21] chore: go fmt --- subscription_filter_test.go | 16 ++++++++-------- validation_builtin_test.go | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/subscription_filter_test.go b/subscription_filter_test.go index 7ee54a86..0057cdcf 100644 --- a/subscription_filter_test.go +++ b/subscription_filter_test.go @@ -19,15 +19,15 @@ func TestBasicSubscriptionFilter(t *testing.T) { topic3 := "test3" yes := true subs := []*pb.RPC_SubOpts{ - &pb.RPC_SubOpts{ + { Topicid: &topic1, Subscribe: &yes, }, - &pb.RPC_SubOpts{ + { Topicid: &topic2, Subscribe: &yes, }, - &pb.RPC_SubOpts{ + { Topicid: &topic3, Subscribe: &yes, }, @@ -108,24 +108,24 @@ func TestSubscriptionFilterDeduplication(t *testing.T) { 
yes := true no := false subs := []*pb.RPC_SubOpts{ - &pb.RPC_SubOpts{ + { Topicid: &topic1, Subscribe: &yes, }, - &pb.RPC_SubOpts{ + { Topicid: &topic1, Subscribe: &yes, }, - &pb.RPC_SubOpts{ + { Topicid: &topic2, Subscribe: &yes, }, - &pb.RPC_SubOpts{ + { Topicid: &topic2, Subscribe: &no, }, - &pb.RPC_SubOpts{ + { Topicid: &topic3, Subscribe: &yes, }, diff --git a/validation_builtin_test.go b/validation_builtin_test.go index 267cc6be..bca8774c 100644 --- a/validation_builtin_test.go +++ b/validation_builtin_test.go @@ -246,7 +246,7 @@ func (r *replayActor) replay(msg *pb.Message) { var peers []peer.ID r.mx.Lock() - for p, _ := range r.out { + for p := range r.out { if rng.Intn(2) > 0 { peers = append(peers, p) } From 8f56e8c97ae2df675d33d7a624fc5315ff61d8f8 Mon Sep 17 00:00:00 2001 From: galargh Date: Mon, 5 Aug 2024 19:43:28 +0200 Subject: [PATCH 12/21] chore: update rand usage --- backoff.go | 1 - floodsub_test.go | 11 ++++++----- gossipsub_spam_test.go | 2 +- gossipsub_test.go | 35 ++++++++++++++++++----------------- 4 files changed, 25 insertions(+), 24 deletions(-) diff --git a/backoff.go b/backoff.go index 4909e153..99ca7fd0 100644 --- a/backoff.go +++ b/backoff.go @@ -43,7 +43,6 @@ func newBackoff(ctx context.Context, sizeThreshold int, cleanupInterval time.Dur info: make(map[peer.ID]*backoffHistory), } - rand.Seed(time.Now().UnixNano()) // used for jitter go b.cleanupLoop(ctx) return b diff --git a/floodsub_test.go b/floodsub_test.go index 0168b15f..8a2db35b 100644 --- a/floodsub_test.go +++ b/floodsub_test.go @@ -3,11 +3,12 @@ package pubsub import ( "bytes" "context" + crand "crypto/rand" "crypto/sha256" "encoding/base64" "fmt" "io" - "math/rand" + mrand "math/rand" "sort" "sync" "testing" @@ -25,7 +26,7 @@ import ( func checkMessageRouting(t *testing.T, topic string, pubs []*PubSub, subs []*Subscription) { data := make([]byte, 16) - rand.Read(data) + crand.Read(data) for _, p := range pubs { err := p.Publish(topic, data) @@ -58,7 +59,7 @@ func denseConnect(t *testing.T, hosts []host.Host) { func connectSome(t *testing.T, hosts []host.Host, d int) { for i, a := range hosts { for j := 0; j < d; j++ { - n := rand.Intn(len(hosts)) + n := mrand.Intn(len(hosts)) if n == i { j-- continue @@ -157,7 +158,7 @@ func TestBasicFloodsub(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d the flooooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -1006,7 +1007,7 @@ func TestConfigurableMaxMessageSize(t *testing.T) { // 2mb payload. 
msg := make([]byte, 1<<21) - rand.Read(msg) + crand.Read(msg) err := psubs[0].Publish(topic, msg) if err != nil { t.Fatal(err) diff --git a/gossipsub_spam_test.go b/gossipsub_spam_test.go index 3ccb1ab4..8e9b40e3 100644 --- a/gossipsub_spam_test.go +++ b/gossipsub_spam_test.go @@ -2,7 +2,7 @@ package pubsub import ( "context" - "math/rand" + "crypto/rand" "strconv" "sync" "testing" diff --git a/gossipsub_test.go b/gossipsub_test.go index 8c9419d8..d4a8a79d 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -3,9 +3,10 @@ package pubsub import ( "bytes" "context" + crand "crypto/rand" "fmt" "io" - "math/rand" + mrand "math/rand" "sync" "sync/atomic" "testing" @@ -63,7 +64,7 @@ func TestSparseGossipsub(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -104,7 +105,7 @@ func TestDenseGossipsub(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -358,7 +359,7 @@ func TestGossipsubGossip(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -416,7 +417,7 @@ func TestGossipsubGossipPiggyback(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) psubs[owner].Publish("bazcrux", msg) @@ -563,7 +564,7 @@ func TestGossipsubPrune(t *testing.T) { for i := 0; i < 10; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -661,7 +662,7 @@ func TestGossipsubPruneBackoffTime(t *testing.T) { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) // Don't publish from host 0, since everyone should have pruned it. 
- owner := rand.Intn(len(psubs)-1) + 1 + owner := mrand.Intn(len(psubs)-1) + 1 psubs[owner].Publish("foobar", msg) @@ -706,7 +707,7 @@ func TestGossipsubGraft(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -755,7 +756,7 @@ func TestGossipsubRemovePeer(t *testing.T) { for i := 0; i < 10; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := 5 + rand.Intn(len(psubs)-5) + owner := 5 + mrand.Intn(len(psubs)-5) psubs[owner].Publish("foobar", msg) @@ -803,7 +804,7 @@ func TestGossipsubGraftPruneRetry(t *testing.T) { for i, topic := range topics { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish(topic, msg) @@ -849,7 +850,7 @@ func TestGossipsubControlPiggyback(t *testing.T) { // create a background flood of messages that overloads the queues done := make(chan struct{}) go func() { - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) for i := 0; i < 10000; i++ { msg := []byte("background flooooood") psubs[owner].Publish("flood", msg) @@ -887,7 +888,7 @@ func TestGossipsubControlPiggyback(t *testing.T) { for i, topic := range topics { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish(topic, msg) @@ -930,7 +931,7 @@ func TestMixedGossipsub(t *testing.T) { for i := 0; i < 100; i++ { msg := []byte(fmt.Sprintf("%d it's not a floooooood %d", i, i)) - owner := rand.Intn(len(psubs)) + owner := mrand.Intn(len(psubs)) psubs[owner].Publish("foobar", msg) @@ -2217,7 +2218,7 @@ func TestGossipsubRPCFragmentation(t *testing.T) { msgSize := 20000 for i := 0; i < nMessages; i++ { msg := make([]byte, msgSize) - rand.Read(msg) + crand.Read(msg) ps.Publish("test", msg) time.Sleep(20 * time.Millisecond) } @@ -2357,7 +2358,7 @@ func TestFragmentRPCFunction(t *testing.T) { mkMsg := func(size int) *pb.Message { msg := &pb.Message{} msg.Data = make([]byte, size-4) // subtract the protobuf overhead, so msg.Size() returns requested size - rand.Read(msg.Data) + crand.Read(msg.Data) return msg } @@ -2471,7 +2472,7 @@ func TestFragmentRPCFunction(t *testing.T) { messageIds := make([]string, msgsPerTopic) for m := 0; m < msgsPerTopic; m++ { mid := make([]byte, messageIdSize) - rand.Read(mid) + crand.Read(mid) messageIds[m] = string(mid) } rpc.Control.Ihave[i] = &pb.ControlIHave{MessageIDs: messageIds} @@ -2492,7 +2493,7 @@ func TestFragmentRPCFunction(t *testing.T) { // It should not be present in the fragmented messages, but smaller IDs should be rpc.Reset() giantIdBytes := make([]byte, limit*2) - rand.Read(giantIdBytes) + crand.Read(giantIdBytes) rpc.Control = &pb.ControlMessage{ Iwant: []*pb.ControlIWant{ {MessageIDs: []string{"hello", string(giantIdBytes)}}, From 097b4671b033d40918dc2ed9bbbabf1ab411300b Mon Sep 17 00:00:00 2001 From: galargh Date: Mon, 5 Aug 2024 19:51:38 +0200 Subject: [PATCH 13/21] chore: staticcheck --- floodsub_test.go | 1 + gossipsub_spam_test.go | 1 + gossipsub_test.go | 9 +++------ timecache/time_cache.go | 4 ---- topic_test.go | 2 +- trace_test.go | 1 + tracer.go | 1 + 7 files changed, 8 insertions(+), 11 deletions(-) diff --git a/floodsub_test.go b/floodsub_test.go index 8a2db35b..e7bf379f 100644 --- a/floodsub_test.go +++ b/floodsub_test.go @@ -21,6 +21,7 @@ import ( 
"github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/protocol" + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) diff --git a/gossipsub_spam_test.go b/gossipsub_spam_test.go index 8e9b40e3..ab22e7a9 100644 --- a/gossipsub_spam_test.go +++ b/gossipsub_spam_test.go @@ -15,6 +15,7 @@ import ( pb "github.com/libp2p/go-libp2p-pubsub/pb" + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) diff --git a/gossipsub_test.go b/gossipsub_test.go index d4a8a79d..8c2a216e 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -20,6 +20,7 @@ import ( "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/record" + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) @@ -1924,13 +1925,11 @@ func TestGossipSubLeaveTopic(t *testing.T) { connect(t, h[0], h[1]) // Join all peers - var subs []*Subscription for _, ps := range psubs { - sub, err := ps.Subscribe("test") + _, err := ps.Subscribe("test") if err != nil { t.Fatal(err) } - subs = append(subs, sub) } time.Sleep(time.Second) @@ -2005,13 +2004,11 @@ func TestGossipSubJoinTopic(t *testing.T) { router0.backoff["test"] = peerMap // Join all peers - var subs []*Subscription for _, ps := range psubs { - sub, err := ps.Subscribe("test") + _, err := ps.Subscribe("test") if err != nil { t.Fatal(err) } - subs = append(subs, sub) } time.Sleep(time.Second) diff --git a/timecache/time_cache.go b/timecache/time_cache.go index e33bc354..ee34fd5b 100644 --- a/timecache/time_cache.go +++ b/timecache/time_cache.go @@ -2,12 +2,8 @@ package timecache import ( "time" - - logger "github.com/ipfs/go-log/v2" ) -var log = logger.Logger("pubsub/timecache") - // Stategy is the TimeCache expiration strategy to use. 
type Strategy uint8 diff --git a/topic_test.go b/topic_test.go index a27113b2..ef05feb4 100644 --- a/topic_test.go +++ b/topic_test.go @@ -743,7 +743,7 @@ func notifSubThenUnSub(ctx context.Context, t *testing.T, topics []*Topic) { } // Wait for the unsubscribe messages to reach the primary peer - for len(primaryTopic.ListPeers()) < 0 { + for len(primaryTopic.ListPeers()) > 0 { time.Sleep(time.Millisecond * 100) } diff --git a/trace_test.go b/trace_test.go index 7717a7e2..287216f1 100644 --- a/trace_test.go +++ b/trace_test.go @@ -17,6 +17,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) diff --git a/tracer.go b/tracer.go index 8e744c91..cbb92ad7 100644 --- a/tracer.go +++ b/tracer.go @@ -17,6 +17,7 @@ import ( "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/protocol" + //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated "github.com/libp2p/go-msgio/protoio" ) From 5c9a4d053625681baddf5cbe1d37fee8005f9820 Mon Sep 17 00:00:00 2001 From: Piotr Galar Date: Mon, 5 Aug 2024 20:03:41 +0200 Subject: [PATCH 14/21] ci: create go-test-config.json --- .github/workflows/go-test-config.json | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 .github/workflows/go-test-config.json diff --git a/.github/workflows/go-test-config.json b/.github/workflows/go-test-config.json new file mode 100644 index 00000000..d59c1fbf --- /dev/null +++ b/.github/workflows/go-test-config.json @@ -0,0 +1,4 @@ +{ + "skipOSes": ["windows"], + "skipRace": true +} From dc33a34d4d4976ec48e44b32a59344513c560446 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 6 Aug 2024 20:43:14 +0000 Subject: [PATCH 15/21] ci: disable testing on macos (#571) It appears to be a bit flaky and we have nothing macos specific in this repo. --- .github/workflows/go-test-config.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/go-test-config.json b/.github/workflows/go-test-config.json index d59c1fbf..b0642fbe 100644 --- a/.github/workflows/go-test-config.json +++ b/.github/workflows/go-test-config.json @@ -1,4 +1,4 @@ { - "skipOSes": ["windows"], + "skipOSes": ["windows", "macos"], "skipRace": true } From 19ffbb3a482caecabb8520917c631e3047a78094 Mon Sep 17 00:00:00 2001 From: Steven Allen Date: Tue, 6 Aug 2024 20:43:28 +0000 Subject: [PATCH 16/21] Re-enable disabled gossipsub test (#566) And change it to take into account the fact that libp2p now trims connections immediately (when no grace-period is specified) instead of waiting for a timeout. 
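As a hedged illustration of what "trims connections immediately" means for the updated test, here is a minimal sketch using the go-libp2p connection manager with a zero grace period; the watermarks and host setup below are illustrative assumptions, not the exact values used by the test:

```go
package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/p2p/net/connmgr"
)

func main() {
	// With WithGracePeriod(0), connections above the high watermark are
	// eligible for trimming as soon as they are opened, so a test does not
	// need to call TrimOpenConns and wait for a timeout before checking
	// which peers remain connected.
	cm, err := connmgr.NewConnManager(
		1, // low watermark (illustrative)
		4, // high watermark (illustrative)
		connmgr.WithGracePeriod(0),
	)
	if err != nil {
		panic(err)
	}

	h, err := libp2p.New(libp2p.ConnectionManager(cm))
	if err != nil {
		panic(err)
	}
	defer h.Close()

	fmt.Println("host with an immediately-trimming connection manager:", h.ID())
}
```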
--- gossipsub_connmgr_test.go | 15 +-------------- gossipsub_test.go | 13 ++++++++++--- 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/gossipsub_connmgr_test.go b/gossipsub_connmgr_test.go index accf57dd..a5477026 100644 --- a/gossipsub_connmgr_test.go +++ b/gossipsub_connmgr_test.go @@ -15,7 +15,6 @@ import ( ) func TestGossipsubConnTagMessageDeliveries(t *testing.T) { - t.Skip("Test disabled with go-libp2p v0.22.0") // TODO: reenable test when updating to v0.23.0 ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -90,7 +89,7 @@ func TestGossipsubConnTagMessageDeliveries(t *testing.T) { // sybil squatters to be connected later sybilHosts := getDefaultHosts(t, nSquatter) for _, h := range sybilHosts { - squatter := &sybilSquatter{h: h} + squatter := &sybilSquatter{h: h, ignoreErrors: true} h.SetStreamHandler(GossipSubID_v10, squatter.handleStream) } @@ -144,18 +143,6 @@ func TestGossipsubConnTagMessageDeliveries(t *testing.T) { allHosts := append(honestHosts, sybilHosts...) connectAll(t, allHosts) - // verify that we have a bunch of connections - for _, h := range honestHosts { - if len(h.Network().Conns()) != nHonest+nSquatter-1 { - t.Errorf("expected to have conns to all peers, have %d", len(h.Network().Conns())) - } - } - - // force the connection managers to trim, so we don't need to muck about with timing as much - for _, cm := range connmgrs { - cm.TrimOpenConns(ctx) - } - // we should still have conns to all the honest peers, but not the sybils for _, h := range honestHosts { nHonestConns := 0 diff --git a/gossipsub_test.go b/gossipsub_test.go index 8c2a216e..4481be9e 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -2025,7 +2025,8 @@ func TestGossipSubJoinTopic(t *testing.T) { } type sybilSquatter struct { - h host.Host + h host.Host + ignoreErrors bool // set to false to ignore connection/stream errors. } func (sq *sybilSquatter) handleStream(s network.Stream) { @@ -2033,7 +2034,10 @@ func (sq *sybilSquatter) handleStream(s network.Stream) { os, err := sq.h.NewStream(context.Background(), s.Conn().RemotePeer(), GossipSubID_v10) if err != nil { - panic(err) + if !sq.ignoreErrors { + panic(err) + } + return } // send a subscription for test in the output stream to become candidate for GRAFT @@ -2044,7 +2048,10 @@ func (sq *sybilSquatter) handleStream(s network.Stream) { topic := "test" err = w.WriteMsg(&pb.RPC{Subscriptions: []*pb.RPC_SubOpts{{Subscribe: &truth, Topicid: &topic}}}) if err != nil { - panic(err) + if !sq.ignoreErrors { + panic(err) + } + return } var rpc pb.RPC From b421b3ab05638f24085f4087787d20add09b1e3e Mon Sep 17 00:00:00 2001 From: Pop Chunhapanya Date: Fri, 16 Aug 2024 22:16:35 +0700 Subject: [PATCH 17/21] GossipSub v1.2: IDONTWANT control message and priority queue. (#553) ## GossipSub v1.2 implementation Specification: libp2p/specs#548 ### Work Summary Sending IDONTWANT Implement a smart queue Add priorities to the smart queue Put IDONTWANT packets into the smart priority queue as soon as the node gets the packets Handling IDONTWANT Use a map to remember the message ids whose IDONTWANT packets have been received Implement max_idontwant_messages (ignore the IDONWANT packets if the max is reached) Clear the message IDs from the cache after 3 heartbeats Hash the message IDs before putting them into the cache. 
More requested features

Add a feature test to not send IDONTWANT if the other side doesn't support it

### Commit Summary

* Replace sending channel with the smart rpcQueue
  Since we want to implement a priority queue later, we need to replace the normal sending channels with the new smart structures first.
* Implement UrgentPush in the smart rpcQueue
  UrgentPush allows you to push an rpc packet to the front of the queue so that it will be popped out fast.
* Add IDONTWANT to rpc.proto and trace.proto
* Send IDONTWANT right before validation step
  Most importantly, this commit adds a new method called PreValidation to the interface PubSubRouter, which will be called right before validating the gossipsub message. In GossipSubRouter, PreValidation will send the IDONTWANT control messages to all the mesh peers of the topics of the received messages.
* Test GossipSub IDONWANT sending
* Send IDONWANT only for large messages
* Handle IDONTWANT control messages
  When receiving IDONTWANTs, the host should remember the message ids contained in IDONTWANTs using a hash map. When receiving messages with those ids, it shouldn't forward them to the peers who already sent the IDONTWANTs. When the maximum number of IDONTWANTs is reached for any particular peer, the host should ignore any excessive IDONTWANTs from that peer.
* Clear expired message IDs from the IDONTWANT cache
  If the message IDs received from IDONTWANTs are older than 3 heartbeats, they should be removed from the IDONTWANT cache.
* Keep the hashes of IDONTWANT message ids instead
  Rather than keeping the raw message ids, keep their hashes instead to save memory and protect against memory DoS attacks.
* Increase GossipSubMaxIHaveMessages to 1000
* fixup! Clear expired message IDs from the IDONTWANT cache
* Not send IDONTWANT if the receiver doesn't support
* fixup! Replace sending channel with the smart rpcQueue
* Not use pointers in rpcQueue
* Simply rcpQueue by using only one mutex
* Check ctx error in rpc sending worker
  Co-authored-by: Steven Allen
* fixup! Simply rcpQueue by using only one mutex
* fixup! Keep the hashes of IDONTWANT message ids instead
* Use AfterFunc instead implementing our own
* Fix misc lint errors
* fixup! Fix misc lint errors
* Revert "Increase GossipSubMaxIHaveMessages to 1000"
  This reverts commit 6fabcdd068a5f5238c5280a3460af9c3998418ec.
* Increase GossipSubMaxIDontWantMessages to 1000
* fixup! Handle IDONTWANT control messages
* Skip TestGossipsubConnTagMessageDeliveries
* Skip FuzzAppendOrMergeRPC
* Revert "Skip FuzzAppendOrMergeRPC"
  This reverts commit f141e13234de0960d139339acb636a1afea9e219.
* fixup! Send IDONWANT only for large messages
* fixup! fixup! Keep the hashes of IDONTWANT message ids instead
* fixup! Implement UrgentPush in the smart rpcQueue
* fixup!
Use AfterFunc instead implementing our own --------- Co-authored-by: Steven Allen --- comm.go | 41 ++- floodsub.go | 12 +- gossipsub.go | 240 ++++++++++++---- gossipsub_connmgr_test.go | 1 + gossipsub_feat.go | 10 +- gossipsub_feat_test.go | 21 +- gossipsub_spam_test.go | 137 ++++++++- gossipsub_test.go | 587 +++++++++++++++++++++++++++++++++++++- pb/rpc.pb.go | 328 ++++++++++++++++++--- pb/rpc.proto | 8 +- pb/trace.pb.go | 391 ++++++++++++++++++++----- pb/trace.proto | 5 + pubsub.go | 90 +++--- randomsub.go | 12 +- rpc_queue.go | 147 ++++++++++ rpc_queue_test.go | 229 +++++++++++++++ trace.go | 20 +- 17 files changed, 2032 insertions(+), 247 deletions(-) create mode 100644 rpc_queue.go create mode 100644 rpc_queue_test.go diff --git a/comm.go b/comm.go index 2dee9b2e..d38cce08 100644 --- a/comm.go +++ b/comm.go @@ -114,7 +114,7 @@ func (p *PubSub) notifyPeerDead(pid peer.ID) { } } -func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing <-chan *RPC) { +func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing *rpcQueue) { s, err := p.host.NewStream(p.ctx, pid, p.rt.Protocols()...) if err != nil { log.Debug("opening new stream to peer: ", err, pid) @@ -135,7 +135,7 @@ func (p *PubSub) handleNewPeer(ctx context.Context, pid peer.ID, outgoing <-chan } } -func (p *PubSub) handleNewPeerWithBackoff(ctx context.Context, pid peer.ID, backoff time.Duration, outgoing <-chan *RPC) { +func (p *PubSub) handleNewPeerWithBackoff(ctx context.Context, pid peer.ID, backoff time.Duration, outgoing *rpcQueue) { select { case <-time.After(backoff): p.handleNewPeer(ctx, pid, outgoing) @@ -156,7 +156,7 @@ func (p *PubSub) handlePeerDead(s network.Stream) { p.notifyPeerDead(pid) } -func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing <-chan *RPC) { +func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, outgoing *rpcQueue) { writeRpc := func(rpc *RPC) error { size := uint64(rpc.Size()) @@ -174,20 +174,17 @@ func (p *PubSub) handleSendingMessages(ctx context.Context, s network.Stream, ou } defer s.Close() - for { - select { - case rpc, ok := <-outgoing: - if !ok { - return - } + for ctx.Err() == nil { + rpc, err := outgoing.Pop(ctx) + if err != nil { + log.Debugf("popping message from the queue to send to %s: %s", s.Conn().RemotePeer(), err) + return + } - err := writeRpc(rpc) - if err != nil { - s.Reset() - log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err) - return - } - case <-ctx.Done(): + err = writeRpc(rpc) + if err != nil { + s.Reset() + log.Debugf("writing message to %s: %s", s.Conn().RemotePeer(), err) return } } @@ -209,15 +206,17 @@ func rpcWithControl(msgs []*pb.Message, ihave []*pb.ControlIHave, iwant []*pb.ControlIWant, graft []*pb.ControlGraft, - prune []*pb.ControlPrune) *RPC { + prune []*pb.ControlPrune, + idontwant []*pb.ControlIDontWant) *RPC { return &RPC{ RPC: pb.RPC{ Publish: msgs, Control: &pb.ControlMessage{ - Ihave: ihave, - Iwant: iwant, - Graft: graft, - Prune: prune, + Ihave: ihave, + Iwant: iwant, + Graft: graft, + Prune: prune, + Idontwant: idontwant, }, }, } diff --git a/floodsub.go b/floodsub.go index 20f592e2..45b3fdee 100644 --- a/floodsub.go +++ b/floodsub.go @@ -71,6 +71,8 @@ func (fs *FloodSubRouter) AcceptFrom(peer.ID) AcceptStatus { return AcceptAll } +func (fs *FloodSubRouter) PreValidation([]*Message) {} + func (fs *FloodSubRouter) HandleRPC(rpc *RPC) {} func (fs *FloodSubRouter) Publish(msg *Message) { @@ -83,19 +85,19 @@ func (fs *FloodSubRouter) Publish(msg 
*Message) { continue } - mch, ok := fs.p.peers[pid] + q, ok := fs.p.peers[pid] if !ok { continue } - select { - case mch <- out: - fs.tracer.SendRPC(out, pid) - default: + err := q.Push(out, false) + if err != nil { log.Infof("dropping message to peer %s: queue full", pid) fs.tracer.DropRPC(out, pid) // Drop it. The peer is too slow. + continue } + fs.tracer.SendRPC(out, pid) } } diff --git a/gossipsub.go b/gossipsub.go index 3121a210..117b585c 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -2,6 +2,7 @@ package pubsub import ( "context" + "crypto/sha256" "fmt" "io" "math/rand" @@ -22,13 +23,19 @@ import ( const ( // GossipSubID_v10 is the protocol ID for version 1.0.0 of the GossipSub protocol. - // It is advertised along with GossipSubID_v11 for backwards compatibility. + // It is advertised along with GossipSubID_v11 and GossipSubID_v12 for backwards compatibility. GossipSubID_v10 = protocol.ID("/meshsub/1.0.0") // GossipSubID_v11 is the protocol ID for version 1.1.0 of the GossipSub protocol. + // It is advertised along with GossipSubID_v12 for backwards compatibility. // See the spec for details about how v1.1.0 compares to v1.0.0: // https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md GossipSubID_v11 = protocol.ID("/meshsub/1.1.0") + + // GossipSubID_v12 is the protocol ID for version 1.2.0 of the GossipSub protocol. + // See the spec for details about how v1.2.0 compares to v1.1.0: + // https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.2.md + GossipSubID_v12 = protocol.ID("/meshsub/1.2.0") ) // Defines the default gossipsub parameters. @@ -59,9 +66,17 @@ var ( GossipSubGraftFloodThreshold = 10 * time.Second GossipSubMaxIHaveLength = 5000 GossipSubMaxIHaveMessages = 10 + GossipSubMaxIDontWantMessages = 1000 GossipSubIWantFollowupTime = 3 * time.Second + GossipSubIDontWantMessageThreshold = 1024 // 1KB + GossipSubIDontWantMessageTTL = 3 // 3 heartbeats ) +type checksum struct { + payload [32]byte + length uint8 +} + // GossipSubParams defines all the gossipsub specific parameters. type GossipSubParams struct { // overlay parameters. @@ -201,10 +216,21 @@ type GossipSubParams struct { // MaxIHaveMessages is the maximum number of IHAVE messages to accept from a peer within a heartbeat. MaxIHaveMessages int + // MaxIDontWantMessages is the maximum number of IDONTWANT messages to accept from a peer within a heartbeat. + MaxIDontWantMessages int + // Time to wait for a message requested through IWANT following an IHAVE advertisement. // If the message is not received within this window, a broken promise is declared and // the router may apply bahavioural penalties. IWantFollowupTime time.Duration + + // IDONTWANT is only sent for messages larger than the threshold. This should be greater than + // D_high * the size of the message id. Otherwise, the attacker can do the amplication attack by sending + // small messages while the receiver replies back with larger IDONTWANT messages. + IDontWantMessageThreshold int + + // IDONTWANT is cleared when it's older than the TTL. + IDontWantMessageTTL int } // NewGossipSub returns a new PubSub object using the default GossipSubRouter as the router. 
@@ -223,23 +249,25 @@ func NewGossipSubWithRouter(ctx context.Context, h host.Host, rt PubSubRouter, o func DefaultGossipSubRouter(h host.Host) *GossipSubRouter { params := DefaultGossipSubParams() return &GossipSubRouter{ - peers: make(map[peer.ID]protocol.ID), - mesh: make(map[string]map[peer.ID]struct{}), - fanout: make(map[string]map[peer.ID]struct{}), - lastpub: make(map[string]int64), - gossip: make(map[peer.ID][]*pb.ControlIHave), - control: make(map[peer.ID]*pb.ControlMessage), - backoff: make(map[string]map[peer.ID]time.Time), - peerhave: make(map[peer.ID]int), - iasked: make(map[peer.ID]int), - outbound: make(map[peer.ID]bool), - connect: make(chan connectInfo, params.MaxPendingConnections), - cab: pstoremem.NewAddrBook(), - mcache: NewMessageCache(params.HistoryGossip, params.HistoryLength), - protos: GossipSubDefaultProtocols, - feature: GossipSubDefaultFeatures, - tagTracer: newTagTracer(h.ConnManager()), - params: params, + peers: make(map[peer.ID]protocol.ID), + mesh: make(map[string]map[peer.ID]struct{}), + fanout: make(map[string]map[peer.ID]struct{}), + lastpub: make(map[string]int64), + gossip: make(map[peer.ID][]*pb.ControlIHave), + control: make(map[peer.ID]*pb.ControlMessage), + backoff: make(map[string]map[peer.ID]time.Time), + peerhave: make(map[peer.ID]int), + peerdontwant: make(map[peer.ID]int), + unwanted: make(map[peer.ID]map[checksum]int), + iasked: make(map[peer.ID]int), + outbound: make(map[peer.ID]bool), + connect: make(chan connectInfo, params.MaxPendingConnections), + cab: pstoremem.NewAddrBook(), + mcache: NewMessageCache(params.HistoryGossip, params.HistoryLength), + protos: GossipSubDefaultProtocols, + feature: GossipSubDefaultFeatures, + tagTracer: newTagTracer(h.ConnManager()), + params: params, } } @@ -273,7 +301,10 @@ func DefaultGossipSubParams() GossipSubParams { GraftFloodThreshold: GossipSubGraftFloodThreshold, MaxIHaveLength: GossipSubMaxIHaveLength, MaxIHaveMessages: GossipSubMaxIHaveMessages, + MaxIDontWantMessages: GossipSubMaxIDontWantMessages, IWantFollowupTime: GossipSubIWantFollowupTime, + IDontWantMessageThreshold: GossipSubIDontWantMessageThreshold, + IDontWantMessageTTL: GossipSubIDontWantMessageTTL, SlowHeartbeatWarning: 0.1, } } @@ -422,20 +453,22 @@ func WithGossipSubParams(cfg GossipSubParams) Option { // is the fanout map. Fanout peer lists are expired if we don't publish any // messages to their topic for GossipSubFanoutTTL. 
type GossipSubRouter struct { - p *PubSub - peers map[peer.ID]protocol.ID // peer protocols - direct map[peer.ID]struct{} // direct peers - mesh map[string]map[peer.ID]struct{} // topic meshes - fanout map[string]map[peer.ID]struct{} // topic fanout - lastpub map[string]int64 // last publish time for fanout topics - gossip map[peer.ID][]*pb.ControlIHave // pending gossip - control map[peer.ID]*pb.ControlMessage // pending control messages - peerhave map[peer.ID]int // number of IHAVEs received from peer in the last heartbeat - iasked map[peer.ID]int // number of messages we have asked from peer in the last heartbeat - outbound map[peer.ID]bool // connection direction cache, marks peers with outbound connections - backoff map[string]map[peer.ID]time.Time // prune backoff - connect chan connectInfo // px connection requests - cab peerstore.AddrBook + p *PubSub + peers map[peer.ID]protocol.ID // peer protocols + direct map[peer.ID]struct{} // direct peers + mesh map[string]map[peer.ID]struct{} // topic meshes + fanout map[string]map[peer.ID]struct{} // topic fanout + lastpub map[string]int64 // last publish time for fanout topics + gossip map[peer.ID][]*pb.ControlIHave // pending gossip + control map[peer.ID]*pb.ControlMessage // pending control messages + peerhave map[peer.ID]int // number of IHAVEs received from peer in the last heartbeat + peerdontwant map[peer.ID]int // number of IDONTWANTs received from peer in the last heartbeat + unwanted map[peer.ID]map[checksum]int // TTL of the message ids peers don't want + iasked map[peer.ID]int // number of messages we have asked from peer in the last heartbeat + outbound map[peer.ID]bool // connection direction cache, marks peers with outbound connections + backoff map[string]map[peer.ID]time.Time // prune backoff + connect chan connectInfo // px connection requests + cab peerstore.AddrBook protos []protocol.ID feature GossipSubFeatureTest @@ -663,6 +696,36 @@ func (gs *GossipSubRouter) AcceptFrom(p peer.ID) AcceptStatus { return gs.gate.AcceptFrom(p) } +// PreValidation sends the IDONTWANT control messages to all the mesh +// peers. They need to be sent right before the validation because they +// should be seen by the peers as soon as possible. 
+func (gs *GossipSubRouter) PreValidation(msgs []*Message) { + tmids := make(map[string][]string) + for _, msg := range msgs { + if len(msg.GetData()) < gs.params.IDontWantMessageThreshold { + continue + } + topic := msg.GetTopic() + tmids[topic] = append(tmids[topic], gs.p.idGen.ID(msg)) + } + for topic, mids := range tmids { + if len(mids) == 0 { + continue + } + // shuffle the messages got from the RPC envelope + shuffleStrings(mids) + // send IDONTWANT to all the mesh peers + for p := range gs.mesh[topic] { + // send to only peers that support IDONTWANT + if gs.feature(GossipSubFeatureIdontwant, gs.peers[p]) { + idontwant := []*pb.ControlIDontWant{{MessageIDs: mids}} + out := rpcWithControl(nil, nil, nil, nil, nil, idontwant) + gs.sendRPC(p, out, true) + } + } + } +} + func (gs *GossipSubRouter) HandleRPC(rpc *RPC) { ctl := rpc.GetControl() if ctl == nil { @@ -673,13 +736,14 @@ func (gs *GossipSubRouter) HandleRPC(rpc *RPC) { ihave := gs.handleIWant(rpc.from, ctl) prune := gs.handleGraft(rpc.from, ctl) gs.handlePrune(rpc.from, ctl) + gs.handleIDontWant(rpc.from, ctl) if len(iwant) == 0 && len(ihave) == 0 && len(prune) == 0 { return } - out := rpcWithControl(ihave, nil, iwant, nil, prune) - gs.sendRPC(rpc.from, out) + out := rpcWithControl(ihave, nil, iwant, nil, prune, nil) + gs.sendRPC(rpc.from, out, false) } func (gs *GossipSubRouter) handleIHave(p peer.ID, ctl *pb.ControlMessage) []*pb.ControlIWant { @@ -931,6 +995,26 @@ func (gs *GossipSubRouter) handlePrune(p peer.ID, ctl *pb.ControlMessage) { } } +func (gs *GossipSubRouter) handleIDontWant(p peer.ID, ctl *pb.ControlMessage) { + if gs.unwanted[p] == nil { + gs.unwanted[p] = make(map[checksum]int) + } + + // IDONTWANT flood protection + if gs.peerdontwant[p] >= gs.params.MaxIDontWantMessages { + log.Debugf("IDONWANT: peer %s has advertised too many times (%d) within this heartbeat interval; ignoring", p, gs.peerdontwant[p]) + return + } + gs.peerdontwant[p]++ + + // Remember all the unwanted message ids + for _, idontwant := range ctl.GetIdontwant() { + for _, mid := range idontwant.GetMessageIDs() { + gs.unwanted[p][computeChecksum(mid)] = gs.params.IDontWantMessageTTL + } + } +} + func (gs *GossipSubRouter) addBackoff(p peer.ID, topic string, isUnsubscribe bool) { backoff := gs.params.PruneBackoff if isUnsubscribe { @@ -1091,6 +1175,12 @@ func (gs *GossipSubRouter) Publish(msg *Message) { } for p := range gmap { + mid := gs.p.idGen.ID(msg) + // Check if it has already received an IDONTWANT for the message. 
+ // If so, don't send it to the peer + if _, ok := gs.unwanted[p][computeChecksum(mid)]; ok { + continue + } tosend[p] = struct{}{} } } @@ -1101,7 +1191,7 @@ func (gs *GossipSubRouter) Publish(msg *Message) { continue } - gs.sendRPC(pid, out) + gs.sendRPC(pid, out, false) } } @@ -1186,17 +1276,17 @@ func (gs *GossipSubRouter) Leave(topic string) { func (gs *GossipSubRouter) sendGraft(p peer.ID, topic string) { graft := []*pb.ControlGraft{{TopicID: &topic}} - out := rpcWithControl(nil, nil, nil, graft, nil) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, graft, nil, nil) + gs.sendRPC(p, out, false) } func (gs *GossipSubRouter) sendPrune(p peer.ID, topic string, isUnsubscribe bool) { prune := []*pb.ControlPrune{gs.makePrune(p, topic, gs.doPX, isUnsubscribe)} - out := rpcWithControl(nil, nil, nil, nil, prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, nil, prune, nil) + gs.sendRPC(p, out, false) } -func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) { +func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC, urgent bool) { // do we own the RPC? own := false @@ -1220,14 +1310,14 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) { delete(gs.gossip, p) } - mch, ok := gs.p.peers[p] + q, ok := gs.p.peers[p] if !ok { return } // If we're below the max message size, go ahead and send if out.Size() < gs.p.maxMessageSize { - gs.doSendRPC(out, p, mch) + gs.doSendRPC(out, p, q, urgent) return } @@ -1239,7 +1329,7 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC) { gs.doDropRPC(out, p, fmt.Sprintf("Dropping oversized RPC. Size: %d, limit: %d. (Over by %d bytes)", rpc.Size(), gs.p.maxMessageSize, rpc.Size()-gs.p.maxMessageSize)) continue } - gs.doSendRPC(rpc, p, mch) + gs.doSendRPC(rpc, p, q, urgent) } } @@ -1253,13 +1343,18 @@ func (gs *GossipSubRouter) doDropRPC(rpc *RPC, p peer.ID, reason string) { } } -func (gs *GossipSubRouter) doSendRPC(rpc *RPC, p peer.ID, mch chan *RPC) { - select { - case mch <- rpc: - gs.tracer.SendRPC(rpc, p) - default: +func (gs *GossipSubRouter) doSendRPC(rpc *RPC, p peer.ID, q *rpcQueue, urgent bool) { + var err error + if urgent { + err = q.UrgentPush(rpc, false) + } else { + err = q.Push(rpc, false) + } + if err != nil { gs.doDropRPC(rpc, p, "queue full") + return } + gs.tracer.SendRPC(rpc, p) } // appendOrMergeRPC appends the given RPCs to the slice, merging them if possible. 
@@ -1441,6 +1536,9 @@ func (gs *GossipSubRouter) heartbeat() { // clean up iasked counters gs.clearIHaveCounters() + // clean up IDONTWANT counters + gs.clearIDontWantCounters() + // apply IWANT request penalties gs.applyIwantPenalties() @@ -1693,6 +1791,23 @@ func (gs *GossipSubRouter) clearIHaveCounters() { } } +func (gs *GossipSubRouter) clearIDontWantCounters() { + if len(gs.peerdontwant) > 0 { + // throw away the old map and make a new one + gs.peerdontwant = make(map[peer.ID]int) + } + + // decrement TTLs of all the IDONTWANTs and delete it from the cache when it reaches zero + for _, mids := range gs.unwanted { + for mid := range mids { + mids[mid]-- + if mids[mid] == 0 { + delete(mids, mid) + } + } + } +} + func (gs *GossipSubRouter) applyIwantPenalties() { for p, count := range gs.gossipTracer.GetBrokenPromises() { log.Infof("peer %s didn't follow up in %d IWANT requests; adding penalty", p, count) @@ -1767,8 +1882,8 @@ func (gs *GossipSubRouter) sendGraftPrune(tograft, toprune map[peer.ID][]string, } } - out := rpcWithControl(nil, nil, nil, graft, prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, graft, prune, nil) + gs.sendRPC(p, out, false) } for p, topics := range toprune { @@ -1777,8 +1892,8 @@ func (gs *GossipSubRouter) sendGraftPrune(tograft, toprune map[peer.ID][]string, prune = append(prune, gs.makePrune(p, topic, gs.doPX && !noPX[p], false)) } - out := rpcWithControl(nil, nil, nil, nil, prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, nil, prune, nil) + gs.sendRPC(p, out, false) } } @@ -1844,15 +1959,15 @@ func (gs *GossipSubRouter) flush() { // send gossip first, which will also piggyback pending control for p, ihave := range gs.gossip { delete(gs.gossip, p) - out := rpcWithControl(nil, ihave, nil, nil, nil) - gs.sendRPC(p, out) + out := rpcWithControl(nil, ihave, nil, nil, nil, nil) + gs.sendRPC(p, out, false) } // send the remaining control messages that wasn't merged with gossip for p, ctl := range gs.control { delete(gs.control, p) - out := rpcWithControl(nil, nil, nil, ctl.Graft, ctl.Prune) - gs.sendRPC(p, out) + out := rpcWithControl(nil, nil, nil, ctl.Graft, ctl.Prune, nil) + gs.sendRPC(p, out, false) } } @@ -1873,9 +1988,10 @@ func (gs *GossipSubRouter) piggybackGossip(p peer.ID, out *RPC, ihave []*pb.Cont } func (gs *GossipSubRouter) pushControl(p peer.ID, ctl *pb.ControlMessage) { - // remove IHAVE/IWANT from control message, gossip is not retried + // remove IHAVE/IWANT/IDONTWANT from control message, gossip is not retried ctl.Ihave = nil ctl.Iwant = nil + ctl.Idontwant = nil if ctl.Graft != nil || ctl.Prune != nil { gs.control[p] = ctl } @@ -2037,3 +2153,13 @@ func shuffleStrings(lst []string) { lst[i], lst[j] = lst[j], lst[i] } } + +func computeChecksum(mid string) checksum { + var cs checksum + if len(mid) > 32 || len(mid) == 0 { + cs.payload = sha256.Sum256([]byte(mid)) + } else { + cs.length = uint8(copy(cs.payload[:], mid)) + } + return cs +} diff --git a/gossipsub_connmgr_test.go b/gossipsub_connmgr_test.go index a5477026..e72f5545 100644 --- a/gossipsub_connmgr_test.go +++ b/gossipsub_connmgr_test.go @@ -15,6 +15,7 @@ import ( ) func TestGossipsubConnTagMessageDeliveries(t *testing.T) { + t.Skip("flaky test disabled") ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/gossipsub_feat.go b/gossipsub_feat.go index d5750af3..49c7423c 100644 --- a/gossipsub_feat.go +++ b/gossipsub_feat.go @@ -18,18 +18,22 @@ const ( GossipSubFeatureMesh = iota // Protocol supports Peer eXchange on prune -- 
gossipsub-v1.1 compatible GossipSubFeaturePX + // Protocol supports IDONTWANT -- gossipsub-v1.2 compatible + GossipSubFeatureIdontwant ) // GossipSubDefaultProtocols is the default gossipsub router protocol list -var GossipSubDefaultProtocols = []protocol.ID{GossipSubID_v11, GossipSubID_v10, FloodSubID} +var GossipSubDefaultProtocols = []protocol.ID{GossipSubID_v12, GossipSubID_v11, GossipSubID_v10, FloodSubID} // GossipSubDefaultFeatures is the feature test function for the default gossipsub protocols func GossipSubDefaultFeatures(feat GossipSubFeature, proto protocol.ID) bool { switch feat { case GossipSubFeatureMesh: - return proto == GossipSubID_v11 || proto == GossipSubID_v10 + return proto == GossipSubID_v12 || proto == GossipSubID_v11 || proto == GossipSubID_v10 case GossipSubFeaturePX: - return proto == GossipSubID_v11 + return proto == GossipSubID_v12 || proto == GossipSubID_v11 + case GossipSubFeatureIdontwant: + return proto == GossipSubID_v12 default: return false } diff --git a/gossipsub_feat_test.go b/gossipsub_feat_test.go index 93cfb4c3..ff3709a3 100644 --- a/gossipsub_feat_test.go +++ b/gossipsub_feat_test.go @@ -21,6 +21,9 @@ func TestDefaultGossipSubFeatures(t *testing.T) { if !GossipSubDefaultFeatures(GossipSubFeatureMesh, GossipSubID_v11) { t.Fatal("gossipsub-v1.1 should support Mesh") } + if !GossipSubDefaultFeatures(GossipSubFeatureMesh, GossipSubID_v12) { + t.Fatal("gossipsub-v1.2 should support Mesh") + } if GossipSubDefaultFeatures(GossipSubFeaturePX, FloodSubID) { t.Fatal("floodsub should not support PX") @@ -28,9 +31,25 @@ func TestDefaultGossipSubFeatures(t *testing.T) { if GossipSubDefaultFeatures(GossipSubFeaturePX, GossipSubID_v10) { t.Fatal("gossipsub-v1.0 should not support PX") } - if !GossipSubDefaultFeatures(GossipSubFeatureMesh, GossipSubID_v11) { + if !GossipSubDefaultFeatures(GossipSubFeaturePX, GossipSubID_v11) { t.Fatal("gossipsub-v1.1 should support PX") } + if !GossipSubDefaultFeatures(GossipSubFeaturePX, GossipSubID_v12) { + t.Fatal("gossipsub-v1.2 should support PX") + } + + if GossipSubDefaultFeatures(GossipSubFeatureIdontwant, FloodSubID) { + t.Fatal("floodsub should not support IDONTWANT") + } + if GossipSubDefaultFeatures(GossipSubFeatureIdontwant, GossipSubID_v10) { + t.Fatal("gossipsub-v1.0 should not support IDONTWANT") + } + if GossipSubDefaultFeatures(GossipSubFeatureIdontwant, GossipSubID_v11) { + t.Fatal("gossipsub-v1.1 should not support IDONTWANT") + } + if !GossipSubDefaultFeatures(GossipSubFeatureIdontwant, GossipSubID_v12) { + t.Fatal("gossipsub-v1.2 should support IDONTWANT") + } } func TestGossipSubCustomProtocols(t *testing.T) { diff --git a/gossipsub_spam_test.go b/gossipsub_spam_test.go index ab22e7a9..df2fffff 100644 --- a/gossipsub_spam_test.go +++ b/gossipsub_spam_test.go @@ -3,6 +3,7 @@ package pubsub import ( "context" "crypto/rand" + "encoding/base64" "strconv" "sync" "testing" @@ -121,7 +122,7 @@ func TestGossipsubAttackSpamIWANT(t *testing.T) { // being spammy) iwantlst := []string{DefaultMsgIdFn(msg)} iwant := []*pb.ControlIWant{{MessageIDs: iwantlst}} - orpc := rpcWithControl(nil, nil, iwant, nil, nil) + orpc := rpcWithControl(nil, nil, iwant, nil, nil, nil) writeMsg(&orpc.RPC) } }) @@ -208,7 +209,7 @@ func TestGossipsubAttackSpamIHAVE(t *testing.T) { for i := 0; i < 3*GossipSubMaxIHaveLength; i++ { ihavelst := []string{"someid" + strconv.Itoa(i)} ihave := []*pb.ControlIHave{{TopicID: sub.Topicid, MessageIDs: ihavelst}} - orpc := rpcWithControl(nil, ihave, nil, nil, nil) + orpc := rpcWithControl(nil, ihave, nil, 
nil, nil, nil) writeMsg(&orpc.RPC) } @@ -238,7 +239,7 @@ func TestGossipsubAttackSpamIHAVE(t *testing.T) { for i := 0; i < 3*GossipSubMaxIHaveLength; i++ { ihavelst := []string{"someid" + strconv.Itoa(i+100)} ihave := []*pb.ControlIHave{{TopicID: sub.Topicid, MessageIDs: ihavelst}} - orpc := rpcWithControl(nil, ihave, nil, nil, nil) + orpc := rpcWithControl(nil, ihave, nil, nil, nil, nil) writeMsg(&orpc.RPC) } @@ -765,11 +766,139 @@ func TestGossipsubAttackInvalidMessageSpam(t *testing.T) { <-ctx.Done() } +// Test that when Gossipsub receives too many IDONTWANT messages from a peer +func TestGossipsubAttackSpamIDONTWANT(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + + msgID := func(pmsg *pb.Message) string { + // silly content-based test message-ID: just use the data as whole + return base64.URLEncoding.EncodeToString(pmsg.Data) + } + + psubs := make([]*PubSub, 2) + psubs[0] = getGossipsub(ctx, hosts[0], WithMessageIdFn(msgID)) + psubs[1] = getGossipsub(ctx, hosts[1], WithMessageIdFn(msgID)) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking the result + msgWaitMax := time.Second + GossipSubHeartbeatInterval + msgTimer := time.NewTimer(msgWaitMax) + + // Checks we received some messages + var expMid string + var actMids []string + checkMsgs := func() { + if len(actMids) == 0 { + t.Fatalf("Expected some messages when the maximum number of IDONTWANTs is reached") + } + if actMids[0] != expMid { + t.Fatalf("The expected message is incorrect") + } + if len(actMids) > 1 { + t.Fatalf("The spam prevention should be reset after the heartbeat") + } + } + + // Wait for the timer to expire + go func() { + select { + case <-msgTimer.C: + checkMsgs() + cancel() + return + case <-ctx.Done(): + checkMsgs() + } + }() + + newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // Each time the host receives a message + for _, msg := range irpc.GetPublish() { + actMids = append(actMids, msgID(msg)) + } + // When the middle peer connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the topic and grafting to the middle peer + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, + }) + + go func() { + // Wait for a short interval to make sure the middle peer + // received and processed the subscribe + graft + time.Sleep(100 * time.Millisecond) + + // Generate a message and send IDONTWANT to the middle peer + data := make([]byte, 16) + var mid string + for i := 0; i < 1+GossipSubMaxIDontWantMessages; i++ { + rand.Read(data) + mid = msgID(&pb.Message{Data: data}) + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: []string{mid}}}}, + }) + } + // The host should receives this message id because the maximum was reached + expMid = mid + + // Wait for a short interval to make sure the middle peer + // received and processed the IDONTWANTs + time.Sleep(100 * time.Millisecond) + + // Publish the message from the first peer + if err := psubs[0].Publish(topic, data); err != nil { + t.Error(err) + return // cannot call t.Fatal in a non-test goroutine + } + + // Wait for the next heartbeat so that the prevention will be reset + select 
{ + case <-ctx.Done(): + return + case <-time.After(GossipSubHeartbeatInterval): + } + + // Test IDONTWANT again to see that it now works again + rand.Read(data) + mid = msgID(&pb.Message{Data: data}) + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: []string{mid}}}}, + }) + time.Sleep(100 * time.Millisecond) + if err := psubs[0].Publish(topic, data); err != nil { + t.Error(err) + return // cannot call t.Fatal in a non-test goroutine + } + }() + } + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} + type mockGSOnRead func(writeMsg func(*pb.RPC), irpc *pb.RPC) func newMockGS(ctx context.Context, t *testing.T, attacker host.Host, onReadMsg mockGSOnRead) { + newMockGSWithVersion(ctx, t, attacker, protocol.ID("/meshsub/1.2.0"), onReadMsg) +} + +func newMockGSWithVersion(ctx context.Context, t *testing.T, attacker host.Host, gossipSubID protocol.ID, onReadMsg mockGSOnRead) { // Listen on the gossipsub protocol - const gossipSubID = protocol.ID("/meshsub/1.0.0") const maxMessageSize = 1024 * 1024 attacker.SetStreamHandler(gossipSubID, func(stream network.Stream) { // When an incoming stream is opened, set up an outgoing stream diff --git a/gossipsub_test.go b/gossipsub_test.go index 4481be9e..3b45557c 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -4,9 +4,11 @@ import ( "bytes" "context" crand "crypto/rand" + "encoding/base64" "fmt" "io" mrand "math/rand" + "sort" "sync" "sync/atomic" "testing" @@ -18,6 +20,7 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/peerstore" + "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/record" //lint:ignore SA1019 "github.com/libp2p/go-msgio/protoio" is deprecated @@ -2326,7 +2329,7 @@ func (iwe *iwantEverything) handleStream(s network.Stream) { } } - out := rpcWithControl(nil, nil, iwants, nil, prunes) + out := rpcWithControl(nil, nil, iwants, nil, prunes, nil) err = w.WriteMsg(out) if err != nil { panic(err) @@ -2590,3 +2593,585 @@ func TestGossipsubManagesAnAddressBook(t *testing.T) { t.Fatalf("expected no addrs, got %d addrs", len(addrs)) } } + +func TestGossipsubIdontwantSend(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + + msgID := func(pmsg *pb.Message) string { + // silly content-based test message-ID: just use the data as whole + return base64.URLEncoding.EncodeToString(pmsg.Data) + } + + validated := false + validate := func(context.Context, peer.ID, *Message) bool { + time.Sleep(100 * time.Millisecond) + validated = true + return true + } + + params := DefaultGossipSubParams() + params.IDontWantMessageThreshold = 16 + + psubs := make([]*PubSub, 2) + psubs[0] = getGossipsub(ctx, hosts[0], + WithGossipSubParams(params), + WithMessageIdFn(msgID)) + psubs[1] = getGossipsub(ctx, hosts[1], + WithGossipSubParams(params), + WithMessageIdFn(msgID), + WithDefaultValidator(validate)) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + var expMids []string + var actMids []string + + // Used to publish a message with random data + publishMsg := func() { + data := make([]byte, 16) + crand.Read(data) + m := &pb.Message{Data: data} + expMids = append(expMids, msgID(m)) + + if err := psubs[0].Publish(topic, data); err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking we 
got the right messages + msgWaitMax := time.Second + msgTimer := time.NewTimer(msgWaitMax) + + // Checks we received the right IDONTWANT messages + checkMsgs := func() { + sort.Strings(actMids) + sort.Strings(expMids) + + if len(actMids) != len(expMids) { + t.Fatalf("Expected %d IDONTWANT messages, got %d", len(expMids), len(actMids)) + } + for i, expMid := range expMids { + actMid := actMids[i] + if actMid != expMid { + t.Fatalf("Expected the id of %s in the %d'th IDONTWANT messages, got %s", expMid, i+1, actMid) + } + } + } + + // Wait for the timer to expire + go func() { + select { + case <-msgTimer.C: + checkMsgs() + cancel() + return + case <-ctx.Done(): + checkMsgs() + } + }() + + newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // When the middle peer connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the topic and grafting to the middle peer + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, + }) + + go func() { + // Wait for a short interval to make sure the middle peer + // received and processed the subscribe + graft + time.Sleep(100 * time.Millisecond) + + // Publish messages from the first peer + for i := 0; i < 10; i++ { + publishMsg() + } + }() + } + } + + // Each time the middle peer sends an IDONTWANT message + for _, idonthave := range irpc.GetControl().GetIdontwant() { + // If true, it means that, when we get IDONTWANT, the middle peer has done validation + // already, which should not be the case + if validated { + t.Fatalf("IDONTWANT should be sent before doing validation") + } + for _, mid := range idonthave.GetMessageIDs() { + // Add the message to the list and reset the timer + actMids = append(actMids, mid) + msgTimer.Reset(msgWaitMax) + } + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} + +func TestGossipsubIdontwantReceive(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + + msgID := func(pmsg *pb.Message) string { + // silly content-based test message-ID: just use the data as whole + return base64.URLEncoding.EncodeToString(pmsg.Data) + } + + psubs := make([]*PubSub, 2) + psubs[0] = getGossipsub(ctx, hosts[0], WithMessageIdFn(msgID)) + psubs[1] = getGossipsub(ctx, hosts[1], WithMessageIdFn(msgID)) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking the result + msgWaitMax := time.Second + msgTimer := time.NewTimer(msgWaitMax) + + // Checks we received no messages + received := false + checkMsgs := func() { + if received { + t.Fatalf("Expected no messages received after IDONWANT") + } + } + + // Wait for the timer to expire + go func() { + select { + case <-msgTimer.C: + checkMsgs() + cancel() + return + case <-ctx.Done(): + checkMsgs() + } + }() + + newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // Check if it receives any message + if len(irpc.GetPublish()) > 0 { + received = true + } + // When the middle peer connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the topic and grafting to the middle peer + writeMsg(&pb.RPC{ + Subscriptions: 
[]*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, + }) + + go func() { + // Wait for a short interval to make sure the middle peer + // received and processed the subscribe + graft + time.Sleep(100 * time.Millisecond) + + // Generate a message and send IDONTWANT to the middle peer + data := make([]byte, 16) + crand.Read(data) + mid := msgID(&pb.Message{Data: data}) + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: []string{mid}}}}, + }) + + // Wait for a short interval to make sure the middle peer + // received and processed the IDONTWANTs + time.Sleep(100 * time.Millisecond) + + // Publish the message from the first peer + if err := psubs[0].Publish(topic, data); err != nil { + t.Error(err) + return // cannot call t.Fatal in a non-test goroutine + } + }() + } + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} + +// Test that non-mesh peers will not get IDONTWANT +func TestGossipsubIdontwantNonMesh(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + + params := DefaultGossipSubParams() + params.IDontWantMessageThreshold = 16 + psubs := getGossipsubs(ctx, hosts[:2], WithGossipSubParams(params)) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + // Used to publish a message with random data + publishMsg := func() { + data := make([]byte, 16) + crand.Read(data) + + if err := psubs[0].Publish(topic, data); err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking we got the right messages + msgWaitMax := time.Second + msgTimer := time.NewTimer(msgWaitMax) + received := false + + // Checks if we received any IDONTWANT + checkMsgs := func() { + if received { + t.Fatalf("No IDONTWANT is expected") + } + } + + // Wait for the timer to expire + go func() { + select { + case <-msgTimer.C: + checkMsgs() + cancel() + return + case <-ctx.Done(): + checkMsgs() + } + }() + + newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // When the middle peer connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the topic and pruning to the middle peer to make sure + // that it's not in the mesh + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Prune: []*pb.ControlPrune{{TopicID: sub.Topicid}}}, + }) + + go func() { + // Wait for a short interval to make sure the middle peer + // received and processed the subscribe + time.Sleep(100 * time.Millisecond) + + // Publish messages from the first peer + for i := 0; i < 10; i++ { + publishMsg() + } + }() + } + } + + // Each time the middle peer sends an IDONTWANT message + for range irpc.GetControl().GetIdontwant() { + received = true + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} + +// Test that peers with incompatible versions will not get IDONTWANT +func TestGossipsubIdontwantIncompat(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + + params := DefaultGossipSubParams() + params.IDontWantMessageThreshold = 16 + psubs := getGossipsubs(ctx, hosts[:2], 
WithGossipSubParams(params)) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + // Used to publish a message with random data + publishMsg := func() { + data := make([]byte, 16) + crand.Read(data) + + if err := psubs[0].Publish(topic, data); err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking we got the right messages + msgWaitMax := time.Second + msgTimer := time.NewTimer(msgWaitMax) + received := false + + // Checks if we received any IDONTWANT + checkMsgs := func() { + if received { + t.Fatalf("No IDONTWANT is expected") + } + } + + // Wait for the timer to expire + go func() { + select { + case <-msgTimer.C: + checkMsgs() + cancel() + return + case <-ctx.Done(): + checkMsgs() + } + }() + + // Use the old GossipSub version + newMockGSWithVersion(ctx, t, hosts[2], protocol.ID("/meshsub/1.1.0"), func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // When the middle peer connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the topic and grafting to the middle peer + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, + }) + + go func() { + // Wait for a short interval to make sure the middle peer + // received and processed the subscribe + graft + time.Sleep(100 * time.Millisecond) + + // Publish messages from the first peer + for i := 0; i < 10; i++ { + publishMsg() + } + }() + } + } + + // Each time the middle peer sends an IDONTWANT message + for range irpc.GetControl().GetIdontwant() { + received = true + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} + +// Test that IDONTWANT will not be sent for small messages +func TestGossipsubIdontwantSmallMessage(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + + params := DefaultGossipSubParams() + params.IDontWantMessageThreshold = 16 + psubs := getGossipsubs(ctx, hosts[:2], WithGossipSubParams(params)) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + // Used to publish a message with random data + publishMsg := func() { + data := make([]byte, 8) + crand.Read(data) + + if err := psubs[0].Publish(topic, data); err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking we got the right messages + msgWaitMax := time.Second + msgTimer := time.NewTimer(msgWaitMax) + received := false + + // Checks if we received any IDONTWANT + checkMsgs := func() { + if received { + t.Fatalf("No IDONTWANT is expected") + } + } + + // Wait for the timer to expire + go func() { + select { + case <-msgTimer.C: + checkMsgs() + cancel() + return + case <-ctx.Done(): + checkMsgs() + } + }() + + newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // When the middle peer connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the topic and pruning to the middle peer to make sure + // that it's not in the mesh + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, + 
}) + + go func() { + // Wait for a short interval to make sure the middle peer + // received and processed the subscribe + time.Sleep(100 * time.Millisecond) + + // Publish messages from the first peer + for i := 0; i < 10; i++ { + publishMsg() + } + }() + } + } + + // Each time the middle peer sends an IDONTWANT message + for range irpc.GetControl().GetIdontwant() { + received = true + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} + +// Test that IDONTWANT will cleared when it's old enough +func TestGossipsubIdontwantClear(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + hosts := getDefaultHosts(t, 3) + + msgID := func(pmsg *pb.Message) string { + // silly content-based test message-ID: just use the data as whole + return base64.URLEncoding.EncodeToString(pmsg.Data) + } + + psubs := make([]*PubSub, 2) + psubs[0] = getGossipsub(ctx, hosts[0], WithMessageIdFn(msgID)) + psubs[1] = getGossipsub(ctx, hosts[1], WithMessageIdFn(msgID)) + + topic := "foobar" + for _, ps := range psubs { + _, err := ps.Subscribe(topic) + if err != nil { + t.Fatal(err) + } + } + + // Wait a bit after the last message before checking the result + msgWaitMax := 5 * time.Second + msgTimer := time.NewTimer(msgWaitMax) + + // Checks we received some message after the IDONTWANT is cleared + received := false + checkMsgs := func() { + if !received { + t.Fatalf("Expected some message after the IDONTWANT is cleared") + } + } + + // Wait for the timer to expire + go func() { + select { + case <-msgTimer.C: + checkMsgs() + cancel() + return + case <-ctx.Done(): + checkMsgs() + } + }() + + newMockGS(ctx, t, hosts[2], func(writeMsg func(*pb.RPC), irpc *pb.RPC) { + // Check if it receives any message + if len(irpc.GetPublish()) > 0 { + received = true + } + // When the middle peer connects it will send us its subscriptions + for _, sub := range irpc.GetSubscriptions() { + if sub.GetSubscribe() { + // Reply by subcribing to the topic and grafting to the middle peer + writeMsg(&pb.RPC{ + Subscriptions: []*pb.RPC_SubOpts{{Subscribe: sub.Subscribe, Topicid: sub.Topicid}}, + Control: &pb.ControlMessage{Graft: []*pb.ControlGraft{{TopicID: sub.Topicid}}}, + }) + + go func() { + // Wait for a short interval to make sure the middle peer + // received and processed the subscribe + graft + time.Sleep(100 * time.Millisecond) + + // Generate a message and send IDONTWANT to the middle peer + data := make([]byte, 16) + crand.Read(data) + mid := msgID(&pb.Message{Data: data}) + writeMsg(&pb.RPC{ + Control: &pb.ControlMessage{Idontwant: []*pb.ControlIDontWant{{MessageIDs: []string{mid}}}}, + }) + + // Wait for a short interval to make sure the middle peer + // received and processed the IDONTWANTs + time.Sleep(100 * time.Millisecond) + + // Wait for 4 heartbeats to make sure the IDONTWANT is cleared + time.Sleep(4 * time.Second) + + // Publish the message from the first peer + if err := psubs[0].Publish(topic, data); err != nil { + t.Error(err) + return // cannot call t.Fatal in a non-test goroutine + } + }() + } + } + }) + + connect(t, hosts[0], hosts[1]) + connect(t, hosts[1], hosts[2]) + + <-ctx.Done() +} diff --git a/pb/rpc.pb.go b/pb/rpc.pb.go index c6a2475f..151cb44d 100644 --- a/pb/rpc.pb.go +++ b/pb/rpc.pb.go @@ -228,13 +228,14 @@ func (m *Message) GetKey() []byte { } type ControlMessage struct { - Ihave []*ControlIHave `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` - Iwant []*ControlIWant `protobuf:"bytes,2,rep,name=iwant" 
json:"iwant,omitempty"` - Graft []*ControlGraft `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` - Prune []*ControlPrune `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Ihave []*ControlIHave `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` + Iwant []*ControlIWant `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"` + Graft []*ControlGraft `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` + Prune []*ControlPrune `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` + Idontwant []*ControlIDontWant `protobuf:"bytes,5,rep,name=idontwant" json:"idontwant,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ControlMessage) Reset() { *m = ControlMessage{} } @@ -298,6 +299,13 @@ func (m *ControlMessage) GetPrune() []*ControlPrune { return nil } +func (m *ControlMessage) GetIdontwant() []*ControlIDontWant { + if m != nil { + return m.Idontwant + } + return nil +} + type ControlIHave struct { TopicID *string `protobuf:"bytes,1,opt,name=topicID" json:"topicID,omitempty"` // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings @@ -512,6 +520,54 @@ func (m *ControlPrune) GetBackoff() uint64 { return 0 } +type ControlIDontWant struct { + // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + MessageIDs []string `protobuf:"bytes,1,rep,name=messageIDs" json:"messageIDs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ControlIDontWant) Reset() { *m = ControlIDontWant{} } +func (m *ControlIDontWant) String() string { return proto.CompactTextString(m) } +func (*ControlIDontWant) ProtoMessage() {} +func (*ControlIDontWant) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{7} +} +func (m *ControlIDontWant) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControlIDontWant) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ControlIDontWant.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ControlIDontWant) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControlIDontWant.Merge(m, src) +} +func (m *ControlIDontWant) XXX_Size() int { + return m.Size() +} +func (m *ControlIDontWant) XXX_DiscardUnknown() { + xxx_messageInfo_ControlIDontWant.DiscardUnknown(m) +} + +var xxx_messageInfo_ControlIDontWant proto.InternalMessageInfo + +func (m *ControlIDontWant) GetMessageIDs() []string { + if m != nil { + return m.MessageIDs + } + return nil +} + type PeerInfo struct { PeerID []byte `protobuf:"bytes,1,opt,name=peerID" json:"peerID,omitempty"` SignedPeerRecord []byte `protobuf:"bytes,2,opt,name=signedPeerRecord" json:"signedPeerRecord,omitempty"` @@ -524,7 +580,7 @@ func (m *PeerInfo) Reset() { *m = PeerInfo{} } func (m *PeerInfo) String() string { return proto.CompactTextString(m) } func (*PeerInfo) ProtoMessage() {} func (*PeerInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{7} + return fileDescriptor_77a6da22d6a3feb1, []int{8} } func (m *PeerInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -576,43 +632,46 @@ func init() { 
proto.RegisterType((*ControlIWant)(nil), "pubsub.pb.ControlIWant") proto.RegisterType((*ControlGraft)(nil), "pubsub.pb.ControlGraft") proto.RegisterType((*ControlPrune)(nil), "pubsub.pb.ControlPrune") + proto.RegisterType((*ControlIDontWant)(nil), "pubsub.pb.ControlIDontWant") proto.RegisterType((*PeerInfo)(nil), "pubsub.pb.PeerInfo") } func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 480 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x8e, 0xd3, 0x3c, - 0x10, 0xc7, 0xe5, 0x6d, 0xbb, 0xd9, 0xcc, 0xe6, 0xfb, 0xb4, 0x32, 0x68, 0x31, 0x08, 0x55, 0x55, - 0x4e, 0x01, 0x41, 0x0e, 0xcb, 0x95, 0x0b, 0xb4, 0x12, 0x9b, 0x03, 0x50, 0x99, 0x03, 0x67, 0x27, - 0x75, 0xba, 0xd1, 0x6e, 0x63, 0x63, 0x3b, 0x8b, 0x78, 0x08, 0xde, 0x8b, 0x03, 0x07, 0x1e, 0x01, - 0xf5, 0xc6, 0x5b, 0x20, 0x3b, 0x4e, 0x9a, 0xa5, 0x94, 0x9b, 0xe7, 0xef, 0xdf, 0xcc, 0xfc, 0x3d, - 0x1e, 0x08, 0x95, 0x2c, 0x52, 0xa9, 0x84, 0x11, 0x38, 0x94, 0x4d, 0xae, 0x9b, 0x3c, 0x95, 0x79, - 0xfc, 0x0b, 0xc1, 0x88, 0x2e, 0xe7, 0xf8, 0x25, 0xfc, 0xa7, 0x9b, 0x5c, 0x17, 0xaa, 0x92, 0xa6, - 0x12, 0xb5, 0x26, 0x68, 0x36, 0x4a, 0x4e, 0x2f, 0xce, 0xd3, 0x1e, 0x4d, 0xe9, 0x72, 0x9e, 0x7e, - 0x68, 0xf2, 0xf7, 0xd2, 0x68, 0x7a, 0x17, 0xc6, 0xcf, 0x20, 0x90, 0x4d, 0x7e, 0x53, 0xe9, 0x2b, - 0x72, 0xe4, 0xf2, 0xf0, 0x20, 0xef, 0x2d, 0xd7, 0x9a, 0xad, 0x39, 0xed, 0x10, 0xfc, 0x02, 0x82, - 0x42, 0xd4, 0x46, 0x89, 0x1b, 0x32, 0x9a, 0xa1, 0xe4, 0xf4, 0xe2, 0xe1, 0x80, 0x9e, 0xb7, 0x37, - 0x7d, 0x92, 0x27, 0x1f, 0xbd, 0x82, 0xc0, 0x37, 0xc7, 0x8f, 0x21, 0xf4, 0xed, 0x73, 0x4e, 0xd0, - 0x0c, 0x25, 0x27, 0x74, 0x27, 0x60, 0x02, 0x81, 0x11, 0xb2, 0x2a, 0xaa, 0x15, 0x39, 0x9a, 0xa1, - 0x24, 0xa4, 0x5d, 0x18, 0x7f, 0x45, 0x10, 0xf8, 0xba, 0x18, 0xc3, 0xb8, 0x54, 0x62, 0xe3, 0xd2, - 0x23, 0xea, 0xce, 0x56, 0x5b, 0x31, 0xc3, 0x5c, 0x5a, 0x44, 0xdd, 0x19, 0xdf, 0x87, 0x89, 0xe6, - 0x9f, 0x6a, 0xe1, 0x9c, 0x46, 0xb4, 0x0d, 0xac, 0xea, 0x8a, 0x92, 0xb1, 0xeb, 0xd0, 0x06, 0xce, - 0x57, 0xb5, 0xae, 0x99, 0x69, 0x14, 0x27, 0x13, 0xc7, 0xef, 0x04, 0x7c, 0x06, 0xa3, 0x6b, 0xfe, - 0x85, 0x1c, 0x3b, 0xdd, 0x1e, 0xe3, 0xef, 0x08, 0xfe, 0xbf, 0xfb, 0x5c, 0xfc, 0x1c, 0x26, 0xd5, - 0x15, 0xbb, 0xe5, 0x7e, 0xfc, 0x0f, 0xf6, 0x07, 0x93, 0x5d, 0xb2, 0x5b, 0x4e, 0x5b, 0xca, 0xe1, - 0x9f, 0x59, 0x6d, 0xfc, 0xd4, 0xff, 0x86, 0x7f, 0x64, 0xb5, 0xa1, 0x2d, 0x65, 0xf1, 0xb5, 0x62, - 0xa5, 0x21, 0xa3, 0x43, 0xf8, 0x1b, 0x7b, 0x4d, 0x5b, 0xca, 0xe2, 0x52, 0x35, 0x35, 0x27, 0xe3, - 0x43, 0xf8, 0xd2, 0x5e, 0xd3, 0x96, 0x8a, 0x2f, 0x21, 0x1a, 0x7a, 0xec, 0x3f, 0x22, 0x5b, 0xb8, - 0x29, 0x77, 0x1f, 0x91, 0x2d, 0xf0, 0x14, 0x60, 0xd3, 0x3e, 0x38, 0x5b, 0x68, 0xe7, 0x3d, 0xa4, - 0x03, 0x25, 0x4e, 0x77, 0x95, 0xac, 0xfd, 0x3f, 0x78, 0xb4, 0xc7, 0x27, 0x3d, 0xef, 0xfc, 0x1f, - 0xee, 0x1c, 0x6f, 0x7a, 0xd2, 0x59, 0xff, 0x87, 0xc7, 0x27, 0x30, 0x91, 0x9c, 0x2b, 0xed, 0x47, - 0x7b, 0x6f, 0xf0, 0xf8, 0x25, 0xe7, 0x2a, 0xab, 0x4b, 0x41, 0x5b, 0xc2, 0x16, 0xc9, 0x59, 0x71, - 0x2d, 0xca, 0xd2, 0x6d, 0xc9, 0x98, 0x76, 0x61, 0xfc, 0x0e, 0x4e, 0x3a, 0x18, 0x9f, 0xc3, 0xb1, - 0xc5, 0x7d, 0xa7, 0x88, 0xfa, 0x08, 0x3f, 0x85, 0x33, 0xbb, 0x24, 0x7c, 0x65, 0x49, 0xca, 0x0b, - 0xa1, 0x56, 0x7e, 0x03, 0xf7, 0xf4, 0xd7, 0xd1, 0xb7, 0xed, 0x14, 0xfd, 0xd8, 0x4e, 0xd1, 0xcf, - 0xed, 0x14, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xb2, 0xf8, 0xc4, 0x6e, 0xd2, 0x03, 0x00, 0x00, + // 511 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x02, 0xff, 0x84, 0x92, 0xcd, 0x6e, 0x13, 0x31, + 0x10, 0xc7, 0xe5, 0x7c, 0x34, 0xdd, 0xe9, 0x82, 0x22, 0x83, 0x8a, 0xf9, 0x50, 0x14, 0xed, 0x29, + 0x20, 0xd8, 0x43, 0x38, 0x21, 0x71, 0x81, 0x44, 0xa2, 0x39, 0x00, 0x91, 0x39, 0x70, 0xde, 0xdd, + 0x38, 0xe9, 0xaa, 0x8d, 0x6d, 0x6c, 0x6f, 0x11, 0x4f, 0xc0, 0x89, 0xf7, 0xe2, 0xc8, 0x23, 0xa0, + 0xdc, 0x78, 0x0b, 0xe4, 0x59, 0xe7, 0xa3, 0x4d, 0x03, 0x37, 0xcf, 0xf8, 0x37, 0xfe, 0xff, 0x67, + 0xc6, 0x10, 0x19, 0x5d, 0xa4, 0xda, 0x28, 0xa7, 0x68, 0xa4, 0xab, 0xdc, 0x56, 0x79, 0xaa, 0xf3, + 0xe4, 0x0f, 0x81, 0x26, 0x9f, 0x8e, 0xe8, 0x6b, 0xb8, 0x63, 0xab, 0xdc, 0x16, 0xa6, 0xd4, 0xae, + 0x54, 0xd2, 0x32, 0xd2, 0x6f, 0x0e, 0x4e, 0x86, 0xa7, 0xe9, 0x06, 0x4d, 0xf9, 0x74, 0x94, 0x7e, + 0xaa, 0xf2, 0x8f, 0xda, 0x59, 0x7e, 0x1d, 0xa6, 0xcf, 0xa1, 0xa3, 0xab, 0xfc, 0xb2, 0xb4, 0xe7, + 0xac, 0x81, 0x75, 0x74, 0xa7, 0xee, 0xbd, 0xb0, 0x36, 0x5b, 0x08, 0xbe, 0x46, 0xe8, 0x4b, 0xe8, + 0x14, 0x4a, 0x3a, 0xa3, 0x2e, 0x59, 0xb3, 0x4f, 0x06, 0x27, 0xc3, 0x87, 0x3b, 0xf4, 0xa8, 0xbe, + 0xd9, 0x14, 0x05, 0xf2, 0xd1, 0x1b, 0xe8, 0x04, 0x71, 0xfa, 0x04, 0xa2, 0x20, 0x9f, 0x0b, 0x46, + 0xfa, 0x64, 0x70, 0xcc, 0xb7, 0x09, 0xca, 0xa0, 0xe3, 0x94, 0x2e, 0x8b, 0x72, 0xc6, 0x1a, 0x7d, + 0x32, 0x88, 0xf8, 0x3a, 0x4c, 0x7e, 0x10, 0xe8, 0x84, 0x77, 0x29, 0x85, 0xd6, 0xdc, 0xa8, 0x25, + 0x96, 0xc7, 0x1c, 0xcf, 0x3e, 0x37, 0xcb, 0x5c, 0x86, 0x65, 0x31, 0xc7, 0x33, 0xbd, 0x0f, 0x6d, + 0x2b, 0xbe, 0x48, 0x85, 0x4e, 0x63, 0x5e, 0x07, 0x3e, 0x8b, 0x8f, 0xb2, 0x16, 0x2a, 0xd4, 0x01, + 0xfa, 0x2a, 0x17, 0x32, 0x73, 0x95, 0x11, 0xac, 0x8d, 0xfc, 0x36, 0x41, 0xbb, 0xd0, 0xbc, 0x10, + 0xdf, 0xd8, 0x11, 0xe6, 0xfd, 0x31, 0xf9, 0xde, 0x80, 0xbb, 0xd7, 0xdb, 0xa5, 0x2f, 0xa0, 0x5d, + 0x9e, 0x67, 0x57, 0x22, 0x8c, 0xff, 0xc1, 0xfe, 0x60, 0x26, 0x67, 0xd9, 0x95, 0xe0, 0x35, 0x85, + 0xf8, 0xd7, 0x4c, 0xba, 0x30, 0xf5, 0xdb, 0xf0, 0xcf, 0x99, 0x74, 0xbc, 0xa6, 0x3c, 0xbe, 0x30, + 0xd9, 0xdc, 0xb1, 0xe6, 0x21, 0xfc, 0x9d, 0xbf, 0xe6, 0x35, 0xe5, 0x71, 0x6d, 0x2a, 0x29, 0x58, + 0xeb, 0x10, 0x3e, 0xf5, 0xd7, 0xbc, 0xa6, 0xe8, 0x2b, 0x88, 0xca, 0x99, 0x92, 0x0e, 0x0d, 0xb5, + 0xb1, 0xe4, 0xf1, 0x2d, 0x86, 0xc6, 0x4a, 0x3a, 0x34, 0xb5, 0xa5, 0x93, 0x33, 0x88, 0x77, 0xdb, + 0xdb, 0xec, 0x70, 0x32, 0xc6, 0x05, 0xad, 0x77, 0x38, 0x19, 0xd3, 0x1e, 0xc0, 0xb2, 0x9e, 0xd5, + 0x64, 0x6c, 0xb1, 0xed, 0x88, 0xef, 0x64, 0x92, 0x74, 0xfb, 0x92, 0x17, 0xb9, 0xc1, 0x93, 0x3d, + 0x7e, 0xb0, 0xe1, 0xb1, 0xf5, 0xc3, 0xca, 0xc9, 0x72, 0x43, 0x62, 0xd7, 0xff, 0xf0, 0xf8, 0x14, + 0xda, 0x5a, 0x08, 0x63, 0xc3, 0x56, 0xee, 0xed, 0x0c, 0x61, 0x2a, 0x84, 0x99, 0xc8, 0xb9, 0xe2, + 0x35, 0xe1, 0x1f, 0xc9, 0xb3, 0xe2, 0x42, 0xcd, 0xe7, 0xf8, 0xc1, 0x5a, 0x7c, 0x1d, 0x26, 0x43, + 0xe8, 0xde, 0x9c, 0xd8, 0x7f, 0x9b, 0xf9, 0x00, 0xc7, 0x6b, 0x01, 0x7a, 0x0a, 0x47, 0x5e, 0x22, + 0xb8, 0x8b, 0x79, 0x88, 0xe8, 0x33, 0xe8, 0xfa, 0x3f, 0x29, 0x66, 0x9e, 0xe4, 0xa2, 0x50, 0x66, + 0x16, 0x3e, 0xfc, 0x5e, 0xfe, 0x6d, 0xfc, 0x73, 0xd5, 0x23, 0xbf, 0x56, 0x3d, 0xf2, 0x7b, 0xd5, + 0x23, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xba, 0x73, 0x8e, 0xbf, 0x41, 0x04, 0x00, 0x00, } func (m *RPC) Marshal() (dAtA []byte, err error) { @@ -819,6 +878,20 @@ func (m *ControlMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Idontwant) > 0 { + for iNdEx := len(m.Idontwant) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Idontwant[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- 
+ dAtA[i] = 0x2a + } + } if len(m.Prune) > 0 { for iNdEx := len(m.Prune) - 1; iNdEx >= 0; iNdEx-- { { @@ -1044,6 +1117,42 @@ func (m *ControlPrune) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ControlIDontWant) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControlIDontWant) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControlIDontWant) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.MessageIDs) > 0 { + for iNdEx := len(m.MessageIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MessageIDs[iNdEx]) + copy(dAtA[i:], m.MessageIDs[iNdEx]) + i = encodeVarintRpc(dAtA, i, uint64(len(m.MessageIDs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *PeerInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1209,6 +1318,12 @@ func (m *ControlMessage) Size() (n int) { n += 1 + l + sovRpc(uint64(l)) } } + if len(m.Idontwant) > 0 { + for _, e := range m.Idontwant { + l = e.Size() + n += 1 + l + sovRpc(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -1296,6 +1411,24 @@ func (m *ControlPrune) Size() (n int) { return n } +func (m *ControlIDontWant) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MessageIDs) > 0 { + for _, s := range m.MessageIDs { + l = len(s) + n += 1 + l + sovRpc(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *PeerInfo) Size() (n int) { if m == nil { return 0 @@ -2001,6 +2134,40 @@ func (m *ControlMessage) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Idontwant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Idontwant = append(m.Idontwant, &ControlIDontWant{}) + if err := m.Idontwant[len(m.Idontwant)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -2444,6 +2611,89 @@ func (m *ControlPrune) Unmarshal(dAtA []byte) error { } return nil } +func (m *ControlIDontWant) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControlIDontWant: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControlIDontWant: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageIDs = append(m.MessageIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRpc(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRpc + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PeerInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pb/rpc.proto b/pb/rpc.proto index e5df8401..bd0234c3 100644 --- a/pb/rpc.proto +++ b/pb/rpc.proto @@ -28,6 +28,7 @@ message ControlMessage { repeated ControlIWant iwant = 2; repeated ControlGraft graft = 3; repeated ControlPrune prune = 4; + repeated ControlIDontWant idontwant = 5; } message ControlIHave { @@ -51,7 +52,12 @@ message ControlPrune { optional uint64 backoff = 3; } +message ControlIDontWant { + // implementors from other languages should use bytes here - go protobuf emits invalid utf8 strings + repeated string messageIDs = 1; +} + message PeerInfo { optional bytes peerID = 1; optional bytes signedPeerRecord = 2; -} \ No newline at end of file +} diff --git a/pb/trace.pb.go b/pb/trace.pb.go index dd806155..04f1ec1b 100644 --- a/pb/trace.pb.go +++ b/pb/trace.pb.go @@ -1159,13 +1159,14 @@ func (m *TraceEvent_SubMeta) GetTopic() string { } type TraceEvent_ControlMeta struct { - Ihave []*TraceEvent_ControlIHaveMeta `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` - Iwant []*TraceEvent_ControlIWantMeta `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"` - Graft []*TraceEvent_ControlGraftMeta `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` - Prune []*TraceEvent_ControlPruneMeta `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Ihave []*TraceEvent_ControlIHaveMeta `protobuf:"bytes,1,rep,name=ihave" json:"ihave,omitempty"` + Iwant []*TraceEvent_ControlIWantMeta `protobuf:"bytes,2,rep,name=iwant" json:"iwant,omitempty"` + Graft []*TraceEvent_ControlGraftMeta `protobuf:"bytes,3,rep,name=graft" json:"graft,omitempty"` + Prune []*TraceEvent_ControlPruneMeta `protobuf:"bytes,4,rep,name=prune" json:"prune,omitempty"` + Idontwant []*TraceEvent_ControlIDontWantMeta `protobuf:"bytes,5,rep,name=idontwant" json:"idontwant,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *TraceEvent_ControlMeta) Reset() { *m = TraceEvent_ControlMeta{} } @@ -1229,6 +1230,13 @@ func (m *TraceEvent_ControlMeta) GetPrune() []*TraceEvent_ControlPruneMeta { return nil } +func (m *TraceEvent_ControlMeta) GetIdontwant() []*TraceEvent_ControlIDontWantMeta { + 
if m != nil { + return m.Idontwant + } + return nil +} + type TraceEvent_ControlIHaveMeta struct { Topic *string `protobuf:"bytes,1,opt,name=topic" json:"topic,omitempty"` MessageIDs [][]byte `protobuf:"bytes,2,rep,name=messageIDs" json:"messageIDs,omitempty"` @@ -1433,6 +1441,53 @@ func (m *TraceEvent_ControlPruneMeta) GetPeers() [][]byte { return nil } +type TraceEvent_ControlIDontWantMeta struct { + MessageIDs [][]byte `protobuf:"bytes,1,rep,name=messageIDs" json:"messageIDs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceEvent_ControlIDontWantMeta) Reset() { *m = TraceEvent_ControlIDontWantMeta{} } +func (m *TraceEvent_ControlIDontWantMeta) String() string { return proto.CompactTextString(m) } +func (*TraceEvent_ControlIDontWantMeta) ProtoMessage() {} +func (*TraceEvent_ControlIDontWantMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_0571941a1d628a80, []int{0, 21} +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TraceEvent_ControlIDontWantMeta.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceEvent_ControlIDontWantMeta.Merge(m, src) +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_Size() int { + return m.Size() +} +func (m *TraceEvent_ControlIDontWantMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TraceEvent_ControlIDontWantMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceEvent_ControlIDontWantMeta proto.InternalMessageInfo + +func (m *TraceEvent_ControlIDontWantMeta) GetMessageIDs() [][]byte { + if m != nil { + return m.MessageIDs + } + return nil +} + type TraceEventBatch struct { Batch []*TraceEvent `protobuf:"bytes,1,rep,name=batch" json:"batch,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1504,76 +1559,79 @@ func init() { proto.RegisterType((*TraceEvent_ControlIWantMeta)(nil), "pubsub.pb.TraceEvent.ControlIWantMeta") proto.RegisterType((*TraceEvent_ControlGraftMeta)(nil), "pubsub.pb.TraceEvent.ControlGraftMeta") proto.RegisterType((*TraceEvent_ControlPruneMeta)(nil), "pubsub.pb.TraceEvent.ControlPruneMeta") + proto.RegisterType((*TraceEvent_ControlIDontWantMeta)(nil), "pubsub.pb.TraceEvent.ControlIDontWantMeta") proto.RegisterType((*TraceEventBatch)(nil), "pubsub.pb.TraceEventBatch") } func init() { proto.RegisterFile("trace.proto", fileDescriptor_0571941a1d628a80) } var fileDescriptor_0571941a1d628a80 = []byte{ - // 999 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0x51, 0x6f, 0xda, 0x56, - 0x14, 0xc7, 0xe7, 0x00, 0x01, 0x0e, 0x84, 0x78, 0x77, 0x6d, 0x65, 0xb1, 0x36, 0x62, 0x59, 0x55, - 0x21, 0x4d, 0x42, 0x6a, 0xa4, 0xa9, 0x0f, 0x6b, 0xab, 0x11, 0xec, 0x26, 0x44, 0x24, 0xb1, 0x0e, - 0x24, 0x7b, 0xcc, 0x0c, 0xdc, 0x35, 0x8e, 0xc0, 0xb6, 0xec, 0x0b, 0x53, 0x9f, 0xf6, 0xb4, 0xef, - 0xd6, 0xb7, 0xed, 0x23, 0x54, 0xf9, 0x24, 0xd3, 0xbd, 0xd7, 0x36, 0x36, 0xd8, 0xb4, 0x8b, 0xfa, - 0xe6, 0x73, 0xf3, 0xff, 0x9d, 0x7b, 0xce, 0xbd, 0xe7, 0x7f, 0x03, 0xd4, 0x98, 0x6f, 0x4d, 0x68, - 0xc7, 0xf3, 0x5d, 0xe6, 0x92, 0xaa, 0xb7, 0x18, 0x07, 0x8b, 0x71, 0xc7, 0x1b, 0x1f, 0x7e, 0x7a, - 0x02, 0x30, 0xe2, 
0x7f, 0x32, 0x96, 0xd4, 0x61, 0xa4, 0x03, 0x45, 0xf6, 0xc1, 0xa3, 0x9a, 0xd2, - 0x52, 0xda, 0x8d, 0xa3, 0x66, 0x27, 0x16, 0x76, 0x56, 0xa2, 0xce, 0xe8, 0x83, 0x47, 0x51, 0xe8, - 0xc8, 0x13, 0xd8, 0xf5, 0x28, 0xf5, 0xfb, 0xba, 0xb6, 0xd3, 0x52, 0xda, 0x75, 0x0c, 0x23, 0xf2, - 0x14, 0xaa, 0xcc, 0x9e, 0xd3, 0x80, 0x59, 0x73, 0x4f, 0x2b, 0xb4, 0x94, 0x76, 0x01, 0x57, 0x0b, - 0x64, 0x00, 0x0d, 0x6f, 0x31, 0x9e, 0xd9, 0xc1, 0xed, 0x39, 0x0d, 0x02, 0xeb, 0x3d, 0xd5, 0x8a, - 0x2d, 0xa5, 0x5d, 0x3b, 0x7a, 0x9e, 0xbd, 0x9f, 0x99, 0xd2, 0xe2, 0x1a, 0x4b, 0xfa, 0xb0, 0xe7, - 0xd3, 0x3b, 0x3a, 0x61, 0x51, 0xb2, 0x92, 0x48, 0xf6, 0x63, 0x76, 0x32, 0x4c, 0x4a, 0x31, 0x4d, - 0x12, 0x04, 0x75, 0xba, 0xf0, 0x66, 0xf6, 0xc4, 0x62, 0x34, 0xca, 0xb6, 0x2b, 0xb2, 0xbd, 0xc8, - 0xce, 0xa6, 0xaf, 0xa9, 0x71, 0x83, 0xe7, 0xcd, 0x4e, 0xe9, 0xcc, 0x5e, 0x52, 0x3f, 0xca, 0x58, - 0xde, 0xd6, 0xac, 0x9e, 0xd2, 0xe2, 0x1a, 0x4b, 0x5e, 0x41, 0xd9, 0x9a, 0x4e, 0x4d, 0x4a, 0x7d, - 0xad, 0x22, 0xd2, 0x3c, 0xcb, 0x4e, 0xd3, 0x95, 0x22, 0x8c, 0xd4, 0xe4, 0x57, 0x00, 0x9f, 0xce, - 0xdd, 0x25, 0x15, 0x6c, 0x55, 0xb0, 0xad, 0xbc, 0x23, 0x8a, 0x74, 0x98, 0x60, 0xf8, 0xd6, 0x3e, - 0x9d, 0x2c, 0xd1, 0xec, 0x69, 0xb0, 0x6d, 0x6b, 0x94, 0x22, 0x8c, 0xd4, 0x1c, 0x0c, 0xa8, 0x33, - 0xe5, 0x60, 0x6d, 0x1b, 0x38, 0x94, 0x22, 0x8c, 0xd4, 0x1c, 0x9c, 0xfa, 0xae, 0xc7, 0xc1, 0xfa, - 0x36, 0x50, 0x97, 0x22, 0x8c, 0xd4, 0x7c, 0x8c, 0xef, 0x5c, 0xdb, 0xd1, 0xf6, 0x04, 0x95, 0x33, - 0xc6, 0x67, 0xae, 0xed, 0xa0, 0xd0, 0x91, 0x97, 0x50, 0x9a, 0x51, 0x6b, 0x49, 0xb5, 0x86, 0x00, - 0xbe, 0xcf, 0x06, 0x06, 0x5c, 0x82, 0x52, 0xc9, 0x91, 0xf7, 0xbe, 0xf5, 0x07, 0xd3, 0xf6, 0xb7, - 0x21, 0x27, 0x5c, 0x82, 0x52, 0xc9, 0x11, 0xcf, 0x5f, 0x38, 0x54, 0x53, 0xb7, 0x21, 0x26, 0x97, - 0xa0, 0x54, 0x36, 0x75, 0x68, 0xa4, 0xa7, 0x9f, 0x3b, 0x6b, 0x2e, 0x3f, 0xfb, 0xba, 0xb0, 0x69, - 0x1d, 0x57, 0x0b, 0xe4, 0x11, 0x94, 0x98, 0xeb, 0xd9, 0x13, 0x61, 0xc7, 0x2a, 0xca, 0xa0, 0xf9, - 0x17, 0xec, 0xa5, 0xc6, 0xfe, 0x33, 0x49, 0x0e, 0xa1, 0xee, 0xd3, 0x09, 0xb5, 0x97, 0x74, 0xfa, - 0xce, 0x77, 0xe7, 0xa1, 0xb5, 0x53, 0x6b, 0xdc, 0xf8, 0x3e, 0xb5, 0x02, 0xd7, 0x11, 0xee, 0xae, - 0x62, 0x18, 0xad, 0x0a, 0x28, 0x26, 0x0b, 0xb8, 0x03, 0x75, 0xdd, 0x29, 0x5f, 0xa1, 0x86, 0x78, - 0xaf, 0x42, 0x72, 0xaf, 0x5b, 0x68, 0xa4, 0x3d, 0xf4, 0x90, 0x23, 0xdb, 0xd8, 0xbf, 0xb0, 0xb9, - 0x7f, 0xf3, 0x15, 0x94, 0x43, 0x9b, 0x25, 0xde, 0x41, 0x25, 0xf5, 0x0e, 0x3e, 0xe2, 0x57, 0xee, - 0x32, 0x37, 0x4a, 0x2e, 0x82, 0xe6, 0x73, 0x80, 0x95, 0xc7, 0xf2, 0xd8, 0xe6, 0xef, 0x50, 0x0e, - 0xad, 0xb4, 0x51, 0x8d, 0x92, 0x71, 0x1a, 0x2f, 0xa1, 0x38, 0xa7, 0xcc, 0x12, 0x3b, 0xe5, 0x7b, - 0xd3, 0xec, 0x9d, 0x53, 0x66, 0xa1, 0x90, 0x36, 0x47, 0x50, 0x0e, 0x3d, 0xc7, 0x8b, 0xe0, 0xae, - 0x1b, 0xb9, 0x51, 0x11, 0x32, 0x7a, 0x60, 0xd6, 0xd0, 0x90, 0x5f, 0x33, 0xeb, 0x53, 0x28, 0x72, - 0xc3, 0xae, 0xae, 0x4b, 0x49, 0x5e, 0xfa, 0x33, 0x28, 0x09, 0x77, 0xe6, 0x18, 0xe0, 0x67, 0x28, - 0x09, 0x27, 0x6e, 0xbb, 0xa7, 0x6c, 0x4c, 0xb8, 0xf1, 0x7f, 0x62, 0x1f, 0x15, 0x28, 0x87, 0xc5, - 0x93, 0x37, 0x50, 0x09, 0x47, 0x2d, 0xd0, 0x94, 0x56, 0xa1, 0x5d, 0x3b, 0xfa, 0x21, 0xbb, 0xdb, - 0x70, 0x58, 0x45, 0xc7, 0x31, 0x42, 0xba, 0x50, 0x0f, 0x16, 0xe3, 0x60, 0xe2, 0xdb, 0x1e, 0xb3, - 0x5d, 0x47, 0xdb, 0x11, 0x29, 0xf2, 0xde, 0xcf, 0xc5, 0x58, 0xe0, 0x29, 0x84, 0xfc, 0x02, 0xe5, - 0x89, 0xeb, 0x30, 0xdf, 0x9d, 0x89, 0x21, 0xce, 0x2d, 0xa0, 0x27, 0x45, 0x22, 0x43, 0x44, 0x34, - 0xbb, 0x50, 0x4b, 0x14, 0xf6, 0xa0, 0xc7, 0xe7, 0x0d, 0x94, 0xc3, 0xc2, 0x38, 0x1e, 0x96, 0x36, - 0x96, 0x3f, 0x31, 0x2a, 0xb8, 0x5a, 0xc8, 
0xc1, 0xff, 0xde, 0x81, 0x5a, 0xa2, 0x34, 0xf2, 0x1a, - 0x4a, 0xf6, 0x2d, 0x7f, 0xaa, 0xe5, 0x69, 0xbe, 0xd8, 0xda, 0x4c, 0xff, 0xd4, 0x5a, 0xca, 0x23, - 0x95, 0x90, 0xa0, 0xff, 0xb4, 0x1c, 0x16, 0x1e, 0xe4, 0x67, 0xe8, 0xdf, 0x2c, 0x87, 0x85, 0x34, - 0x87, 0x38, 0x2d, 0xdf, 0xfc, 0xc2, 0x17, 0xd0, 0x62, 0xe0, 0x24, 0x2d, 0x9f, 0xff, 0xd7, 0xd1, - 0xf3, 0x5f, 0xfc, 0x02, 0x5a, 0xcc, 0x9d, 0xa4, 0xe5, 0x7f, 0x82, 0x53, 0x50, 0xd7, 0x9b, 0xca, - 0xf6, 0x02, 0x39, 0x00, 0x88, 0xef, 0x24, 0x10, 0x8d, 0xd6, 0x31, 0xb1, 0xd2, 0x3c, 0x5a, 0x65, - 0x8a, 0x1a, 0x5c, 0x63, 0x94, 0x0d, 0xa6, 0x1d, 0x33, 0x71, 0x5b, 0x39, 0x4e, 0x7c, 0x1b, 0x2b, - 0xe3, 0x16, 0x72, 0xea, 0xe4, 0x6f, 0x23, 0xa5, 0x7e, 0x54, 0xa2, 0x0c, 0x0e, 0xff, 0x51, 0xa0, - 0xc8, 0x7f, 0x60, 0x92, 0xef, 0x60, 0xdf, 0xbc, 0x3a, 0x1e, 0xf4, 0x87, 0xa7, 0x37, 0xe7, 0xc6, - 0x70, 0xd8, 0x3d, 0x31, 0xd4, 0x6f, 0x08, 0x81, 0x06, 0x1a, 0x67, 0x46, 0x6f, 0x14, 0xaf, 0x29, - 0xe4, 0x31, 0x7c, 0xab, 0x5f, 0x99, 0x83, 0x7e, 0xaf, 0x3b, 0x32, 0xe2, 0xe5, 0x1d, 0xce, 0xeb, - 0xc6, 0xa0, 0x7f, 0x6d, 0x60, 0xbc, 0x58, 0x20, 0x75, 0xa8, 0x74, 0x75, 0xfd, 0xc6, 0x34, 0x0c, - 0x54, 0x8b, 0x64, 0x1f, 0x6a, 0x68, 0x9c, 0x5f, 0x5e, 0x1b, 0x72, 0xa1, 0xc4, 0xff, 0x8c, 0x46, - 0xef, 0xfa, 0x06, 0xcd, 0x9e, 0xba, 0xcb, 0xa3, 0xa1, 0x71, 0xa1, 0x8b, 0xa8, 0xcc, 0x23, 0x1d, - 0x2f, 0x4d, 0x11, 0x55, 0x48, 0x05, 0x8a, 0x67, 0x97, 0xfd, 0x0b, 0xb5, 0x4a, 0xaa, 0x50, 0x1a, - 0x18, 0xdd, 0x6b, 0x43, 0x05, 0xfe, 0x79, 0x82, 0xdd, 0x77, 0x23, 0xb5, 0xc6, 0x3f, 0x4d, 0xbc, - 0xba, 0x30, 0xd4, 0xfa, 0xe1, 0x5b, 0xd8, 0x5f, 0xdd, 0xef, 0xb1, 0xc5, 0x26, 0xb7, 0xe4, 0x27, - 0x28, 0x8d, 0xf9, 0x47, 0x38, 0xc4, 0x8f, 0x33, 0x47, 0x01, 0xa5, 0xe6, 0xb8, 0xfe, 0xf1, 0xfe, - 0x40, 0xf9, 0xf7, 0xfe, 0x40, 0xf9, 0x74, 0x7f, 0xa0, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xdb, - 0x3a, 0x1c, 0xe4, 0xc9, 0x0b, 0x00, 0x00, + // 1027 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xdf, 0x6e, 0xe2, 0x46, + 0x14, 0xc6, 0xeb, 0x80, 0x03, 0x1c, 0x08, 0x71, 0xa7, 0xd9, 0xd6, 0x72, 0x77, 0x23, 0x9a, 0xae, + 0x56, 0xa8, 0x95, 0x90, 0x36, 0x52, 0xbb, 0x17, 0xdd, 0x5d, 0x95, 0x60, 0x6f, 0x42, 0x44, 0x12, + 0x6b, 0x20, 0xe9, 0x65, 0x6a, 0x60, 0xba, 0x71, 0x04, 0xb6, 0x65, 0x0f, 0x54, 0x7b, 0xd5, 0xd7, + 0xdb, 0xbb, 0xed, 0x23, 0x54, 0x79, 0x92, 0x6a, 0x66, 0xfc, 0x07, 0x83, 0xed, 0xec, 0x46, 0xb9, + 0xf3, 0x19, 0xbe, 0xdf, 0x99, 0x33, 0x67, 0xce, 0x37, 0x02, 0xea, 0xd4, 0xb7, 0x26, 0xa4, 0xe3, + 0xf9, 0x2e, 0x75, 0x51, 0xcd, 0x5b, 0x8c, 0x83, 0xc5, 0xb8, 0xe3, 0x8d, 0x0f, 0xee, 0xbe, 0x03, + 0x18, 0xb1, 0x9f, 0x8c, 0x25, 0x71, 0x28, 0xea, 0x40, 0x99, 0x7e, 0xf0, 0x88, 0x2a, 0xb5, 0xa4, + 0x76, 0xf3, 0x50, 0xeb, 0xc4, 0xc2, 0x4e, 0x22, 0xea, 0x8c, 0x3e, 0x78, 0x04, 0x73, 0x1d, 0xfa, + 0x16, 0xb6, 0x3d, 0x42, 0xfc, 0xbe, 0xae, 0x6e, 0xb5, 0xa4, 0x76, 0x03, 0x87, 0x11, 0x7a, 0x0a, + 0x35, 0x6a, 0xcf, 0x49, 0x40, 0xad, 0xb9, 0xa7, 0x96, 0x5a, 0x52, 0xbb, 0x84, 0x93, 0x05, 0x34, + 0x80, 0xa6, 0xb7, 0x18, 0xcf, 0xec, 0xe0, 0xe6, 0x8c, 0x04, 0x81, 0xf5, 0x9e, 0xa8, 0xe5, 0x96, + 0xd4, 0xae, 0x1f, 0x3e, 0xcf, 0xde, 0xcf, 0x4c, 0x69, 0xf1, 0x1a, 0x8b, 0xfa, 0xb0, 0xe3, 0x93, + 0x5b, 0x32, 0xa1, 0x51, 0x32, 0x99, 0x27, 0xfb, 0x31, 0x3b, 0x19, 0x5e, 0x95, 0xe2, 0x34, 0x89, + 0x30, 0x28, 0xd3, 0x85, 0x37, 0xb3, 0x27, 0x16, 0x25, 0x51, 0xb6, 0x6d, 0x9e, 0xed, 0x45, 0x76, + 0x36, 0x7d, 0x4d, 0x8d, 0x37, 0x78, 0x76, 0xd8, 0x29, 0x99, 0xd9, 0x4b, 0xe2, 0x47, 0x19, 0x2b, + 0x45, 0x87, 0xd5, 0x53, 0x5a, 0xbc, 0xc6, 0xa2, 0x57, 0x50, 0xb1, 0xa6, 
0x53, 0x93, 0x10, 0x5f, + 0xad, 0xf2, 0x34, 0xcf, 0xb2, 0xd3, 0x74, 0x85, 0x08, 0x47, 0x6a, 0xf4, 0x3b, 0x80, 0x4f, 0xe6, + 0xee, 0x92, 0x70, 0xb6, 0xc6, 0xd9, 0x56, 0x5e, 0x8b, 0x22, 0x1d, 0x5e, 0x61, 0xd8, 0xd6, 0x3e, + 0x99, 0x2c, 0xb1, 0xd9, 0x53, 0xa1, 0x68, 0x6b, 0x2c, 0x44, 0x38, 0x52, 0x33, 0x30, 0x20, 0xce, + 0x94, 0x81, 0xf5, 0x22, 0x70, 0x28, 0x44, 0x38, 0x52, 0x33, 0x70, 0xea, 0xbb, 0x1e, 0x03, 0x1b, + 0x45, 0xa0, 0x2e, 0x44, 0x38, 0x52, 0xb3, 0x31, 0xbe, 0x75, 0x6d, 0x47, 0xdd, 0xe1, 0x54, 0xce, + 0x18, 0x9f, 0xba, 0xb6, 0x83, 0xb9, 0x0e, 0xbd, 0x04, 0x79, 0x46, 0xac, 0x25, 0x51, 0x9b, 0x1c, + 0xf8, 0x3e, 0x1b, 0x18, 0x30, 0x09, 0x16, 0x4a, 0x86, 0xbc, 0xf7, 0xad, 0xbf, 0xa8, 0xba, 0x5b, + 0x84, 0x1c, 0x33, 0x09, 0x16, 0x4a, 0x86, 0x78, 0xfe, 0xc2, 0x21, 0xaa, 0x52, 0x84, 0x98, 0x4c, + 0x82, 0x85, 0x52, 0xd3, 0xa1, 0x99, 0x9e, 0x7e, 0xe6, 0xac, 0xb9, 0xf8, 0xec, 0xeb, 0xdc, 0xa6, + 0x0d, 0x9c, 0x2c, 0xa0, 0x3d, 0x90, 0xa9, 0xeb, 0xd9, 0x13, 0x6e, 0xc7, 0x1a, 0x16, 0x81, 0xf6, + 0x0f, 0xec, 0xa4, 0xc6, 0xfe, 0x9e, 0x24, 0x07, 0xd0, 0xf0, 0xc9, 0x84, 0xd8, 0x4b, 0x32, 0x7d, + 0xe7, 0xbb, 0xf3, 0xd0, 0xda, 0xa9, 0x35, 0x66, 0x7c, 0x9f, 0x58, 0x81, 0xeb, 0x70, 0x77, 0xd7, + 0x70, 0x18, 0x25, 0x05, 0x94, 0x57, 0x0b, 0xb8, 0x05, 0x65, 0xdd, 0x29, 0x8f, 0x50, 0x43, 0xbc, + 0x57, 0x69, 0x75, 0xaf, 0x1b, 0x68, 0xa6, 0x3d, 0xf4, 0x90, 0x96, 0x6d, 0xec, 0x5f, 0xda, 0xdc, + 0x5f, 0x7b, 0x05, 0x95, 0xd0, 0x66, 0x2b, 0xef, 0xa0, 0x94, 0x7a, 0x07, 0xf7, 0xd8, 0x95, 0xbb, + 0xd4, 0x8d, 0x92, 0xf3, 0x40, 0x7b, 0x0e, 0x90, 0x78, 0x2c, 0x8f, 0xd5, 0xfe, 0x84, 0x4a, 0x68, + 0xa5, 0x8d, 0x6a, 0xa4, 0x8c, 0x6e, 0xbc, 0x84, 0xf2, 0x9c, 0x50, 0x8b, 0xef, 0x94, 0xef, 0x4d, + 0xb3, 0x77, 0x46, 0xa8, 0x85, 0xb9, 0x54, 0x1b, 0x41, 0x25, 0xf4, 0x1c, 0x2b, 0x82, 0xb9, 0x6e, + 0xe4, 0x46, 0x45, 0x88, 0xe8, 0x81, 0x59, 0x43, 0x43, 0x3e, 0x66, 0xd6, 0xa7, 0x50, 0x66, 0x86, + 0x4d, 0xae, 0x4b, 0x5a, 0xbd, 0xf4, 0x67, 0x20, 0x73, 0x77, 0xe6, 0x18, 0xe0, 0x17, 0x90, 0xb9, + 0x13, 0x8b, 0xee, 0x29, 0x1b, 0xe3, 0x6e, 0xfc, 0x42, 0xec, 0xa3, 0x04, 0x95, 0xb0, 0x78, 0xf4, + 0x06, 0xaa, 0xe1, 0xa8, 0x05, 0xaa, 0xd4, 0x2a, 0xb5, 0xeb, 0x87, 0x3f, 0x64, 0x9f, 0x36, 0x1c, + 0x56, 0x7e, 0xe2, 0x18, 0x41, 0x5d, 0x68, 0x04, 0x8b, 0x71, 0x30, 0xf1, 0x6d, 0x8f, 0xda, 0xae, + 0xa3, 0x6e, 0xf1, 0x14, 0x79, 0xef, 0xe7, 0x62, 0xcc, 0xf1, 0x14, 0x82, 0x7e, 0x83, 0xca, 0xc4, + 0x75, 0xa8, 0xef, 0xce, 0xf8, 0x10, 0xe7, 0x16, 0xd0, 0x13, 0x22, 0x9e, 0x21, 0x22, 0xb4, 0x2e, + 0xd4, 0x57, 0x0a, 0x7b, 0xd0, 0xe3, 0xf3, 0x06, 0x2a, 0x61, 0x61, 0x0c, 0x0f, 0x4b, 0x1b, 0x8b, + 0xbf, 0x18, 0x55, 0x9c, 0x2c, 0xe4, 0xe0, 0x9f, 0xb6, 0xa0, 0xbe, 0x52, 0x1a, 0x7a, 0x0d, 0xb2, + 0x7d, 0xc3, 0x9e, 0x6a, 0xd1, 0xcd, 0x17, 0x85, 0x87, 0xe9, 0x9f, 0x58, 0x4b, 0xd1, 0x52, 0x01, + 0x71, 0xfa, 0x6f, 0xcb, 0xa1, 0x61, 0x23, 0xef, 0xa1, 0xff, 0xb0, 0x1c, 0x1a, 0xd2, 0x0c, 0x62, + 0xb4, 0x78, 0xf3, 0x4b, 0x9f, 0x41, 0xf3, 0x81, 0x13, 0xb4, 0x78, 0xfe, 0x5f, 0x47, 0xcf, 0x7f, + 0xf9, 0x33, 0x68, 0x3e, 0x77, 0x82, 0xe6, 0x10, 0x3a, 0x81, 0x9a, 0x3d, 0x75, 0x1d, 0xca, 0xab, + 0x97, 0x79, 0x86, 0x9f, 0x8a, 0xab, 0xd7, 0x5d, 0x87, 0xc6, 0x27, 0x48, 0x60, 0xed, 0x04, 0x94, + 0xf5, 0xf6, 0x64, 0xbb, 0x0a, 0xed, 0x03, 0xc4, 0xb7, 0x1b, 0xf0, 0x96, 0x35, 0xf0, 0xca, 0x8a, + 0x76, 0x98, 0x64, 0x8a, 0x36, 0x5a, 0x63, 0xa4, 0x0d, 0xa6, 0x1d, 0x33, 0x71, 0x83, 0x72, 0x3c, + 0xfd, 0x36, 0x56, 0xc6, 0xcd, 0xc8, 0xa9, 0x93, 0xbd, 0xb2, 0x84, 0xf8, 0x51, 0x89, 0x22, 0xd0, + 0x7e, 0x85, 0xbd, 0xac, 0x56, 0xdc, 0x57, 0xe1, 0xc1, 0x27, 0x09, 0xca, 0xec, 0x2f, 0x2e, 0xfa, + 
0x06, 0x76, 0xcd, 0xcb, 0xa3, 0x41, 0x7f, 0x78, 0x72, 0x7d, 0x66, 0x0c, 0x87, 0xdd, 0x63, 0x43, + 0xf9, 0x0a, 0x21, 0x68, 0x62, 0xe3, 0xd4, 0xe8, 0x8d, 0xe2, 0x35, 0x09, 0x3d, 0x81, 0xaf, 0xf5, + 0x4b, 0x73, 0xd0, 0xef, 0x75, 0x47, 0x46, 0xbc, 0xbc, 0xc5, 0x78, 0xdd, 0x18, 0xf4, 0xaf, 0x0c, + 0x1c, 0x2f, 0x96, 0x50, 0x03, 0xaa, 0x5d, 0x5d, 0xbf, 0x36, 0x0d, 0x03, 0x2b, 0x65, 0xb4, 0x0b, + 0x75, 0x6c, 0x9c, 0x5d, 0x5c, 0x19, 0x62, 0x41, 0x66, 0x3f, 0x63, 0xa3, 0x77, 0x75, 0x8d, 0xcd, + 0x9e, 0xb2, 0xcd, 0xa2, 0xa1, 0x71, 0xae, 0xf3, 0xa8, 0xc2, 0x22, 0x1d, 0x5f, 0x98, 0x3c, 0xaa, + 0xa2, 0x2a, 0x94, 0x4f, 0x2f, 0xfa, 0xe7, 0x4a, 0x0d, 0xd5, 0x40, 0x1e, 0x18, 0xdd, 0x2b, 0x43, + 0x01, 0xf6, 0x79, 0x8c, 0xbb, 0xef, 0x46, 0x4a, 0x9d, 0x7d, 0x9a, 0xf8, 0xf2, 0xdc, 0x50, 0x1a, + 0x07, 0x6f, 0x61, 0x37, 0x99, 0x8f, 0x23, 0x8b, 0x4e, 0x6e, 0xd0, 0xcf, 0x20, 0x8f, 0xd9, 0x47, + 0x68, 0xa3, 0x27, 0x99, 0xa3, 0x84, 0x85, 0xe6, 0xa8, 0xf1, 0xf1, 0x6e, 0x5f, 0xfa, 0xf7, 0x6e, + 0x5f, 0xfa, 0xef, 0x6e, 0x5f, 0xfa, 0x3f, 0x00, 0x00, 0xff, 0xff, 0x17, 0x7f, 0xbd, 0x0d, 0x4b, + 0x0c, 0x00, 0x00, } func (m *TraceEvent) Marshal() (dAtA []byte, err error) { @@ -2509,6 +2567,20 @@ func (m *TraceEvent_ControlMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Idontwant) > 0 { + for iNdEx := len(m.Idontwant) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Idontwant[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if len(m.Prune) > 0 { for iNdEx := len(m.Prune) - 1; iNdEx >= 0; iNdEx-- { { @@ -2724,6 +2796,42 @@ func (m *TraceEvent_ControlPruneMeta) MarshalToSizedBuffer(dAtA []byte) (int, er return len(dAtA) - i, nil } +func (m *TraceEvent_ControlIDontWantMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TraceEvent_ControlIDontWantMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TraceEvent_ControlIDontWantMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.MessageIDs) > 0 { + for iNdEx := len(m.MessageIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MessageIDs[iNdEx]) + copy(dAtA[i:], m.MessageIDs[iNdEx]) + i = encodeVarintTrace(dAtA, i, uint64(len(m.MessageIDs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *TraceEventBatch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3211,6 +3319,12 @@ func (m *TraceEvent_ControlMeta) Size() (n int) { n += 1 + l + sovTrace(uint64(l)) } } + if len(m.Idontwant) > 0 { + for _, e := range m.Idontwant { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3295,6 +3409,24 @@ func (m *TraceEvent_ControlPruneMeta) Size() (n int) { return n } +func (m *TraceEvent_ControlIDontWantMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MessageIDs) > 0 { + for _, b := range m.MessageIDs { + l = len(b) + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func 
(m *TraceEventBatch) Size() (n int) { if m == nil { return 0 @@ -6032,6 +6164,40 @@ func (m *TraceEvent_ControlMeta) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Idontwant", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Idontwant = append(m.Idontwant, &TraceEvent_ControlIDontWantMeta{}) + if err := m.Idontwant[len(m.Idontwant)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTrace(dAtA[iNdEx:]) @@ -6453,6 +6619,89 @@ func (m *TraceEvent_ControlPruneMeta) Unmarshal(dAtA []byte) error { } return nil } +func (m *TraceEvent_ControlIDontWantMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControlIDontWantMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControlIDontWantMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageIDs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MessageIDs = append(m.MessageIDs, make([]byte, postIndex-iNdEx)) + copy(m.MessageIDs[len(m.MessageIDs)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *TraceEventBatch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pb/trace.proto b/pb/trace.proto index 7f834020..5ee8401c 100644 --- a/pb/trace.proto +++ b/pb/trace.proto @@ -124,6 +124,7 @@ message TraceEvent { repeated ControlIWantMeta iwant = 2; repeated ControlGraftMeta graft = 3; repeated ControlPruneMeta prune = 4; + repeated ControlIDontWantMeta idontwant = 5; } message ControlIHaveMeta { @@ -143,6 +144,10 @@ message TraceEvent { optional string topic = 1; repeated bytes peers = 2; } + + message ControlIDontWantMeta { + repeated bytes messageIDs = 1; + } } message TraceEventBatch { diff --git a/pubsub.go b/pubsub.go index 24c297dd..3ca14abb 100644 --- a/pubsub.go +++ b/pubsub.go @@ -147,7 +147,7 @@ type PubSub struct { blacklist Blacklist blacklistPeer chan peer.ID - peers map[peer.ID]chan *RPC + peers map[peer.ID]*rpcQueue inboundStreamsMx sync.Mutex inboundStreams map[peer.ID]network.Stream @@ -196,11 +196,14 @@ type PubSubRouter interface { // EnoughPeers returns whether the router needs more peers before it's ready to publish new records. // Suggested (if greater than 0) is a suggested number of peers that the router should need. EnoughPeers(topic string, suggested int) bool - // AcceptFrom is invoked on any incoming message before pushing it to the validation pipeline + // AcceptFrom is invoked on any RPC envelope before pushing it to the validation pipeline // or processing control information. // Allows routers with internal scoring to vet peers before committing any processing resources // to the message and implement an effective graylist and react to validation queue overload. AcceptFrom(peer.ID) AcceptStatus + // PreValidation is invoked on messages in the RPC envelope right before pushing it to + // the validation pipeline + PreValidation([]*Message) // HandleRPC is invoked to process control messages in the RPC envelope. // It is invoked after subscriptions and payload messages have been processed. HandleRPC(*RPC) @@ -285,7 +288,7 @@ func NewPubSub(ctx context.Context, h host.Host, rt PubSubRouter, opts ...Option mySubs: make(map[string]map[*Subscription]struct{}), myRelays: make(map[string]int), topics: make(map[string]map[peer.ID]struct{}), - peers: make(map[peer.ID]chan *RPC), + peers: make(map[peer.ID]*rpcQueue), inboundStreams: make(map[peer.ID]network.Stream), blacklist: NewMapBlacklist(), blacklistPeer: make(chan peer.ID), @@ -559,8 +562,8 @@ func WithAppSpecificRpcInspector(inspector func(peer.ID, *RPC) error) Option { func (p *PubSub) processLoop(ctx context.Context) { defer func() { // Clean up go routines. 
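+		// Closing a queue makes any blocked or future Pop on it return ErrQueueClosed,
+		// which lets the per-peer writer goroutines exit on their own (previously this
+		// was signaled by closing the channel).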
- for _, ch := range p.peers { - close(ch) + for _, queue := range p.peers { + queue.Close() } p.peers = nil p.topics = nil @@ -575,7 +578,7 @@ func (p *PubSub) processLoop(ctx context.Context) { case s := <-p.newPeerStream: pid := s.Conn().RemotePeer() - ch, ok := p.peers[pid] + q, ok := p.peers[pid] if !ok { log.Warn("new stream for unknown peer: ", pid) s.Reset() @@ -584,7 +587,7 @@ func (p *PubSub) processLoop(ctx context.Context) { if p.blacklist.Contains(pid) { log.Warn("closing stream for blacklisted peer: ", pid) - close(ch) + q.Close() delete(p.peers, pid) s.Reset() continue @@ -652,9 +655,9 @@ func (p *PubSub) processLoop(ctx context.Context) { log.Infof("Blacklisting peer %s", pid) p.blacklist.Add(pid) - ch, ok := p.peers[pid] + q, ok := p.peers[pid] if ok { - close(ch) + q.Close() delete(p.peers, pid) for t, tmap := range p.topics { if _, ok := tmap[pid]; ok { @@ -701,10 +704,10 @@ func (p *PubSub) handlePendingPeers() { continue } - messages := make(chan *RPC, p.peerOutboundQueueSize) - messages <- p.getHelloPacket() - go p.handleNewPeer(p.ctx, pid, messages) - p.peers[pid] = messages + rpcQueue := newRpcQueue(p.peerOutboundQueueSize) + rpcQueue.Push(p.getHelloPacket(), true) + go p.handleNewPeer(p.ctx, pid, rpcQueue) + p.peers[pid] = rpcQueue } } @@ -721,12 +724,12 @@ func (p *PubSub) handleDeadPeers() { p.peerDeadPrioLk.Unlock() for pid := range deadPeers { - ch, ok := p.peers[pid] + q, ok := p.peers[pid] if !ok { continue } - close(ch) + q.Close() delete(p.peers, pid) for t, tmap := range p.topics { @@ -748,10 +751,10 @@ func (p *PubSub) handleDeadPeers() { // still connected, must be a duplicate connection being closed. // we respawn the writer as we need to ensure there is a stream active log.Debugf("peer declared dead but still connected; respawning writer: %s", pid) - messages := make(chan *RPC, p.peerOutboundQueueSize) - messages <- p.getHelloPacket() - p.peers[pid] = messages - go p.handleNewPeerWithBackoff(p.ctx, pid, backoffDelay, messages) + rpcQueue := newRpcQueue(p.peerOutboundQueueSize) + rpcQueue.Push(p.getHelloPacket(), true) + p.peers[pid] = rpcQueue + go p.handleNewPeerWithBackoff(p.ctx, pid, backoffDelay, rpcQueue) } } } @@ -915,14 +918,14 @@ func (p *PubSub) announce(topic string, sub bool) { out := rpcWithSubs(subopt) for pid, peer := range p.peers { - select { - case peer <- out: - p.tracer.SendRPC(out, pid) - default: + err := peer.Push(out, false) + if err != nil { log.Infof("Can't send announce message to peer %s: queue full; scheduling retry", pid) p.tracer.DropRPC(out, pid) go p.announceRetry(pid, topic, sub) + continue } + p.tracer.SendRPC(out, pid) } } @@ -958,14 +961,14 @@ func (p *PubSub) doAnnounceRetry(pid peer.ID, topic string, sub bool) { } out := rpcWithSubs(subopt) - select { - case peer <- out: - p.tracer.SendRPC(out, pid) - default: + err := peer.Push(out, false) + if err != nil { log.Infof("Can't send announce message to peer %s: queue full; scheduling retry", pid) p.tracer.DropRPC(out, pid) go p.announceRetry(pid, topic, sub) + return } + p.tracer.SendRPC(out, pid) } // notifySubs sends a given message to all corresponding subscribers. 
@@ -1091,13 +1094,21 @@ func (p *PubSub) handleIncomingRPC(rpc *RPC) { p.tracer.ThrottlePeer(rpc.from) case AcceptAll: + var toPush []*Message for _, pmsg := range rpc.GetPublish() { if !(p.subscribedToMsg(pmsg) || p.canRelayMsg(pmsg)) { log.Debug("received message in topic we didn't subscribe to; ignoring message") continue } - p.pushMsg(&Message{pmsg, "", rpc.from, nil, false}) + msg := &Message{pmsg, "", rpc.from, nil, false} + if p.shouldPush(msg) { + toPush = append(toPush, msg) + } + } + p.rt.PreValidation(toPush) + for _, msg := range toPush { + p.pushMsg(msg) } } @@ -1114,27 +1125,28 @@ func DefaultPeerFilter(pid peer.ID, topic string) bool { return true } -// pushMsg pushes a message performing validation as necessary -func (p *PubSub) pushMsg(msg *Message) { +// shouldPush filters a message before validating and pushing it +// It returns true if the message can be further validated and pushed +func (p *PubSub) shouldPush(msg *Message) bool { src := msg.ReceivedFrom // reject messages from blacklisted peers if p.blacklist.Contains(src) { log.Debugf("dropping message from blacklisted peer %s", src) p.tracer.RejectMessage(msg, RejectBlacklstedPeer) - return + return false } // even if they are forwarded by good peers if p.blacklist.Contains(msg.GetFrom()) { log.Debugf("dropping message from blacklisted source %s", src) p.tracer.RejectMessage(msg, RejectBlacklistedSource) - return + return false } err := p.checkSigningPolicy(msg) if err != nil { log.Debugf("dropping message from %s: %s", src, err) - return + return false } // reject messages claiming to be from ourselves but not locally published @@ -1142,16 +1154,24 @@ func (p *PubSub) pushMsg(msg *Message) { if peer.ID(msg.GetFrom()) == self && src != self { log.Debugf("dropping message claiming to be from self but forwarded from %s", src) p.tracer.RejectMessage(msg, RejectSelfOrigin) - return + return false } // have we already seen and validated this message? 
id := p.idGen.ID(msg) if p.seenMessage(id) { p.tracer.DuplicateMessage(msg) - return + return false } + return true +} + +// pushMsg pushes a message performing validation as necessary +func (p *PubSub) pushMsg(msg *Message) { + src := msg.ReceivedFrom + id := p.idGen.ID(msg) + if !p.val.Push(src, msg) { return } diff --git a/randomsub.go b/randomsub.go index f29b923f..4e410f5f 100644 --- a/randomsub.go +++ b/randomsub.go @@ -94,6 +94,8 @@ func (rs *RandomSubRouter) AcceptFrom(peer.ID) AcceptStatus { return AcceptAll } +func (rs *RandomSubRouter) PreValidation([]*Message) {} + func (rs *RandomSubRouter) HandleRPC(rpc *RPC) {} func (rs *RandomSubRouter) Publish(msg *Message) { @@ -144,18 +146,18 @@ func (rs *RandomSubRouter) Publish(msg *Message) { out := rpcWithMessages(msg.Message) for p := range tosend { - mch, ok := rs.p.peers[p] + q, ok := rs.p.peers[p] if !ok { continue } - select { - case mch <- out: - rs.tracer.SendRPC(out, p) - default: + err := q.Push(out, false) + if err != nil { log.Infof("dropping message to peer %s: queue full", p) rs.tracer.DropRPC(out, p) + continue } + rs.tracer.SendRPC(out, p) } } diff --git a/rpc_queue.go b/rpc_queue.go new file mode 100644 index 00000000..e5c22935 --- /dev/null +++ b/rpc_queue.go @@ -0,0 +1,147 @@ +package pubsub + +import ( + "context" + "errors" + "sync" +) + +var ( + ErrQueueCancelled = errors.New("rpc queue operation cancelled") + ErrQueueClosed = errors.New("rpc queue closed") + ErrQueueFull = errors.New("rpc queue full") + ErrQueuePushOnClosed = errors.New("push on closed rpc queue") +) + +type priorityQueue struct { + normal []*RPC + priority []*RPC +} + +func (q *priorityQueue) Len() int { + return len(q.normal) + len(q.priority) +} + +func (q *priorityQueue) NormalPush(rpc *RPC) { + q.normal = append(q.normal, rpc) +} + +func (q *priorityQueue) PriorityPush(rpc *RPC) { + q.priority = append(q.priority, rpc) +} + +func (q *priorityQueue) Pop() *RPC { + var rpc *RPC + + if len(q.priority) > 0 { + rpc = q.priority[0] + q.priority[0] = nil + q.priority = q.priority[1:] + } else if len(q.normal) > 0 { + rpc = q.normal[0] + q.normal[0] = nil + q.normal = q.normal[1:] + } + + return rpc +} + +type rpcQueue struct { + dataAvailable sync.Cond + spaceAvailable sync.Cond + // Mutex used to access queue + queueMu sync.Mutex + queue priorityQueue + + closed bool + maxSize int +} + +func newRpcQueue(maxSize int) *rpcQueue { + q := &rpcQueue{maxSize: maxSize} + q.dataAvailable.L = &q.queueMu + q.spaceAvailable.L = &q.queueMu + return q +} + +func (q *rpcQueue) Push(rpc *RPC, block bool) error { + return q.push(rpc, false, block) +} + +func (q *rpcQueue) UrgentPush(rpc *RPC, block bool) error { + return q.push(rpc, true, block) +} + +func (q *rpcQueue) push(rpc *RPC, urgent bool, block bool) error { + q.queueMu.Lock() + defer q.queueMu.Unlock() + + if q.closed { + panic(ErrQueuePushOnClosed) + } + + for q.queue.Len() == q.maxSize { + if block { + q.spaceAvailable.Wait() + // It can receive a signal because the queue is closed. + if q.closed { + panic(ErrQueuePushOnClosed) + } + } else { + return ErrQueueFull + } + } + if urgent { + q.queue.PriorityPush(rpc) + } else { + q.queue.NormalPush(rpc) + } + + q.dataAvailable.Signal() + return nil +} + +// Note that, when the queue is empty and there are two blocked Pop calls, it +// doesn't mean that the first Pop will get the item from the next Push. The +// second Pop will probably get it instead. 
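+//
+// For example (an illustrative sketch, not part of this change; handle and out
+// are placeholder names), with an empty queue and two blocked waiters:
+//
+//	go func() { rpc, _ := q.Pop(ctx); handle(rpc) }() // waiter A
+//	go func() { rpc, _ := q.Pop(ctx); handle(rpc) }() // waiter B
+//	_ = q.Push(out, false) // either waiter may be the one that receives out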
+func (q *rpcQueue) Pop(ctx context.Context) (*RPC, error) { + q.queueMu.Lock() + defer q.queueMu.Unlock() + + if q.closed { + return nil, ErrQueueClosed + } + + unregisterAfterFunc := context.AfterFunc(ctx, func() { + // Wake up all the waiting routines. The only routine that correponds + // to this Pop call will return from the function. Note that this can + // be expensive, if there are too many waiting routines. + q.dataAvailable.Broadcast() + }) + defer unregisterAfterFunc() + + for q.queue.Len() == 0 { + select { + case <-ctx.Done(): + return nil, ErrQueueCancelled + default: + } + q.dataAvailable.Wait() + // It can receive a signal because the queue is closed. + if q.closed { + return nil, ErrQueueClosed + } + } + rpc := q.queue.Pop() + q.spaceAvailable.Signal() + return rpc, nil +} + +func (q *rpcQueue) Close() { + q.queueMu.Lock() + defer q.queueMu.Unlock() + + q.closed = true + q.dataAvailable.Broadcast() + q.spaceAvailable.Broadcast() +} diff --git a/rpc_queue_test.go b/rpc_queue_test.go new file mode 100644 index 00000000..6e92ee56 --- /dev/null +++ b/rpc_queue_test.go @@ -0,0 +1,229 @@ +package pubsub + +import ( + "context" + "testing" + "time" +) + +func TestNewRpcQueue(t *testing.T) { + maxSize := 32 + q := newRpcQueue(maxSize) + if q.maxSize != maxSize { + t.Fatalf("rpc queue has wrong max size, expected %d but got %d", maxSize, q.maxSize) + } + if q.dataAvailable.L != &q.queueMu { + t.Fatalf("the dataAvailable field of rpc queue has an incorrect mutex") + } + if q.spaceAvailable.L != &q.queueMu { + t.Fatalf("the spaceAvailable field of rpc queue has an incorrect mutex") + } +} + +func TestRpcQueueUrgentPush(t *testing.T) { + maxSize := 32 + q := newRpcQueue(maxSize) + + rpc1 := &RPC{} + rpc2 := &RPC{} + rpc3 := &RPC{} + rpc4 := &RPC{} + q.Push(rpc1, true) + q.UrgentPush(rpc2, true) + q.Push(rpc3, true) + q.UrgentPush(rpc4, true) + pop1, err := q.Pop(context.Background()) + if err != nil { + t.Fatal(err) + } + pop2, err := q.Pop(context.Background()) + if err != nil { + t.Fatal(err) + } + pop3, err := q.Pop(context.Background()) + if err != nil { + t.Fatal(err) + } + pop4, err := q.Pop(context.Background()) + if err != nil { + t.Fatal(err) + } + if pop1 != rpc2 { + t.Fatalf("get wrong item from rpc queue Pop") + } + if pop2 != rpc4 { + t.Fatalf("get wrong item from rpc queue Pop") + } + if pop3 != rpc1 { + t.Fatalf("get wrong item from rpc queue Pop") + } + if pop4 != rpc3 { + t.Fatalf("get wrong item from rpc queue Pop") + } +} + +func TestRpcQueuePushThenPop(t *testing.T) { + maxSize := 32 + q := newRpcQueue(maxSize) + + rpc1 := &RPC{} + rpc2 := &RPC{} + q.Push(rpc1, true) + q.Push(rpc2, true) + pop1, err := q.Pop(context.Background()) + if err != nil { + t.Fatal(err) + } + pop2, err := q.Pop(context.Background()) + if err != nil { + t.Fatal(err) + } + if pop1 != rpc1 { + t.Fatalf("get wrong item from rpc queue Pop") + } + if pop2 != rpc2 { + t.Fatalf("get wrong item from rpc queue Pop") + } +} + +func TestRpcQueuePopThenPush(t *testing.T) { + maxSize := 32 + q := newRpcQueue(maxSize) + + rpc1 := &RPC{} + rpc2 := &RPC{} + go func() { + // Wait to make sure the main goroutine is blocked. 
+ time.Sleep(1 * time.Millisecond) + q.Push(rpc1, true) + q.Push(rpc2, true) + }() + pop1, err := q.Pop(context.Background()) + if err != nil { + t.Fatal(err) + } + pop2, err := q.Pop(context.Background()) + if err != nil { + t.Fatal(err) + } + if pop1 != rpc1 { + t.Fatalf("get wrong item from rpc queue Pop") + } + if pop2 != rpc2 { + t.Fatalf("get wrong item from rpc queue Pop") + } +} + +func TestRpcQueueBlockPushWhenFull(t *testing.T) { + maxSize := 1 + q := newRpcQueue(maxSize) + + finished := make(chan struct{}) + q.Push(&RPC{}, true) + go func() { + q.Push(&RPC{}, true) + finished <- struct{}{} + }() + // Wait to make sure the goroutine is blocked. + time.Sleep(1 * time.Millisecond) + select { + case <-finished: + t.Fatalf("blocking rpc queue Push is not blocked when it is full") + default: + } +} + +func TestRpcQueueNonblockPushWhenFull(t *testing.T) { + maxSize := 1 + q := newRpcQueue(maxSize) + + q.Push(&RPC{}, true) + err := q.Push(&RPC{}, false) + if err != ErrQueueFull { + t.Fatalf("non-blocking rpc queue Push returns wrong error when it is full") + } +} + +func TestRpcQueuePushAfterClose(t *testing.T) { + maxSize := 32 + q := newRpcQueue(maxSize) + q.Close() + + defer func() { + if r := recover(); r == nil { + t.Fatalf("rpc queue Push does not panick after closed") + } + }() + q.Push(&RPC{}, true) +} + +func TestRpcQueuePopAfterClose(t *testing.T) { + maxSize := 32 + q := newRpcQueue(maxSize) + q.Close() + _, err := q.Pop(context.Background()) + if err != ErrQueueClosed { + t.Fatalf("rpc queue Pop returns wrong error after closed") + } +} + +func TestRpcQueueCloseWhilePush(t *testing.T) { + maxSize := 1 + q := newRpcQueue(maxSize) + q.Push(&RPC{}, true) + + defer func() { + if r := recover(); r == nil { + t.Fatalf("rpc queue Push does not panick when it's closed on the fly") + } + }() + + go func() { + // Wait to make sure the main goroutine is blocked. + time.Sleep(1 * time.Millisecond) + q.Close() + }() + q.Push(&RPC{}, true) +} + +func TestRpcQueueCloseWhilePop(t *testing.T) { + maxSize := 32 + q := newRpcQueue(maxSize) + go func() { + // Wait to make sure the main goroutine is blocked. + time.Sleep(1 * time.Millisecond) + q.Close() + }() + _, err := q.Pop(context.Background()) + if err != ErrQueueClosed { + t.Fatalf("rpc queue Pop returns wrong error when it's closed on the fly") + } +} + +func TestRpcQueuePushWhenFullThenPop(t *testing.T) { + maxSize := 1 + q := newRpcQueue(maxSize) + + q.Push(&RPC{}, true) + go func() { + // Wait to make sure the main goroutine is blocked. + time.Sleep(1 * time.Millisecond) + q.Pop(context.Background()) + }() + q.Push(&RPC{}, true) +} + +func TestRpcQueueCancelPop(t *testing.T) { + maxSize := 32 + q := newRpcQueue(maxSize) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Wait to make sure the main goroutine is blocked. 
+ time.Sleep(1 * time.Millisecond) + cancel() + }() + _, err := q.Pop(ctx) + if err != ErrQueueCancelled { + t.Fatalf("rpc queue Pop returns wrong error when it's cancelled") + } +} diff --git a/trace.go b/trace.go index 27fac289..7dbb5409 100644 --- a/trace.go +++ b/trace.go @@ -402,11 +402,23 @@ func (t *pubsubTracer) traceRPCMeta(rpc *RPC) *pb.TraceEvent_RPCMeta { }) } + var idontwant []*pb.TraceEvent_ControlIDontWantMeta + for _, ctl := range rpc.Control.Idontwant { + var mids [][]byte + for _, mid := range ctl.MessageIDs { + mids = append(mids, []byte(mid)) + } + idontwant = append(idontwant, &pb.TraceEvent_ControlIDontWantMeta{ + MessageIDs: mids, + }) + } + rpcMeta.Control = &pb.TraceEvent_ControlMeta{ - Ihave: ihave, - Iwant: iwant, - Graft: graft, - Prune: prune, + Ihave: ihave, + Iwant: iwant, + Graft: graft, + Prune: prune, + Idontwant: idontwant, } } From 1aeb6ebc6acea50247d79fd992fc94c5d65d7797 Mon Sep 17 00:00:00 2001 From: sukun Date: Fri, 16 Aug 2024 22:27:24 +0530 Subject: [PATCH 18/21] chore: upgrade go-libp2p (#575) Co-authored-by: Steven Allen --- floodsub_test.go | 3 +- go.mod | 76 ++++++++-------- go.sum | 183 ++++++++++++++++++-------------------- gossipsub_connmgr_test.go | 6 +- 4 files changed, 128 insertions(+), 140 deletions(-) diff --git a/floodsub_test.go b/floodsub_test.go index e7bf379f..8efedaad 100644 --- a/floodsub_test.go +++ b/floodsub_test.go @@ -42,8 +42,7 @@ func checkMessageRouting(t *testing.T, topic string, pubs []*PubSub, subs []*Sub } func connect(t *testing.T, a, b host.Host) { - pinfo := a.Peerstore().PeerInfo(a.ID()) - err := b.Connect(context.Background(), pinfo) + err := b.Connect(context.Background(), peer.AddrInfo{ID: a.ID(), Addrs: a.Addrs()}) if err != nil { t.Fatal(err) } diff --git a/go.mod b/go.mod index bd2a94f0..652266e1 100644 --- a/go.mod +++ b/go.mod @@ -7,38 +7,38 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/ipfs/go-log/v2 v2.5.1 github.com/libp2p/go-buffer-pool v0.1.0 - github.com/libp2p/go-libp2p v0.34.0 + github.com/libp2p/go-libp2p v0.36.2 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-msgio v0.3.0 - github.com/multiformats/go-multiaddr v0.12.4 + github.com/multiformats/go-multiaddr v0.13.0 github.com/multiformats/go-varint v0.0.7 ) require ( github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/elastic/gosigar v0.14.2 // indirect + github.com/elastic/gosigar v0.14.3 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect - github.com/google/uuid v1.4.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect 
github.com/huin/goupnp v1.3.0 // indirect github.com/ipfs/go-cid v0.4.1 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.17.8 // indirect - github.com/klauspost/cpuid/v2 v2.2.7 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/cpuid/v2 v2.2.8 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect @@ -48,7 +48,7 @@ require ( github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/miekg/dns v1.1.58 // indirect + github.com/miekg/dns v1.1.62 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect @@ -61,51 +61,53 @@ require ( github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/multiformats/go-multistream v0.5.0 // indirect - github.com/onsi/ginkgo/v2 v2.15.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo/v2 v2.20.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pion/datachannel v1.5.6 // indirect - github.com/pion/dtls/v2 v2.2.11 // indirect - github.com/pion/ice/v2 v2.3.24 // indirect - github.com/pion/interceptor v0.1.29 // indirect + github.com/pion/datachannel v1.5.8 // indirect + github.com/pion/dtls/v2 v2.2.12 // indirect + github.com/pion/ice/v2 v2.3.34 // indirect + github.com/pion/interceptor v0.1.30 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.12 // indirect github.com/pion/randutil v0.1.0 // indirect github.com/pion/rtcp v1.2.14 // indirect - github.com/pion/rtp v1.8.6 // indirect - github.com/pion/sctp v1.8.16 // indirect + github.com/pion/rtp v1.8.9 // indirect + github.com/pion/sctp v1.8.33 // indirect github.com/pion/sdp/v3 v3.0.9 // indirect - github.com/pion/srtp/v2 v2.0.18 // indirect + github.com/pion/srtp/v2 v2.0.20 // indirect github.com/pion/stun v0.6.1 // indirect - github.com/pion/transport/v2 v2.2.5 // indirect + github.com/pion/transport/v2 v2.2.10 // indirect github.com/pion/turn/v2 v2.1.6 // indirect - github.com/pion/webrtc/v3 v3.2.40 // indirect + github.com/pion/webrtc/v3 v3.3.0 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.20.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.48.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/quic-go v0.44.0 // indirect + github.com/quic-go/quic-go v0.46.0 // indirect github.com/quic-go/webtransport-go v0.8.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/stretchr/testify v1.9.0 // indirect - go.uber.org/dig v1.17.1 // indirect - go.uber.org/fx v1.21.1 // indirect + 
github.com/wlynxg/anet v0.0.4 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.22.2 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.23.0 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect - golang.org/x/tools v0.21.0 // indirect - google.golang.org/protobuf v1.34.1 // indirect + golang.org/x/crypto v0.26.0 // indirect + golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect + golang.org/x/mod v0.20.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.24.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - lukechampine.com/blake3 v1.2.1 // indirect + lukechampine.com/blake3 v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 5b9ae3b1..cb7aa749 100644 --- a/go.sum +++ b/go.sum @@ -18,8 +18,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= @@ -45,8 +45,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= -github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= +github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= @@ -56,10 +56,10 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod 
h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= @@ -74,8 +74,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -87,16 +85,16 @@ github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -117,10 +115,10 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= -github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM= -github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= +github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -136,8 +134,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.34.0 h1:J+SL3DMz+zPz06OHSRt42GKA5n5hmwgY1l7ckLUz3+c= -github.com/libp2p/go-libp2p v0.34.0/go.mod h1:snyJQix4ET6Tj+LeI0VPjjxTtdWpeOhYt5lEY0KirkQ= +github.com/libp2p/go-libp2p v0.36.2 h1:BbqRkDaGC3/5xfaJakLV/BrpjlAuYqSB0lRvtzL3B/U= +github.com/libp2p/go-libp2p v0.36.2/go.mod h1:XO3joasRE4Eup8yCTTP/+kX+g92mOgRaadk46LmPhHY= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= @@ -162,8 +160,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= 
-github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= +github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -185,8 +183,8 @@ github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9 github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.12.4 h1:rrKqpY9h+n80EwhhC/kkcunCZZ7URIF8yN1WEUt2Hvc= -github.com/multiformats/go-multiaddr v0.12.4/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= +github.com/multiformats/go-multiaddr v0.13.0 h1:BCBzs61E3AGHcYYTv8dqRH43ZfyrqM8RXVPT8t13tLQ= +github.com/multiformats/go-multiaddr v0.13.0/go.mod h1:sBXrNzucqkFJhvKOiwwLyqamGa/P5EIXNPLovyhQCII= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= @@ -203,27 +201,29 @@ github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dy github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= +github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/openzipkin/zipkin-go v0.1.1/go.mod 
h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg= -github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4= +github.com/pion/datachannel v1.5.8 h1:ph1P1NsGkazkjrvyMfhRBUAWMxugJjq2HfQifaOoSNo= +github.com/pion/datachannel v1.5.8/go.mod h1:PgmdpoaNBLX9HNzNClmdki4DYW5JtI7Yibu8QzbL3tI= github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s= -github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks= -github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= -github.com/pion/ice/v2 v2.3.24 h1:RYgzhH/u5lH0XO+ABatVKCtRd+4U1GEaCXSMjNr13tI= -github.com/pion/ice/v2 v2.3.24/go.mod h1:KXJJcZK7E8WzrBEYnV4UtqEZsGeWfHxsNqhVcVvgjxw= -github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M= -github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4= +github.com/pion/dtls/v2 v2.2.12 h1:KP7H5/c1EiVAAKUmXyCzPiQe5+bCJrpOeKg/L05dunk= +github.com/pion/dtls/v2 v2.2.12/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE= +github.com/pion/ice/v2 v2.3.34 h1:Ic1ppYCj4tUOcPAp76U6F3fVrlSw8A9JtRXLqw6BbUM= +github.com/pion/ice/v2 v2.3.34/go.mod h1:mBF7lnigdqgtB+YHkaY/Y6s6tsyRyo4u4rPGRuOjUBQ= +github.com/pion/interceptor v0.1.30 h1:au5rlVHsgmxNi+v/mjOPazbW1SHzfx7/hYOEYQnUcxA= +github.com/pion/interceptor v0.1.30/go.mod h1:RQuKT5HTdkP2Fi0cuOS5G5WNymTjzXaGF75J4k7z2nc= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8= @@ -234,52 +234,50 @@ github.com/pion/rtcp v1.2.12/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9 github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE= github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4= github.com/pion/rtp v1.8.3/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw= -github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= -github.com/pion/sctp v1.8.13/go.mod h1:YKSgO/bO/6aOMP9LCie1DuD7m+GamiK2yIiPM6vH+GA= -github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY= -github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE= +github.com/pion/rtp v1.8.9 h1:E2HX740TZKaqdcPmf4pw6ZZuG8u5RlMMt+l3dxeu6Wk= +github.com/pion/rtp v1.8.9/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU= +github.com/pion/sctp v1.8.33 h1:dSE4wX6uTJBcNm8+YlMg7lw1wqyKHggsP5uKbdj+NZw= +github.com/pion/sctp v1.8.33/go.mod h1:beTnqSzewI53KWoG3nqB282oDMGrhNxBdb+JZnkCwRM= github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY= github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M= -github.com/pion/srtp/v2 v2.0.18 h1:vKpAXfawO9RtTRKZJbG4y0v1b11NZxQnxRl85kGuUlo= -github.com/pion/srtp/v2 v2.0.18/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= +github.com/pion/srtp/v2 v2.0.20 h1:HNNny4s+OUmG280ETrCdgFndp4ufx3/uy85EawYEhTk= +github.com/pion/srtp/v2 v2.0.20/go.mod h1:0KJQjA99A6/a0DOVTu1PhDSw0CXF2jTkqOoMg3ODqdA= 
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4= github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8= github.com/pion/transport/v2 v2.2.1/go.mod h1:cXXWavvCnFF6McHTft3DWS9iic2Mftcz1Aq29pGcU5g= -github.com/pion/transport/v2 v2.2.2/go.mod h1:OJg3ojoBJopjEeECq2yJdXH9YVrUJ1uQ++NjXLOUorc= github.com/pion/transport/v2 v2.2.3/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= github.com/pion/transport/v2 v2.2.4/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= -github.com/pion/transport/v2 v2.2.5 h1:iyi25i/21gQck4hfRhomF6SktmUQjRsRW4WJdhfc3Kc= -github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLhCCgpRK4p0= +github.com/pion/transport/v2 v2.2.10 h1:ucLBLE8nuxiHfvkFKnkDQRYWYfp8ejf4YBOPfaQpw6Q= +github.com/pion/transport/v2 v2.2.10/go.mod h1:sq1kSLWs+cHW9E+2fJP95QudkzbK7wscs8yYgQToO5E= github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0= -github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4= -github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0= +github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0= +github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo= github.com/pion/turn/v2 v2.1.3/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= github.com/pion/turn/v2 v2.1.6 h1:Xr2niVsiPTB0FPtt+yAWKFUkU1eotQbGgpTIld4x1Gc= github.com/pion/turn/v2 v2.1.6/go.mod h1:huEpByKKHix2/b9kmTAM3YoX6MKP+/D//0ClgUYR2fY= -github.com/pion/webrtc/v3 v3.2.40 h1:Wtfi6AZMQg+624cvCXUuSmrKWepSB7zfgYDOYqsSOVU= -github.com/pion/webrtc/v3 v3.2.40/go.mod h1:M1RAe3TNTD1tzyvqHrbVODfwdPGSXOUo/OgpoGGJqFY= +github.com/pion/webrtc/v3 v3.3.0 h1:Rf4u6n6U5t5sUxhYPQk/samzU/oDv7jk6BA5hyO2F9I= +github.com/pion/webrtc/v3 v3.3.0/go.mod h1:hVmrDJvwhEertRWObeb1xzulzHGeVUoPlWvxdGzcfU0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= +github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= +github.com/prometheus/common 
v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0= -github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek= +github.com/quic-go/quic-go v0.46.0 h1:uuwLClEEyk1DNvchH8uCByQVjo3yKL9opKulExNDs7Y= +github.com/quic-go/quic-go v0.46.0/go.mod h1:1dLehS7TIR64+vxGR70GDcatWTOtMX2PUtnKsjbTurI= github.com/quic-go/webtransport-go v0.8.0 h1:HxSrwun11U+LlmwpgM1kEqIqH90IT4N8auv/cD7QFJg= github.com/quic-go/webtransport-go v0.8.0/go.mod h1:N99tjprW432Ut5ONql/aUhSLT0YVSlwHohQsuac9WaM= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= @@ -324,7 +322,6 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -336,16 +333,19 @@ github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cb github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU= github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM= +github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= +github.com/wlynxg/anet v0.0.4 h1:0de1OFQxnNqAu+x2FAKKCVIrnfGKQbs7FQz++tB0+Uw= +github.com/wlynxg/anet v0.0.4/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= -go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= -go.uber.org/fx v1.21.1 h1:RqBh3cYdzZS0uqwVeEjOX2p73dddLpym315myy/Bpb0= -go.uber.org/fx v1.21.1/go.mod h1:HT2M7d7RHo+ebKGh9NRcrsrHHfpZ60nW3QRubMRfv48= +go.uber.org/dig v1.18.0 
h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.22.2 h1:iPW+OPxv0G8w75OemJ1RAnTUrF55zOJlXlo1TbJ0Buw= +go.uber.org/fx v1.22.2/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= @@ -369,16 +369,13 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= -golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= -golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= +golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -390,8 +387,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= +golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -412,13 
+409,10 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -434,8 +428,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -460,34 +454,27 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod 
h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -506,8 +493,8 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= -golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= +golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -528,8 +515,8 @@ google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/protobuf v1.34.1 
h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -547,7 +534,7 @@ grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJd honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -lukechampine.com/blake3 v1.2.1 h1:YuqqRuaqsGV71BV/nm9xlI0MKUv4QC54jQnBChWbGnI= -lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= +lukechampine.com/blake3 v1.3.0 h1:sJ3XhFINmHSrYCgl958hscfIa3bw8x4DqMP3u1YvoYE= +lukechampine.com/blake3 v1.3.0/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1LM6k= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/gossipsub_connmgr_test.go b/gossipsub_connmgr_test.go index e72f5545..10650a0e 100644 --- a/gossipsub_connmgr_test.go +++ b/gossipsub_connmgr_test.go @@ -98,7 +98,7 @@ func TestGossipsubConnTagMessageDeliveries(t *testing.T) { connectAll(t, honestHosts) for _, h := range honestHosts { - if len(h.Network().Conns()) != nHonest-1 { + if len(h.Network().Peers()) != nHonest-1 { t.Errorf("expected to have conns to all honest peers, have %d", len(h.Network().Conns())) } } @@ -148,8 +148,8 @@ func TestGossipsubConnTagMessageDeliveries(t *testing.T) { for _, h := range honestHosts { nHonestConns := 0 nDishonestConns := 0 - for _, conn := range h.Network().Conns() { - if _, ok := honestPeers[conn.RemotePeer()]; !ok { + for _, p := range h.Network().Peers() { + if _, ok := honestPeers[p]; !ok { nDishonestConns++ } else { nHonestConns++ From b8a6a868adce87101b3f61cb1b1a644db627c59f Mon Sep 17 00:00:00 2001 From: web3-bot Date: Mon, 26 Aug 2024 15:58:30 +0100 Subject: [PATCH 19/21] ci: uci/update-go (#577) This PR was created automatically by the @web3-bot as a part of the [Unified CI](https://github.com/ipdxco/unified-github-workflows) project. --- compat/compat.pb.go | 3 ++- go.mod | 2 +- pb/rpc.pb.go | 3 ++- pb/trace.pb.go | 3 ++- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/compat/compat.pb.go b/compat/compat.pb.go index 607b78ac..57a00dd8 100644 --- a/compat/compat.pb.go +++ b/compat/compat.pb.go @@ -5,10 +5,11 @@ package compat_pb import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/go.mod b/go.mod index 652266e1..437017c8 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/libp2p/go-libp2p-pubsub -go 1.21 +go 1.22 require ( github.com/benbjohnson/clock v1.3.5 diff --git a/pb/rpc.pb.go b/pb/rpc.pb.go index 151cb44d..213cdcc3 100644 --- a/pb/rpc.pb.go +++ b/pb/rpc.pb.go @@ -5,10 +5,11 @@ package pubsub_pb import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. diff --git a/pb/trace.pb.go b/pb/trace.pb.go index 04f1ec1b..9361c393 100644 --- a/pb/trace.pb.go +++ b/pb/trace.pb.go @@ -5,10 +5,11 @@ package pubsub_pb import ( fmt "fmt" - proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" ) // Reference imports to suppress errors if they are not otherwise used. From 4c139741882d3ef6a917e6f3ea403832885b7472 Mon Sep 17 00:00:00 2001 From: Andrew Gillis <11790789+gammazero@users.noreply.github.com> Date: Mon, 9 Sep 2024 08:42:16 -0700 Subject: [PATCH 20/21] Update go-libp2p to latest (#578) - Update to go-libp2p v0.36.3 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 437017c8..3faa4117 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/ipfs/go-log/v2 v2.5.1 github.com/libp2p/go-buffer-pool v0.1.0 - github.com/libp2p/go-libp2p v0.36.2 + github.com/libp2p/go-libp2p v0.36.3 github.com/libp2p/go-libp2p-testing v0.12.0 github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-multiaddr v0.13.0 diff --git a/go.sum b/go.sum index cb7aa749..71a6bd2f 100644 --- a/go.sum +++ b/go.sum @@ -134,8 +134,8 @@ github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6 github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.36.2 h1:BbqRkDaGC3/5xfaJakLV/BrpjlAuYqSB0lRvtzL3B/U= -github.com/libp2p/go-libp2p v0.36.2/go.mod h1:XO3joasRE4Eup8yCTTP/+kX+g92mOgRaadk46LmPhHY= +github.com/libp2p/go-libp2p v0.36.3 h1:NHz30+G7D8Y8YmznrVZZla0ofVANrvBl2c+oARfMeDQ= +github.com/libp2p/go-libp2p v0.36.3/go.mod h1:4Y5vFyCUiJuluEPmpnKYf6WFx5ViKPUYs/ixe9ANFZ8= github.com/libp2p/go-libp2p-asn-util v0.4.1 h1:xqL7++IKD9TBFMgnLPZR6/6iYhawHKHl950SO9L6n94= github.com/libp2p/go-libp2p-asn-util v0.4.1/go.mod h1:d/NI6XZ9qxw67b4e+NgpQexCIiFYJjErASrYW4PFDN8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= From f71345c1ec0ee4b30cd702bb605927f788ff9f36 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Wed, 25 Sep 2024 02:33:35 -0400 Subject: [PATCH 21/21] Do not format expensive debug messages in non-debug levels in doDropRPC (#580) In high load scenarios when consumer is slow, `doDropRPC` is called often and makes extra unnecessary allocations formatting `log.Debug` message. Fixed by checking log level before running expensive formatting. 
Before: ``` BenchmarkAllocDoDropRPC-10 13684732 76.28 ns/op 144 B/op 3 allocs/op ``` After: ``` BenchmarkAllocDoDropRPC-10 28140273 42.88 ns/op 112 B/op 1 allocs/op ``` --- go.mod | 2 +- gossipsub.go | 6 +++++- gossipsub_test.go | 8 ++++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 3faa4117..f1358e71 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/libp2p/go-msgio v0.3.0 github.com/multiformats/go-multiaddr v0.13.0 github.com/multiformats/go-varint v0.0.7 + go.uber.org/zap v1.27.0 ) require ( @@ -98,7 +99,6 @@ require ( go.uber.org/fx v1.22.2 // indirect go.uber.org/mock v0.4.0 // indirect go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.26.0 // indirect golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect golang.org/x/mod v0.20.0 // indirect diff --git a/gossipsub.go b/gossipsub.go index 117b585c..dcc5d193 100644 --- a/gossipsub.go +++ b/gossipsub.go @@ -19,6 +19,8 @@ import ( "github.com/libp2p/go-libp2p/core/protocol" "github.com/libp2p/go-libp2p/core/record" "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoremem" + + "go.uber.org/zap/zapcore" ) const ( @@ -1334,7 +1336,9 @@ func (gs *GossipSubRouter) sendRPC(p peer.ID, out *RPC, urgent bool) { } func (gs *GossipSubRouter) doDropRPC(rpc *RPC, p peer.ID, reason string) { - log.Debugf("dropping message to peer %s: %s", p, reason) + if log.Level() <= zapcore.DebugLevel { + log.Debugf("dropping message to peer %s: %s", p, reason) + } gs.tracer.DropRPC(rpc, p) // push control messages that need to be retried ctl := rpc.GetControl() diff --git a/gossipsub_test.go b/gossipsub_test.go index 3b45557c..d515654f 100644 --- a/gossipsub_test.go +++ b/gossipsub_test.go @@ -3175,3 +3175,11 @@ func TestGossipsubIdontwantClear(t *testing.T) { <-ctx.Done() } + +func BenchmarkAllocDoDropRPC(b *testing.B) { + gs := GossipSubRouter{tracer: &pubsubTracer{}} + + for i := 0; i < b.N; i++ { + gs.doDropRPC(&RPC{}, "peerID", "reason") + } +}
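
The guard added to `doDropRPC` above generalizes to any hot path that formats debug-only output. Below is a minimal standalone sketch of the same pattern using zap directly (the logger that `ipfs/go-log/v2` wraps in this repo); the `zap.NewProduction` setup, variable names, and message text here are illustrative only, not part of the patch:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	logger, _ := zap.NewProduction() // info level by default, so debug output is disabled
	defer logger.Sync()
	log := logger.Sugar()

	peerID, reason := "peerID", "queue full" // hypothetical values for illustration

	// Same guard as the doDropRPC change: only pay for the Debugf formatting
	// (and the allocations for its arguments) when debug output is actually enabled.
	if log.Level() <= zapcore.DebugLevel {
		log.Debugf("dropping message to peer %s: %s", peerID, reason)
	}
}
```

The benchmark in the commit message measures exactly this effect: with the guard in place, the hot path skips the `Debugf` call entirely at non-debug levels, which is where the reduction from 3 allocations per op to 1 comes from.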