From de93f6bebcc56263fccedba379f4e4d4e61752a5 Mon Sep 17 00:00:00 2001 From: Oskar Thoren Date: Fri, 15 Nov 2019 15:31:06 +0800 Subject: [PATCH 01/13] waku/0 init --- eth/p2p/rlpx_protocols/waku_protocol.nim | 1100 ++++++++++++++++++++++ 1 file changed, 1100 insertions(+) create mode 100644 eth/p2p/rlpx_protocols/waku_protocol.nim diff --git a/eth/p2p/rlpx_protocols/waku_protocol.nim b/eth/p2p/rlpx_protocols/waku_protocol.nim new file mode 100644 index 00000000..c19925d1 --- /dev/null +++ b/eth/p2p/rlpx_protocols/waku_protocol.nim @@ -0,0 +1,1100 @@ +# +# Whisper +# (c) Copyright 2018-2019 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) +# + +## Whisper +## ******* +## +## Whisper is a gossip protocol that synchronizes a set of messages across nodes +## with attention given to sender and recipient anonymitiy. Messages are +## categorized by a topic and stay alive in the network based on a time-to-live +## measured in seconds. Spam prevention is based on proof-of-work, where large +## or long-lived messages must spend more work. +## +## Example usage +## ---------- +## First an `EthereumNode` needs to be created, either with all capabilities set +## or with specifically the Whisper capability set. +## The latter can be done like this: +## +## .. code-block::nim +## var node = newEthereumNode(keypair, address, netId, nil, +## addAllCapabilities = false) +## node.addCapability Whisper +## +## Now calls such as ``postMessage`` and ``subscribeFilter`` can be done. +## However, they only make real sense after ``connectToNetwork`` was started. As +## else there will be no peers to send and receive messages from. + +import + algorithm, bitops, math, options, sequtils, strutils, tables, times, chronos, + secp256k1, chronicles, hashes, stew/[byteutils, endians2], + nimcrypto/[bcmode, hash, keccak, rijndael, sysrand], + eth/common/eth_types, eth/[keys, rlp, async_utils, p2p], eth/p2p/ecies + +logScope: + topics = "whisper" + +const + flagsLen = 1 ## payload flags field length, bytes + gcmIVLen = 12 ## Length of IV (seed) used for AES + gcmTagLen = 16 ## Length of tag used to authenticate AES-GCM-encrypted message + padMaxLen = 256 ## payload will be padded to multiples of this by default + payloadLenLenBits = 0b11'u8 ## payload flags length-of-length mask + signatureBits = 0b100'u8 ## payload flags signature mask + bloomSize = 512 div 8 + defaultQueueCapacity = 256 + defaultFilterQueueCapacity = 64 + whisperVersion* = 6 ## Whisper version. + whisperVersionStr* = $whisperVersion ## Whisper version. + defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node. + defaultMaxMsgSize* = 1024'u32 * 1024'u32 ## The current default and max + ## message size. This can never be larger than the maximum RLPx message size. + messageInterval* = chronos.milliseconds(300) ## Interval at which messages are + ## send to peers, in ms. + pruneInterval* = chronos.milliseconds(1000) ## Interval at which message + ## queue is pruned, in ms. + +type + Hash* = MDigest[256] + SymKey* = array[256 div 8, byte] ## AES256 key. + Topic* = array[4, byte] ## 4 bytes that can be used to filter messages on. + Bloom* = array[bloomSize, byte] ## A bloom filter that can be used to identify + ## a number of topics that a peer is interested in. + # XXX: nim-eth-bloom has really quirky API and fixed + # bloom size. 
+ # stint is massive overkill / poor fit - a bloom filter is an array of bits, + # not a number + + Payload* = object + ## Payload is what goes in the data field of the Envelope. + + src*: Option[PrivateKey] ## Optional key used for signing message + dst*: Option[PublicKey] ## Optional key used for asymmetric encryption + symKey*: Option[SymKey] ## Optional key used for symmetric encryption + payload*: Bytes ## Application data / message contents + padding*: Option[Bytes] ## Padding - if unset, will automatically pad up to + ## nearest maxPadLen-byte boundary + DecodedPayload* = object + ## The decoded payload of a received message. + + src*: Option[PublicKey] ## If the message was signed, this is the public key + ## of the source + payload*: Bytes ## Application data / message contents + padding*: Option[Bytes] ## Message padding + + Envelope* = object + ## What goes on the wire in the whisper protocol - a payload and some + ## book-keeping + # Don't touch field order, there's lots of macro magic that depends on it + expiry*: uint32 ## Unix timestamp when message expires + ttl*: uint32 ## Time-to-live, seconds - message was created at (expiry - ttl) + topic*: Topic + data*: Bytes ## Payload, as given by user + nonce*: uint64 ## Nonce used for proof-of-work calculation + + Message* = object + ## An Envelope with a few cached properties + + env*: Envelope + hash*: Hash ## Hash, as calculated for proof-of-work + size*: uint32 ## RLP-encoded size of message + pow*: float64 ## Calculated proof-of-work + bloom*: Bloom ## Filter sent to direct peers for topic-based filtering + isP2P: bool + + ReceivedMessage* = object + ## A received message that matched a filter and was possible to decrypt. + ## Contains the decoded payload and additional information. + decoded*: DecodedPayload + timestamp*: uint32 + ttl*: uint32 + topic*: Topic + pow*: float64 + hash*: Hash + dst*: Option[PublicKey] + + Queue* = object + ## Bounded message repository + ## + ## Whisper uses proof-of-work to judge the usefulness of a message staying + ## in the "cloud" - messages with low proof-of-work will be removed to make + ## room for those with higher pow, even if they haven't expired yet. + ## Larger messages and those with high time-to-live will require more pow. + items*: seq[Message] ## Sorted by proof-of-work + itemHashes*: HashSet[Message] ## For easy duplication checking + # XXX: itemHashes is added for easy message duplication checking and for + # easy pruning of the peer received message sets. It does have an impact on + # adding and pruning of items however. + # Need to give it some more thought and check where most time is lost in + # typical cases, perhaps we are better of with one hash table (lose PoW + # sorting however), or perhaps there is a simpler solution... + + capacity*: int ## Max messages to keep. 
\ + ## XXX: really big messages can cause excessive mem usage when using msg \ + ## count + + FilterMsgHandler* = proc(msg: ReceivedMessage) {.gcsafe, closure.} + + Filter* = object + src*: Option[PublicKey] + privateKey*: Option[PrivateKey] + symKey*: Option[SymKey] + topics*: seq[Topic] + powReq*: float64 + allowP2P*: bool + + bloom: Bloom # Cached bloom filter of all topics of filter + handler: FilterMsgHandler + queue: seq[ReceivedMessage] + + Filters* = Table[string, Filter] + + WhisperConfig* = object + powRequirement*: float64 + bloom*: Bloom + isLightNode*: bool + maxMsgSize*: uint32 + +# Utilities -------------------------------------------------------------------- + +proc leadingZeroBits(hash: MDigest): int = + ## Number of most significant zero bits before the first one + for h in hash.data: + static: doAssert sizeof(h) == 1 + if h == 0: + result += 8 + else: + result += countLeadingZeroBits(h) + break + +proc calcPow*(size, ttl: uint64, hash: Hash): float64 = + ## Whisper proof-of-work is defined as the best bit of a hash divided by + ## encoded size and time-to-live, such that large and long-lived messages get + ## penalized + + let bits = leadingZeroBits(hash) + return pow(2.0, bits.float64) / (size.float64 * ttl.float64) + +proc topicBloom*(topic: Topic): Bloom = + ## Whisper uses 512-bit bloom filters meaning 9 bits of indexing - 3 9-bit + ## indexes into the bloom are created using the first 3 bytes of the topic and + ## complementing each byte with an extra bit from the last topic byte + for i in 0..<3: + var idx = uint16(topic[i]) + if (topic[3] and byte(1 shl i)) != 0: # fetch the 9'th bit from the last byte + idx = idx + 256 + + doAssert idx <= 511 + result[idx div 8] = result[idx div 8] or byte(1 shl (idx and 7'u16)) + +proc generateRandomID*(): string = + var bytes: array[256 div 8, byte] + while true: # XXX: error instead of looping? + if randomBytes(bytes) == 256 div 8: + result = toHex(bytes) + break + +proc `or`(a, b: Bloom): Bloom = + for i in 0..= 256*256*256: + notice "Payload exceeds max length", len = self.payload.len + return + + # length of the payload length field :) + let payloadLenLen = + if self.payload.len >= 256*256: 3'u8 + elif self.payload.len >= 256: 2'u8 + else: 1'u8 + + let signatureLen = + if self.src.isSome(): keys.RawSignatureSize + else: 0 + + # useful data length + let dataLen = flagsLen + payloadLenLen.int + self.payload.len + signatureLen + + let padLen = + if self.padding.isSome(): self.padding.get().len + # is there a reason why 256 bytes are padded when the dataLen is 256? + else: padMaxLen - (dataLen mod padMaxLen) + + # buffer space that we need to allocate + let totalLen = dataLen + padLen + + var plain = newSeqOfCap[byte](totalLen) + + let signatureFlag = + if self.src.isSome(): signatureBits + else: 0'u8 + + # byte 0: flags with payload length length and presence of signature + plain.add payloadLenLen or signatureFlag + + # next, length of payload - little endian (who comes up with this stuff? why + # can't the world just settle on one endian?) + let payloadLenLE = self.payload.len.uint32.toBytesLE + + # No, I have no love for nim closed ranges - such a mess to remember the extra + # < or risk off-by-ones when working with lengths.. + plain.add payloadLenLE[0.. pos + keys.RawSignatureSize: + res.padding = some(plain[pos .. ^(keys.RawSignatureSize+1)]) + else: + if plain.len > pos: + res.padding = some(plain[pos .. 
^1]) + + return some(res) + +# Envelopes -------------------------------------------------------------------- + +proc valid*(self: Envelope, now = epochTime()): bool = + if self.expiry.float64 < now: return false # expired + if self.ttl <= 0: return false # this would invalidate pow calculation + + let created = self.expiry - self.ttl + if created.float64 > (now + 2.0): return false # created in the future + + return true + +proc len(self: Envelope): int = 20 + self.data.len + +proc toShortRlp*(self: Envelope): Bytes = + ## RLP-encoded message without nonce is used during proof-of-work calculations + rlp.encodeList(self.expiry, self.ttl, self.topic, self.data) + +proc toRlp(self: Envelope): Bytes = + ## What gets sent out over the wire includes the nonce + rlp.encode(self) + +proc minePow*(self: Envelope, seconds: float, bestBitTarget: int = 0): (uint64, Hash) = + ## For the given envelope, spend millis milliseconds to find the + ## best proof-of-work and return the nonce + let bytes = self.toShortRlp() + + var ctx: keccak256 + ctx.init() + ctx.update(bytes) + + var bestBit: int = 0 + + let mineEnd = epochTime() + seconds + + var i: uint64 + while epochTime() < mineEnd or bestBit == 0: # At least one round + var tmp = ctx # copy hash calculated so far - we'll reuse that for each iter + tmp.update(i.toBytesBE()) + # XXX:a random nonce here would not leak number of iters + let hash = tmp.finish() + let zeroBits = leadingZeroBits(hash) + if zeroBits > bestBit: # XXX: could also compare hashes as numbers instead + bestBit = zeroBits + result = (i, hash) + if bestBitTarget > 0 and bestBit >= bestBitTarget: + break + + i.inc + +proc calcPowHash*(self: Envelope): Hash = + ## Calculate the message hash, as done during mining - this can be used to + ## verify proof-of-work + + let bytes = self.toShortRlp() + + var ctx: keccak256 + ctx.init() + ctx.update(bytes) + ctx.update(self.nonce.toBytesBE()) + return ctx.finish() + +# Messages --------------------------------------------------------------------- + +proc cmpPow(a, b: Message): int = + ## Biggest pow first, lowest at the end (for easy popping) + if a.pow > b.pow: 1 + elif a.pow == b.pow: 0 + else: -1 + +proc initMessage*(env: Envelope, powCalc = true): Message = + result.env = env + result.size = env.toRlp().len().uint32 # XXX: calc len without creating RLP + result.bloom = topicBloom(env.topic) + if powCalc: + result.hash = env.calcPowHash() + result.pow = calcPow(result.env.len.uint32, result.env.ttl, result.hash) + trace "Message PoW", pow = result.pow.formatFloat(ffScientific) + +proc hash*(msg: Message): hashes.Hash = hash(msg.hash.data) + +proc allowed*(msg: Message, config: WhisperConfig): bool = + # Check max msg size, already happens in RLPx but there is a specific shh + # max msg size which should always be < RLPx max msg size + if msg.size > config.maxMsgSize: + warn "Message size too large", size = msg.size + return false + + if msg.pow < config.powRequirement: + warn "Message PoW too low", pow = msg.pow, minPow = config.powRequirement + return false + + if not bloomFilterMatch(config.bloom, msg.bloom): + warn "Message does not match node bloom filter" + return false + + return true + +# NOTE: Hashing and leading zeroes calculation is now the same between geth, +# parity and this implementation. +# However, there is still a difference in the size calculation. 
+# See also here: https://github.com/ethereum/go-ethereum/pull/19753 +# This implementation is not conform EIP-627 as we do not use the size of the +# RLP-encoded envelope, but the size of the envelope object itself. +# This is done to be able to correctly calculate the bestBitTarget. +# Other options would be: +# - work directly with powTarget in minePow, but this requires recalculation of +# rlp size + calcPow +# - Use worst case size of envelope nonce +# - Mine PoW for x interval, calcPow of best result, if target not met .. repeat +proc sealEnvelope(msg: var Message, powTime: float, powTarget: float): bool = + let size = msg.env.len + if powTarget > 0: + let x = powTarget * size.float * msg.env.ttl.float + var bestBitTarget: int + if x <= 1: # log() would return negative numbers or 0 + bestBitTarget = 1 + else: + bestBitTarget = ceil(log(x, 2)).int + (msg.env.nonce, msg.hash) = msg.env.minePow(powTime, bestBitTarget) + else: + # If no target is set, we are certain of executed powTime + msg.env.expiry += powTime.uint32 + (msg.env.nonce, msg.hash) = msg.env.minePow(powTime) + + msg.pow = calcPow(size.uint32, msg.env.ttl, msg.hash) + trace "Message PoW", pow = msg.pow + if msg.pow < powTarget: + return false + + return true + +# Queues ----------------------------------------------------------------------- + +proc initQueue*(capacity: int): Queue = + result.items = newSeqOfCap[Message](capacity) + result.capacity = capacity + result.itemHashes.init() + +proc prune(self: var Queue) {.raises: [].} = + ## Remove items that are past their expiry time + let now = epochTime().uint32 + + # keepIf code + pruning of hashset + var pos = 0 + for i in 0 ..< len(self.items): + if self.items[i].env.expiry > now: + if pos != i: + shallowCopy(self.items[pos], self.items[i]) + inc(pos) + else: self.itemHashes.excl(self.items[i]) + setLen(self.items, pos) + +proc add*(self: var Queue, msg: Message): bool = + ## Add a message to the queue. + ## If we're at capacity, we will be removing, in order: + ## * expired messages + ## * lowest proof-of-work message - this may be `msg` itself! + + if self.items.len >= self.capacity: + self.prune() # Only prune if needed + + if self.items.len >= self.capacity: + # Still no room - go by proof-of-work quantity + let last = self.items[^1] + + if last.pow > msg.pow or + (last.pow == msg.pow and last.env.expiry > msg.env.expiry): + # The new message has less pow or will expire earlier - drop it + return false + + self.items.del(self.items.len() - 1) + self.itemHashes.excl(last) + + # check for duplicate + if self.itemHashes.containsOrIncl(msg): + return false + else: + self.items.insert(msg, self.items.lowerBound(msg, cmpPow)) + return true + +# Filters ---------------------------------------------------------------------- +proc newFilter*(src = none[PublicKey](), privateKey = none[PrivateKey](), + symKey = none[SymKey](), topics: seq[Topic] = @[], + powReq = 0.0, allowP2P = false): Filter = + # Zero topics will give an empty bloom filter which is fine as this bloom + # filter is only used to `or` with existing/other bloom filters. Not to do + # matching. + Filter(src: src, privateKey: privateKey, symKey: symKey, topics: topics, + powReq: powReq, allowP2P: allowP2P, bloom: toBloom(topics)) + +proc subscribeFilter*(filters: var Filters, filter: Filter, + handler:FilterMsgHandler = nil): string = + # NOTE: Should we allow a filter without a key? Encryption is mandatory in v6? + # Check if asymmetric _and_ symmetric key? Now asymmetric just has precedence. 
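+  # Illustrative usage sketch (not part of the original code; `mySymKey` and
+  # `myTopic` are placeholder names): subscribe with a symmetric key and one
+  # topic, then poll the queue later via `getFilterMessages`:
+  #   var filters = initTable[string, Filter]()
+  #   let filterId = filters.subscribeFilter(
+  #     newFilter(symKey = some(mySymKey), topics = @[myTopic]))
+  #   let received = filters.getFilterMessages(filterId)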
+ let id = generateRandomID() + var filter = filter + if handler.isNil(): + filter.queue = newSeqOfCap[ReceivedMessage](defaultFilterQueueCapacity) + else: + filter.handler = handler + + filters.add(id, filter) + debug "Filter added", filter = id + return id + +proc notify*(filters: var Filters, msg: Message) {.gcsafe.} = + var decoded: Option[DecodedPayload] + var keyHash: Hash + var dst: Option[PublicKey] + + for filter in filters.mvalues: + if not filter.allowP2P and msg.isP2P: + continue + + # if message is direct p2p PoW doesn't matter + if msg.pow < filter.powReq and not msg.isP2P: + continue + + if filter.topics.len > 0: + if msg.env.topic notin filter.topics: + continue + + # Decode, if already decoded previously check if hash of key matches + if decoded.isNone(): + decoded = decode(msg.env.data, dst = filter.privateKey, + symKey = filter.symKey) + if decoded.isNone(): + continue + if filter.privateKey.isSome(): + keyHash = keccak256.digest(filter.privateKey.get().data) + # TODO: Get rid of the hash and just use pubkey to compare? + dst = some(getPublicKey(filter.privateKey.get())) + elif filter.symKey.isSome(): + keyHash = keccak256.digest(filter.symKey.get()) + # else: + # NOTE: In this case the message was not encrypted + else: + if filter.privateKey.isSome(): + if keyHash != keccak256.digest(filter.privateKey.get().data): + continue + elif filter.symKey.isSome(): + if keyHash != keccak256.digest(filter.symKey.get()): + continue + # else: + # NOTE: In this case the message was not encrypted + + # When decoding is done we can check the src (signature) + if filter.src.isSome(): + let src: Option[PublicKey] = decoded.get().src + if not src.isSome(): + continue + elif src.get() != filter.src.get(): + continue + + let receivedMsg = ReceivedMessage(decoded: decoded.get(), + timestamp: msg.env.expiry - msg.env.ttl, + ttl: msg.env.ttl, + topic: msg.env.topic, + pow: msg.pow, + hash: msg.hash, + dst: dst) + # Either run callback or add to queue + if filter.handler.isNil(): + filter.queue.insert(receivedMsg) + else: + filter.handler(receivedMsg) + +proc getFilterMessages*(filters: var Filters, filterId: string): seq[ReceivedMessage] = + result = @[] + if filters.contains(filterId): + if filters[filterId].handler.isNil(): + shallowCopy(result, filters[filterId].queue) + filters[filterId].queue = + newSeqOfCap[ReceivedMessage](defaultFilterQueueCapacity) + +proc toBloom*(filters: Filters): Bloom = + for filter in filters.values: + if filter.topics.len > 0: + result = result or filter.bloom + +type + WhisperPeer = ref object + initialized: bool # when successfully completed the handshake + powRequirement*: float64 + bloom*: Bloom + isLightNode*: bool + trusted*: bool + received: HashSet[Message] + + WhisperNetwork = ref object + queue*: Queue + filters*: Filters + config*: WhisperConfig + +proc run(peer: Peer) {.gcsafe, async.} +proc run(node: EthereumNode, network: WhisperNetwork) {.gcsafe, async.} + +proc initProtocolState*(network: WhisperNetwork, node: EthereumNode) {.gcsafe.} = + network.queue = initQueue(defaultQueueCapacity) + network.filters = initTable[string, Filter]() + network.config.bloom = fullBloom() + network.config.powRequirement = defaultMinPow + network.config.isLightNode = false + network.config.maxMsgSize = defaultMaxMsgSize + asyncCheck node.run(network) + +p2pProtocol Whisper(version = whisperVersion, + rlpxName = "shh", + peerState = WhisperPeer, + networkState = WhisperNetwork): + + onPeerConnected do (peer: Peer): + trace "onPeerConnected Whisper" + let + whisperNet 
= peer.networkState + whisperPeer = peer.state + + let m = await peer.status(whisperVersion, + cast[uint](whisperNet.config.powRequirement), + @(whisperNet.config.bloom), + whisperNet.config.isLightNode, + timeout = chronos.milliseconds(500)) + + if m.protocolVersion == whisperVersion: + debug "Whisper peer", peer, whisperVersion + else: + raise newException(UselessPeerError, "Incompatible Whisper version") + + whisperPeer.powRequirement = cast[float64](m.powConverted) + + if m.bloom.len > 0: + if m.bloom.len != bloomSize: + raise newException(UselessPeerError, "Bloomfilter size mismatch") + else: + whisperPeer.bloom.bytesCopy(m.bloom) + else: + # If no bloom filter is send we allow all + whisperPeer.bloom = fullBloom() + + whisperPeer.isLightNode = m.isLightNode + if whisperPeer.isLightNode and whisperNet.config.isLightNode: + # No sense in connecting two light nodes so we disconnect + raise newException(UselessPeerError, "Two light nodes connected") + + whisperPeer.received.init() + whisperPeer.trusted = false + whisperPeer.initialized = true + + if not whisperNet.config.isLightNode: + traceAsyncErrors peer.run() + + debug "Whisper peer initialized", peer + + handshake: + proc status(peer: Peer, + protocolVersion: uint, + powConverted: uint, + bloom: Bytes, + isLightNode: bool) + + proc messages(peer: Peer, envelopes: openarray[Envelope]) = + if not peer.state.initialized: + warn "Handshake not completed yet, discarding messages" + return + + for envelope in envelopes: + # check if expired or in future, or ttl not 0 + if not envelope.valid(): + warn "Expired or future timed envelope", peer + # disconnect from peers sending bad envelopes + # await peer.disconnect(SubprotocolReason) + continue + + let msg = initMessage(envelope) + if not msg.allowed(peer.networkState.config): + # disconnect from peers sending bad envelopes + # await peer.disconnect(SubprotocolReason) + continue + + # This peer send this message thus should not receive it again. + # If this peer has the message in the `received` set already, this means + # it was either already received here from this peer or send to this peer. + # Either way it will be in our queue already (and the peer should know + # this) and this peer is sending duplicates. + # Note: geth does not check if a peer has send a message to them before + # broadcasting this message. This too is seen here as a duplicate message + # (see above comment). If we want to seperate these cases (e.g. when peer + # rating), then we have to add a "peer.state.send" HashSet. + if peer.state.received.containsOrIncl(msg): + debug "Peer sending duplicate messages", peer, hash = msg.hash + # await peer.disconnect(SubprotocolReason) + continue + + # This can still be a duplicate message, but from another peer than + # the peer who send the message. + if peer.networkState.queue.add(msg): + # notify filters of this message + peer.networkState.filters.notify(msg) + + proc powRequirement(peer: Peer, value: uint) = + if not peer.state.initialized: + warn "Handshake not completed yet, discarding powRequirement" + return + + peer.state.powRequirement = cast[float64](value) + + proc bloomFilterExchange(peer: Peer, bloom: Bytes) = + if not peer.state.initialized: + warn "Handshake not completed yet, discarding bloomFilterExchange" + return + + if bloom.len == bloomSize: + peer.state.bloom.bytesCopy(bloom) + + nextID 126 + + proc p2pRequest(peer: Peer, envelope: Envelope) = + # TODO: here we would have to allow to insert some specific implementation + # such as e.g. 
Whisper Mail Server + discard + + proc p2pMessage(peer: Peer, envelope: Envelope) = + if peer.state.trusted: + # when trusted we can bypass any checks on envelope + let msg = Message(env: envelope, isP2P: true) + peer.networkState.filters.notify(msg) + + # Following message IDs are not part of EIP-627, but are added and used by + # the Status application, we ignore them for now. + nextID 11 + proc batchAcknowledged(peer: Peer) = discard + proc messageResponse(peer: Peer) = discard + + nextID 123 + requestResponse: + proc p2pSyncRequest(peer: Peer) = discard + proc p2pSyncResponse(peer: Peer) = discard + + proc p2pRequestComplete(peer: Peer) = discard + +# 'Runner' calls --------------------------------------------------------------- + +proc processQueue(peer: Peer) = + # Send to peer all valid and previously not send envelopes in the queue. + var + envelopes: seq[Envelope] = @[] + whisperPeer = peer.state(Whisper) + whisperNet = peer.networkState(Whisper) + + for message in whisperNet.queue.items: + if whisperPeer.received.contains(message): + # debug "message was already send to peer" + continue + + if message.pow < whisperPeer.powRequirement: + debug "Message PoW too low for peer", pow = message.pow, + powReq = whisperPeer.powRequirement + continue + + if not bloomFilterMatch(whisperPeer.bloom, message.bloom): + debug "Message does not match peer bloom filter" + continue + + trace "Adding envelope" + envelopes.add(message.env) + whisperPeer.received.incl(message) + + trace "Sending envelopes", amount=envelopes.len + # Ignore failure of sending messages, this could occur when the connection + # gets dropped + traceAsyncErrors peer.messages(envelopes) + +proc run(peer: Peer) {.async.} = + while peer.connectionState notin {Disconnecting, Disconnected}: + peer.processQueue() + await sleepAsync(messageInterval) + +proc pruneReceived(node: EthereumNode) {.raises: [].} = + if node.peerPool != nil: # XXX: a bit dirty to need to check for this here ... + var whisperNet = node.protocolState(Whisper) + + for peer in node.protocolPeers(Whisper): + if not peer.initialized: + continue + + # NOTE: Perhaps alter the queue prune call to keep track of a HashSet + # of pruned messages (as these should be smaller), and diff this with + # the received sets. + peer.received = intersection(peer.received, whisperNet.queue.itemHashes) + +proc run(node: EthereumNode, network: WhisperNetwork) {.async.} = + while true: + # prune message queue every second + # TTL unit is in seconds, so this should be sufficient? + network.queue.prune() + # pruning the received sets is not necessary for correct workings + # but simply from keeping the sets growing indefinitely + node.pruneReceived() + await sleepAsync(pruneInterval) + +# Private EthereumNode calls --------------------------------------------------- + +proc sendP2PMessage(node: EthereumNode, peerId: NodeId, env: Envelope): bool = + for peer in node.peers(Whisper): + if peer.remote.id == peerId: + asyncCheck peer.p2pMessage(env) + return true + +proc queueMessage(node: EthereumNode, msg: Message): bool = + + var whisperNet = node.protocolState(Whisper) + # We have to do the same checks here as in the messages proc not to leak + # any information that the message originates from this node. + if not msg.allowed(whisperNet.config): + return false + + trace "Adding message to queue" + if whisperNet.queue.add(msg): + # Also notify our own filters of the message we are sending, + # e.g. 
msg from local Dapp to Dapp + whisperNet.filters.notify(msg) + + return true + +# Public EthereumNode calls ---------------------------------------------------- + +proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](), + symKey = none[SymKey](), src = none[PrivateKey](), + ttl: uint32, topic: Topic, payload: Bytes, + padding = none[Bytes](), powTime = 1'f, + powTarget = defaultMinPow, + targetPeer = none[NodeId]()): bool = + ## Post a message on the message queue which will be processed at the + ## next `messageInterval`. + ## + ## NOTE: This call allows a post without encryption. If encryption is + ## mandatory it should be enforced a layer up + let payload = encode(Payload(payload: payload, src: src, dst: pubKey, + symKey: symKey, padding: padding)) + if payload.isSome(): + var env = Envelope(expiry:epochTime().uint32 + ttl, + ttl: ttl, topic: topic, data: payload.get(), nonce: 0) + + # Allow lightnode to post only direct p2p messages + if targetPeer.isSome(): + return node.sendP2PMessage(targetPeer.get(), env) + elif not node.protocolState(Whisper).config.isLightNode: + # non direct p2p message can not have ttl of 0 + if env.ttl == 0: + return false + var msg = initMessage(env, powCalc = false) + # XXX: make this non blocking or not? + # In its current blocking state, it could be noticed by a peer that no + # messages are send for a while, and thus that mining PoW is done, and + # that next messages contains a message originated from this peer + # zah: It would be hard to execute this in a background thread at the + # moment. We'll need a way to send custom "tasks" to the async message + # loop (e.g. AD2 support for AsyncChannels). + if not msg.sealEnvelope(powTime, powTarget): + return false + + # need to check expiry after mining PoW + if not msg.env.valid(): + return false + + return node.queueMessage(msg) + else: + warn "Light node not allowed to post messages" + return false + else: + error "Encoding of payload failed" + return false + +proc subscribeFilter*(node: EthereumNode, filter: Filter, + handler:FilterMsgHandler = nil): string = + ## Initiate a filter for incoming/outgoing messages. Messages can be + ## retrieved with the `getFilterMessages` call or with a provided + ## `FilterMsgHandler`. + ## + ## NOTE: This call allows for a filter without decryption. If encryption is + ## mandatory it should be enforced a layer up. + return node.protocolState(Whisper).filters.subscribeFilter(filter, handler) + +proc unsubscribeFilter*(node: EthereumNode, filterId: string): bool = + ## Remove a previously subscribed filter. + var filter: Filter + return node.protocolState(Whisper).filters.take(filterId, filter) + +proc getFilterMessages*(node: EthereumNode, filterId: string): seq[ReceivedMessage] = + ## Get all the messages currently in the filter queue. This will reset the + ## filter message queue. + return node.protocolState(Whisper).filters.getFilterMessages(filterId) + +proc filtersToBloom*(node: EthereumNode): Bloom = + ## Returns the bloom filter of all topics of all subscribed filters. + return node.protocolState(Whisper).filters.toBloom() + +proc setPowRequirement*(node: EthereumNode, powReq: float64) {.async.} = + ## Sets the PoW requirement for this node, will also send + ## this new PoW requirement to all connected peers. + ## + ## Failures when sending messages to peers will not be reported. + # NOTE: do we need a tolerance of old PoW for some time? 
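+  # Hypothetical caller-side sketch (placeholder value, not from this module):
+  #   waitFor node.setPowRequirement(0.4)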
+ node.protocolState(Whisper).config.powRequirement = powReq + var futures: seq[Future[void]] = @[] + for peer in node.peers(Whisper): + futures.add(peer.powRequirement(cast[uint](powReq))) + + # Exceptions from sendMsg will not be raised + await allFutures(futures) + +proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} = + ## Sets the bloom filter for this node, will also send + ## this new bloom filter to all connected peers. + ## + ## Failures when sending messages to peers will not be reported. + # NOTE: do we need a tolerance of old bloom filter for some time? + node.protocolState(Whisper).config.bloom = bloom + var futures: seq[Future[void]] = @[] + for peer in node.peers(Whisper): + futures.add(peer.bloomFilterExchange(@bloom)) + + # Exceptions from sendMsg will not be raised + await allFutures(futures) + +proc setMaxMessageSize*(node: EthereumNode, size: uint32): bool = + ## Set the maximum allowed message size. + ## Can not be set higher than ``defaultMaxMsgSize``. + if size > defaultMaxMsgSize: + warn "size > defaultMaxMsgSize" + return false + node.protocolState(Whisper).config.maxMsgSize = size + return true + +proc setPeerTrusted*(node: EthereumNode, peerId: NodeId): bool = + ## Set a connected peer as trusted. + for peer in node.peers(Whisper): + if peer.remote.id == peerId: + peer.state(Whisper).trusted = true + return true + +proc setLightNode*(node: EthereumNode, isLightNode: bool) = + ## Set this node as a Whisper light node. + ## + ## NOTE: Should be run before connection is made with peers as this + ## setting is only communicated at peer handshake. + node.protocolState(Whisper).config.isLightNode = isLightNode + +proc configureWhisper*(node: EthereumNode, config: WhisperConfig) = + ## Apply a Whisper configuration. + ## + ## NOTE: Should be run before connection is made with peers as some + ## of the settings are only communicated at peer handshake. + node.protocolState(Whisper).config = config + +proc resetMessageQueue*(node: EthereumNode) = + ## Full reset of the message queue. + ## + ## NOTE: Not something that should be run in normal circumstances. + node.protocolState(Whisper).queue = initQueue(defaultQueueCapacity) From fec788655b56a57eb99bb288d6015e106651dd3b Mon Sep 17 00:00:00 2001 From: Oskar Thoren Date: Fri, 15 Nov 2019 15:32:46 +0800 Subject: [PATCH 02/13] waku: whisper-> waku; version 0 --- eth/p2p/rlpx_protocols/waku_protocol.nim | 162 ++++++++++++----------- 1 file changed, 82 insertions(+), 80 deletions(-) diff --git a/eth/p2p/rlpx_protocols/waku_protocol.nim b/eth/p2p/rlpx_protocols/waku_protocol.nim index c19925d1..cfe94b67 100644 --- a/eth/p2p/rlpx_protocols/waku_protocol.nim +++ b/eth/p2p/rlpx_protocols/waku_protocol.nim @@ -1,5 +1,5 @@ # -# Whisper +# Waku # (c) Copyright 2018-2019 # Status Research & Development GmbH # @@ -8,10 +8,12 @@ # MIT license (LICENSE-MIT) # -## Whisper +## Waku ## ******* ## -## Whisper is a gossip protocol that synchronizes a set of messages across nodes +## Waku is a fork of Whisper. +## +## Waku is a gossip protocol that synchronizes a set of messages across nodes ## with attention given to sender and recipient anonymitiy. Messages are ## categorized by a topic and stay alive in the network based on a time-to-live ## measured in seconds. Spam prevention is based on proof-of-work, where large @@ -20,13 +22,13 @@ ## Example usage ## ---------- ## First an `EthereumNode` needs to be created, either with all capabilities set -## or with specifically the Whisper capability set. 
+## or with specifically the Waku capability set. ## The latter can be done like this: ## ## .. code-block::nim ## var node = newEthereumNode(keypair, address, netId, nil, ## addAllCapabilities = false) -## node.addCapability Whisper +## node.addCapability Waku ## ## Now calls such as ``postMessage`` and ``subscribeFilter`` can be done. ## However, they only make real sense after ``connectToNetwork`` was started. As @@ -39,7 +41,7 @@ import eth/common/eth_types, eth/[keys, rlp, async_utils, p2p], eth/p2p/ecies logScope: - topics = "whisper" + topics = "waku" const flagsLen = 1 ## payload flags field length, bytes @@ -51,8 +53,8 @@ const bloomSize = 512 div 8 defaultQueueCapacity = 256 defaultFilterQueueCapacity = 64 - whisperVersion* = 6 ## Whisper version. - whisperVersionStr* = $whisperVersion ## Whisper version. + wakuVersion* = 0 ## Waku version. + wakuVersionStr* = $wakuVersion ## Waku version. defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node. defaultMaxMsgSize* = 1024'u32 * 1024'u32 ## The current default and max ## message size. This can never be larger than the maximum RLPx message size. @@ -90,7 +92,7 @@ type padding*: Option[Bytes] ## Message padding Envelope* = object - ## What goes on the wire in the whisper protocol - a payload and some + ## What goes on the wire in the waku protocol - a payload and some ## book-keeping # Don't touch field order, there's lots of macro magic that depends on it expiry*: uint32 ## Unix timestamp when message expires @@ -123,7 +125,7 @@ type Queue* = object ## Bounded message repository ## - ## Whisper uses proof-of-work to judge the usefulness of a message staying + ## Waku uses proof-of-work to judge the usefulness of a message staying ## in the "cloud" - messages with low proof-of-work will be removed to make ## room for those with higher pow, even if they haven't expired yet. ## Larger messages and those with high time-to-live will require more pow. 
@@ -156,7 +158,7 @@ type Filters* = Table[string, Filter] - WhisperConfig* = object + WakuConfig* = object powRequirement*: float64 bloom*: Bloom isLightNode*: bool @@ -175,7 +177,7 @@ proc leadingZeroBits(hash: MDigest): int = break proc calcPow*(size, ttl: uint64, hash: Hash): float64 = - ## Whisper proof-of-work is defined as the best bit of a hash divided by + ## Waku proof-of-work is defined as the best bit of a hash divided by ## encoded size and time-to-live, such that large and long-lived messages get ## penalized @@ -183,7 +185,7 @@ proc calcPow*(size, ttl: uint64, hash: Hash): float64 = return pow(2.0, bits.float64) / (size.float64 * ttl.float64) proc topicBloom*(topic: Topic): Bloom = - ## Whisper uses 512-bit bloom filters meaning 9 bits of indexing - 3 9-bit + ## Waku uses 512-bit bloom filters meaning 9 bits of indexing - 3 9-bit ## indexes into the bloom are created using the first 3 bytes of the topic and ## complementing each byte with an extra bit from the last topic byte for i in 0..<3: @@ -518,8 +520,8 @@ proc initMessage*(env: Envelope, powCalc = true): Message = proc hash*(msg: Message): hashes.Hash = hash(msg.hash.data) -proc allowed*(msg: Message, config: WhisperConfig): bool = - # Check max msg size, already happens in RLPx but there is a specific shh +proc allowed*(msg: Message, config: WakuConfig): bool = + # Check max msg size, already happens in RLPx but there is a specific waku # max msg size which should always be < RLPx max msg size if msg.size > config.maxMsgSize: warn "Message size too large", size = msg.size @@ -719,7 +721,7 @@ proc toBloom*(filters: Filters): Bloom = result = result or filter.bloom type - WhisperPeer = ref object + WakuPeer = ref object initialized: bool # when successfully completed the handshake powRequirement*: float64 bloom*: Bloom @@ -727,15 +729,15 @@ type trusted*: bool received: HashSet[Message] - WhisperNetwork = ref object + WakuNetwork = ref object queue*: Queue filters*: Filters - config*: WhisperConfig + config*: WakuConfig proc run(peer: Peer) {.gcsafe, async.} -proc run(node: EthereumNode, network: WhisperNetwork) {.gcsafe, async.} +proc run(node: EthereumNode, network: WakuNetwork) {.gcsafe, async.} -proc initProtocolState*(network: WhisperNetwork, node: EthereumNode) {.gcsafe.} = +proc initProtocolState*(network: WakuNetwork, node: EthereumNode) {.gcsafe.} = network.queue = initQueue(defaultQueueCapacity) network.filters = initTable[string, Filter]() network.config.bloom = fullBloom() @@ -744,52 +746,52 @@ proc initProtocolState*(network: WhisperNetwork, node: EthereumNode) {.gcsafe.} network.config.maxMsgSize = defaultMaxMsgSize asyncCheck node.run(network) -p2pProtocol Whisper(version = whisperVersion, - rlpxName = "shh", - peerState = WhisperPeer, - networkState = WhisperNetwork): +p2pProtocol Waku(version = wakuVersion, + rlpxName = "waku", + peerState = WakuPeer, + networkState = WakuNetwork): onPeerConnected do (peer: Peer): - trace "onPeerConnected Whisper" + trace "onPeerConnected Waku" let - whisperNet = peer.networkState - whisperPeer = peer.state + wakuNet = peer.networkState + wakuPeer = peer.state - let m = await peer.status(whisperVersion, - cast[uint](whisperNet.config.powRequirement), - @(whisperNet.config.bloom), - whisperNet.config.isLightNode, + let m = await peer.status(wakuVersion, + cast[uint](wakuNet.config.powRequirement), + @(wakuNet.config.bloom), + wakuNet.config.isLightNode, timeout = chronos.milliseconds(500)) - if m.protocolVersion == whisperVersion: - debug "Whisper peer", peer, 
whisperVersion + if m.protocolVersion == wakuVersion: + debug "Waku peer", peer, wakuVersion else: - raise newException(UselessPeerError, "Incompatible Whisper version") + raise newException(UselessPeerError, "Incompatible Waku version") - whisperPeer.powRequirement = cast[float64](m.powConverted) + wakuPeer.powRequirement = cast[float64](m.powConverted) if m.bloom.len > 0: if m.bloom.len != bloomSize: raise newException(UselessPeerError, "Bloomfilter size mismatch") else: - whisperPeer.bloom.bytesCopy(m.bloom) + wakuPeer.bloom.bytesCopy(m.bloom) else: # If no bloom filter is send we allow all - whisperPeer.bloom = fullBloom() + wakuPeer.bloom = fullBloom() - whisperPeer.isLightNode = m.isLightNode - if whisperPeer.isLightNode and whisperNet.config.isLightNode: + wakuPeer.isLightNode = m.isLightNode + if wakuPeer.isLightNode and wakuNet.config.isLightNode: # No sense in connecting two light nodes so we disconnect raise newException(UselessPeerError, "Two light nodes connected") - whisperPeer.received.init() - whisperPeer.trusted = false - whisperPeer.initialized = true + wakuPeer.received.init() + wakuPeer.trusted = false + wakuPeer.initialized = true - if not whisperNet.config.isLightNode: + if not wakuNet.config.isLightNode: traceAsyncErrors peer.run() - debug "Whisper peer initialized", peer + debug "Waku peer initialized", peer handshake: proc status(peer: Peer, @@ -856,7 +858,7 @@ p2pProtocol Whisper(version = whisperVersion, proc p2pRequest(peer: Peer, envelope: Envelope) = # TODO: here we would have to allow to insert some specific implementation - # such as e.g. Whisper Mail Server + # such as e.g. Waku Mail Server discard proc p2pMessage(peer: Peer, envelope: Envelope) = @@ -884,26 +886,26 @@ proc processQueue(peer: Peer) = # Send to peer all valid and previously not send envelopes in the queue. var envelopes: seq[Envelope] = @[] - whisperPeer = peer.state(Whisper) - whisperNet = peer.networkState(Whisper) + wakuPeer = peer.state(Waku) + wakuNet = peer.networkState(Waku) - for message in whisperNet.queue.items: - if whisperPeer.received.contains(message): + for message in wakuNet.queue.items: + if wakuPeer.received.contains(message): # debug "message was already send to peer" continue - if message.pow < whisperPeer.powRequirement: + if message.pow < wakuPeer.powRequirement: debug "Message PoW too low for peer", pow = message.pow, - powReq = whisperPeer.powRequirement + powReq = wakuPeer.powRequirement continue - if not bloomFilterMatch(whisperPeer.bloom, message.bloom): + if not bloomFilterMatch(wakuPeer.bloom, message.bloom): debug "Message does not match peer bloom filter" continue trace "Adding envelope" envelopes.add(message.env) - whisperPeer.received.incl(message) + wakuPeer.received.incl(message) trace "Sending envelopes", amount=envelopes.len # Ignore failure of sending messages, this could occur when the connection @@ -917,18 +919,18 @@ proc run(peer: Peer) {.async.} = proc pruneReceived(node: EthereumNode) {.raises: [].} = if node.peerPool != nil: # XXX: a bit dirty to need to check for this here ... - var whisperNet = node.protocolState(Whisper) + var wakuNet = node.protocolState(Waku) - for peer in node.protocolPeers(Whisper): + for peer in node.protocolPeers(Waku): if not peer.initialized: continue # NOTE: Perhaps alter the queue prune call to keep track of a HashSet # of pruned messages (as these should be smaller), and diff this with # the received sets. 
- peer.received = intersection(peer.received, whisperNet.queue.itemHashes) + peer.received = intersection(peer.received, wakuNet.queue.itemHashes) -proc run(node: EthereumNode, network: WhisperNetwork) {.async.} = +proc run(node: EthereumNode, network: WakuNetwork) {.async.} = while true: # prune message queue every second # TTL unit is in seconds, so this should be sufficient? @@ -941,24 +943,24 @@ proc run(node: EthereumNode, network: WhisperNetwork) {.async.} = # Private EthereumNode calls --------------------------------------------------- proc sendP2PMessage(node: EthereumNode, peerId: NodeId, env: Envelope): bool = - for peer in node.peers(Whisper): + for peer in node.peers(Waku): if peer.remote.id == peerId: asyncCheck peer.p2pMessage(env) return true proc queueMessage(node: EthereumNode, msg: Message): bool = - var whisperNet = node.protocolState(Whisper) + var wakuNet = node.protocolState(Waku) # We have to do the same checks here as in the messages proc not to leak # any information that the message originates from this node. - if not msg.allowed(whisperNet.config): + if not msg.allowed(wakuNet.config): return false trace "Adding message to queue" - if whisperNet.queue.add(msg): + if wakuNet.queue.add(msg): # Also notify our own filters of the message we are sending, # e.g. msg from local Dapp to Dapp - whisperNet.filters.notify(msg) + wakuNet.filters.notify(msg) return true @@ -984,7 +986,7 @@ proc postMessage*(node: EthereumNode, pubKey = none[PublicKey](), # Allow lightnode to post only direct p2p messages if targetPeer.isSome(): return node.sendP2PMessage(targetPeer.get(), env) - elif not node.protocolState(Whisper).config.isLightNode: + elif not node.protocolState(Waku).config.isLightNode: # non direct p2p message can not have ttl of 0 if env.ttl == 0: return false @@ -1019,21 +1021,21 @@ proc subscribeFilter*(node: EthereumNode, filter: Filter, ## ## NOTE: This call allows for a filter without decryption. If encryption is ## mandatory it should be enforced a layer up. - return node.protocolState(Whisper).filters.subscribeFilter(filter, handler) + return node.protocolState(Waku).filters.subscribeFilter(filter, handler) proc unsubscribeFilter*(node: EthereumNode, filterId: string): bool = ## Remove a previously subscribed filter. var filter: Filter - return node.protocolState(Whisper).filters.take(filterId, filter) + return node.protocolState(Waku).filters.take(filterId, filter) proc getFilterMessages*(node: EthereumNode, filterId: string): seq[ReceivedMessage] = ## Get all the messages currently in the filter queue. This will reset the ## filter message queue. - return node.protocolState(Whisper).filters.getFilterMessages(filterId) + return node.protocolState(Waku).filters.getFilterMessages(filterId) proc filtersToBloom*(node: EthereumNode): Bloom = ## Returns the bloom filter of all topics of all subscribed filters. - return node.protocolState(Whisper).filters.toBloom() + return node.protocolState(Waku).filters.toBloom() proc setPowRequirement*(node: EthereumNode, powReq: float64) {.async.} = ## Sets the PoW requirement for this node, will also send @@ -1041,9 +1043,9 @@ proc setPowRequirement*(node: EthereumNode, powReq: float64) {.async.} = ## ## Failures when sending messages to peers will not be reported. # NOTE: do we need a tolerance of old PoW for some time? 
- node.protocolState(Whisper).config.powRequirement = powReq + node.protocolState(Waku).config.powRequirement = powReq var futures: seq[Future[void]] = @[] - for peer in node.peers(Whisper): + for peer in node.peers(Waku): futures.add(peer.powRequirement(cast[uint](powReq))) # Exceptions from sendMsg will not be raised @@ -1055,9 +1057,9 @@ proc setBloomFilter*(node: EthereumNode, bloom: Bloom) {.async.} = ## ## Failures when sending messages to peers will not be reported. # NOTE: do we need a tolerance of old bloom filter for some time? - node.protocolState(Whisper).config.bloom = bloom + node.protocolState(Waku).config.bloom = bloom var futures: seq[Future[void]] = @[] - for peer in node.peers(Whisper): + for peer in node.peers(Waku): futures.add(peer.bloomFilterExchange(@bloom)) # Exceptions from sendMsg will not be raised @@ -1069,32 +1071,32 @@ proc setMaxMessageSize*(node: EthereumNode, size: uint32): bool = if size > defaultMaxMsgSize: warn "size > defaultMaxMsgSize" return false - node.protocolState(Whisper).config.maxMsgSize = size + node.protocolState(Waku).config.maxMsgSize = size return true proc setPeerTrusted*(node: EthereumNode, peerId: NodeId): bool = ## Set a connected peer as trusted. - for peer in node.peers(Whisper): + for peer in node.peers(Waku): if peer.remote.id == peerId: - peer.state(Whisper).trusted = true + peer.state(Waku).trusted = true return true proc setLightNode*(node: EthereumNode, isLightNode: bool) = - ## Set this node as a Whisper light node. + ## Set this node as a Waku light node. ## ## NOTE: Should be run before connection is made with peers as this ## setting is only communicated at peer handshake. - node.protocolState(Whisper).config.isLightNode = isLightNode + node.protocolState(Waku).config.isLightNode = isLightNode -proc configureWhisper*(node: EthereumNode, config: WhisperConfig) = - ## Apply a Whisper configuration. +proc configureWaku*(node: EthereumNode, config: WakuConfig) = + ## Apply a Waku configuration. ## ## NOTE: Should be run before connection is made with peers as some ## of the settings are only communicated at peer handshake. - node.protocolState(Whisper).config = config + node.protocolState(Waku).config = config proc resetMessageQueue*(node: EthereumNode) = ## Full reset of the message queue. ## ## NOTE: Not something that should be run in normal circumstances. 
- node.protocolState(Whisper).queue = initQueue(defaultQueueCapacity) + node.protocolState(Waku).queue = initQueue(defaultQueueCapacity) From 2c4ec30ef2b7d8f7af932e59a300a8df4c8f2858 Mon Sep 17 00:00:00 2001 From: Oskar Thoren Date: Fri, 15 Nov 2019 15:52:45 +0800 Subject: [PATCH 03/13] waku zero import shh tests --- tests/p2p/test_waku.nim | 416 ++++++++++++++++++++++++++++++++ tests/p2p/test_waku_connect.nim | 327 +++++++++++++++++++++++++ 2 files changed, 743 insertions(+) create mode 100644 tests/p2p/test_waku.nim create mode 100644 tests/p2p/test_waku_connect.nim diff --git a/tests/p2p/test_waku.nim b/tests/p2p/test_waku.nim new file mode 100644 index 00000000..25aac941 --- /dev/null +++ b/tests/p2p/test_waku.nim @@ -0,0 +1,416 @@ +# +# Ethereum P2P +# (c) Copyright 2018 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +import + sequtils, options, unittest, times, tables, + nimcrypto/hash, + eth/[keys, rlp], + eth/p2p/rlpx_protocols/whisper_protocol as whisper + +suite "Whisper payload": + test "should roundtrip without keys": + let payload = Payload(payload: @[byte 0, 1, 2]) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().src.isNone() + decoded.get().padding.get().len == 251 # 256 -1 -1 -3 + + test "should roundtrip with symmetric encryption": + var symKey: SymKey + let payload = Payload(symKey: some(symKey), payload: @[byte 0, 1, 2]) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get(), symKey = some(symKey)) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().src.isNone() + decoded.get().padding.get().len == 251 # 256 -1 -1 -3 + + test "should roundtrip with signature": + let privKey = keys.newPrivateKey() + + let payload = Payload(src: some(privKey), payload: @[byte 0, 1, 2]) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + privKey.getPublicKey() == decoded.get().src.get() + decoded.get().padding.get().len == 186 # 256 -1 -1 -3 -65 + + test "should roundtrip with asymmetric encryption": + let privKey = keys.newPrivateKey() + + let payload = Payload(dst: some(privKey.getPublicKey()), + payload: @[byte 0, 1, 2]) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get(), dst = some(privKey)) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().src.isNone() + decoded.get().padding.get().len == 251 # 256 -1 -1 -3 + + test "should return specified bloom": + # Geth test: https://github.com/ethersphere/go-ethereum/blob/d3441ebb563439bac0837d70591f92e2c6080303/whisper/whisperv6/whisper_test.go#L834 + let top0 = [byte 0, 0, 255, 6] + var x: Bloom + x[0] = byte 1 + x[32] = byte 1 + x[^1] = byte 128 + check @(top0.topicBloom) == @x + +suite "Whisper payload padding": + test "should do max padding": + let payload = Payload(payload: repeat(byte 1, 254)) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().padding.isSome() + decoded.get().padding.get().len == 256 # as dataLen == 256 + + test "should do max padding with signature": + let privKey = keys.newPrivateKey() + + let payload = Payload(src: some(privKey), 
payload: repeat(byte 1, 189)) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + privKey.getPublicKey() == decoded.get().src.get() + decoded.get().padding.isSome() + decoded.get().padding.get().len == 256 # as dataLen == 256 + + test "should do min padding": + let payload = Payload(payload: repeat(byte 1, 253)) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().padding.isSome() + decoded.get().padding.get().len == 1 # as dataLen == 255 + + test "should do min padding with signature": + let privKey = keys.newPrivateKey() + + let payload = Payload(src: some(privKey), payload: repeat(byte 1, 188)) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + privKey.getPublicKey() == decoded.get().src.get() + decoded.get().padding.isSome() + decoded.get().padding.get().len == 1 # as dataLen == 255 + + test "should roundtrip custom padding": + let payload = Payload(payload: repeat(byte 1, 10), + padding: some(repeat(byte 2, 100))) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().padding.isSome() + payload.padding.get() == decoded.get().padding.get() + + test "should roundtrip custom 0 padding": + let padding: seq[byte] = @[] + let payload = Payload(payload: repeat(byte 1, 10), + padding: some(padding)) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + decoded.get().padding.isNone() + + test "should roundtrip custom padding with signature": + let privKey = keys.newPrivateKey() + let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10), + padding: some(repeat(byte 2, 100))) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + privKey.getPublicKey() == decoded.get().src.get() + decoded.get().padding.isSome() + payload.padding.get() == decoded.get().padding.get() + + test "should roundtrip custom 0 padding with signature": + let padding: seq[byte] = @[] + let privKey = keys.newPrivateKey() + let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10), + padding: some(padding)) + let encoded = whisper.encode(payload) + + let decoded = whisper.decode(encoded.get()) + check: + decoded.isSome() + payload.payload == decoded.get().payload + privKey.getPublicKey() == decoded.get().src.get() + decoded.get().padding.isNone() + +# example from https://github.com/paritytech/parity-ethereum/blob/93e1040d07e385d1219d00af71c46c720b0a1acf/whisper/src/message.rs#L439 +let + env0 = Envelope( + expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0], + data: repeat(byte 9, 256), nonce: 1010101) + env1 = Envelope( + expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0], + data: repeat(byte 9, 256), nonce: 1010102) + +suite "Whisper envelope": + + proc hashAndPow(env: Envelope): (string, float64) = + # This is the current implementation of go-ethereum + let size = env.toShortRlp().len().uint32 + # This is our current implementation in `whisper_protocol.nim` + # let size = env.len().uint32 + # This is the EIP-627 specification + # let size = 
env.toRlp().len().uint32 + let hash = env.calcPowHash() + ($hash, calcPow(size, env.ttl, hash)) + + test "PoW calculation leading zeroes tests": + # Test values from Parity, in message.rs + let testHashes = [ + # 256 leading zeroes + "0x0000000000000000000000000000000000000000000000000000000000000000", + # 255 leading zeroes + "0x0000000000000000000000000000000000000000000000000000000000000001", + # no leading zeroes + "0xff00000000000000000000000000000000000000000000000000000000000000" + ] + check: + calcPow(1, 1, Hash.fromHex(testHashes[0])) == + 115792089237316200000000000000000000000000000000000000000000000000000000000000.0 + calcPow(1, 1, Hash.fromHex(testHashes[1])) == + 57896044618658100000000000000000000000000000000000000000000000000000000000000.0 + calcPow(1, 1, Hash.fromHex(testHashes[2])) == 1.0 + + # Test values from go-ethereum whisperv6 in envelope_test + var env = Envelope(ttl: 1, data: @[byte 0xde, 0xad, 0xbe, 0xef]) + # PoW calculation with no leading zeroes + env.nonce = 100000 + check hashAndPoW(env) == ("A788E02A95BFC673709E97CA81E39CA903BAD5638D3388964C51EB64952172D6", + 0.07692307692307693) + # PoW calculation with 8 leading zeroes + env.nonce = 276 + check hashAndPoW(env) == ("00E2374C6353C243E4073E209A7F2ACB2506522AF318B3B78CF9A88310A2A11C", + 19.692307692307693) + + test "should validate and allow envelope according to config": + let ttl = 1'u32 + let topic = [byte 1, 2, 3, 4] + let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() + + let msg = initMessage(env) + check msg.allowed(config) + + test "should invalidate envelope due to ttl 0": + let ttl = 0'u32 + let topic = [byte 1, 2, 3, 4] + let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() == false + + test "should invalidate envelope due to expired": + let ttl = 1'u32 + let topic = [byte 1, 2, 3, 4] + let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + let env = Envelope(expiry:epochTime().uint32, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() == false + + test "should invalidate envelope due to in the future": + let ttl = 1'u32 + let topic = [byte 1, 2, 3, 4] + let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + # there is currently a 2 second tolerance, hence the + 3 + let env = Envelope(expiry:epochTime().uint32 + ttl + 3, ttl: ttl, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + check env.valid() == false + + test "should not allow envelope due to bloom filter": + let topic = [byte 1, 2, 3, 4] + let wrongTopic = [byte 9, 8, 7, 6] + let config = WhisperConfig(powRequirement: 0, bloom: wrongTopic.topicBloom(), + isLightNode: false, maxMsgSize: defaultMaxMsgSize) + + let env = Envelope(expiry:100000 , ttl: 30, topic: topic, + data: repeat(byte 9, 256), nonce: 0) + + let msg = initMessage(env) + check msg.allowed(config) == false + + +suite "Whisper queue": + test "should throw out lower proof-of-work item when full": + var queue = initQueue(1) + + let msg0 = initMessage(env0) + let msg1 = 
initMessage(env1) + + discard queue.add(msg0) + discard queue.add(msg1) + + check: + queue.items.len() == 1 + queue.items[0].env.nonce == + (if msg0.pow > msg1.pow: msg0.env.nonce else: msg1.env.nonce) + + test "should not throw out messages as long as there is capacity": + var queue = initQueue(2) + + check: + queue.add(initMessage(env0)) == true + queue.add(initMessage(env1)) == true + + queue.items.len() == 2 + + test "check field order against expected rlp order": + check rlp.encode(env0) == + rlp.encodeList(env0.expiry, env0.ttl, env0.topic, env0.data, env0.nonce) + +# To test filters we do not care if the msg is valid or allowed +proc prepFilterTestMsg(pubKey = none[PublicKey](), symKey = none[SymKey](), + src = none[PrivateKey](), topic: Topic, + padding = none[seq[byte]]()): Message = + let payload = Payload(dst: pubKey, symKey: symKey, src: src, + payload: @[byte 0, 1, 2], padding: padding) + let encoded = whisper.encode(payload) + let env = Envelope(expiry: 1, ttl: 1, topic: topic, data: encoded.get(), + nonce: 0) + result = initMessage(env) + +suite "Whisper filter": + test "should notify filter on message with symmetric encryption": + var symKey: SymKey + let topic = [byte 0, 0, 0, 0] + let msg = prepFilterTestMsg(symKey = some(symKey), topic = topic) + + var filters = initTable[string, Filter]() + let filter = newFilter(symKey = some(symKey), topics = @[topic]) + let filterId = filters.subscribeFilter(filter) + + notify(filters, msg) + + let messages = filters.getFilterMessages(filterId) + check: + messages.len == 1 + messages[0].decoded.src.isNone() + messages[0].dst.isNone() + + test "should notify filter on message with asymmetric encryption": + let privKey = keys.newPrivateKey() + let topic = [byte 0, 0, 0, 0] + let msg = prepFilterTestMsg(pubKey = some(privKey.getPublicKey()), + topic = topic) + + var filters = initTable[string, Filter]() + let filter = newFilter(privateKey = some(privKey), topics = @[topic]) + let filterId = filters.subscribeFilter(filter) + + notify(filters, msg) + + let messages = filters.getFilterMessages(filterId) + check: + messages.len == 1 + messages[0].decoded.src.isNone() + messages[0].dst.isSome() + + test "should notify filter on message with signature": + let privKey = keys.newPrivateKey() + let topic = [byte 0, 0, 0, 0] + let msg = prepFilterTestMsg(src = some(privKey), topic = topic) + + var filters = initTable[string, Filter]() + let filter = newFilter(src = some(privKey.getPublicKey()), + topics = @[topic]) + let filterId = filters.subscribeFilter(filter) + + notify(filters, msg) + + let messages = filters.getFilterMessages(filterId) + check: + messages.len == 1 + messages[0].decoded.src.isSome() + messages[0].dst.isNone() + + test "test notify of filter against PoW requirement": + let topic = [byte 0, 0, 0, 0] + let padding = some(repeat(byte 0, 251)) + # this message has a PoW of 0.02962962962962963, number should be updated + # in case PoW algorithm changes or contents of padding, payload, topic, etc. 
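The PoW figures quoted in these tests line up with the EIP-627 definition, PoW = 2^(leading zero bits of the hash) / (size * ttl): in the envelope test above, 0.07692307692307693 is 2^0 / (13 * 1) and 19.692307692307693 is 2^8 / (13 * 1). A minimal sketch of that formula only, where leadingZeroBits is a hypothetical helper counting the leading zero bits of the 256-bit hash (not an API of this module):

import math  # pow()

proc calcPowSketch(size, ttl: uint32, hash: Hash): float64 =
  # Average number of nonce iterations, spread over message size and time-to-live.
  result = pow(2.0, leadingZeroBits(hash).float64) / (size.float64 * ttl.float64)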
+ # update: now with NON rlp encoded envelope size the PoW of this message is + # 0.014492753623188406 + let msg = prepFilterTestMsg(topic = topic, padding = padding) + + var filters = initTable[string, Filter]() + let + filterId1 = filters.subscribeFilter( + newFilter(topics = @[topic], powReq = 0.014492753623188406)) + filterId2 = filters.subscribeFilter( + newFilter(topics = @[topic], powReq = 0.014492753623188407)) + + notify(filters, msg) + + check: + filters.getFilterMessages(filterId1).len == 1 + filters.getFilterMessages(filterId2).len == 0 + + test "test notify of filter on message with certain topic": + let + topic1 = [byte 0xAB, 0x12, 0xCD, 0x34] + topic2 = [byte 0, 0, 0, 0] + + let msg = prepFilterTestMsg(topic = topic1) + + var filters = initTable[string, Filter]() + let + filterId1 = filters.subscribeFilter(newFilter(topics = @[topic1])) + filterId2 = filters.subscribeFilter(newFilter(topics = @[topic2])) + + notify(filters, msg) + + check: + filters.getFilterMessages(filterId1).len == 1 + filters.getFilterMessages(filterId2).len == 0 diff --git a/tests/p2p/test_waku_connect.nim b/tests/p2p/test_waku_connect.nim new file mode 100644 index 00000000..913712bc --- /dev/null +++ b/tests/p2p/test_waku_connect.nim @@ -0,0 +1,327 @@ +# +# Ethereum P2P +# (c) Copyright 2018 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +import + sequtils, options, unittest, tables, chronos, eth/[keys, p2p], + eth/p2p/rlpx_protocols/whisper_protocol, eth/p2p/peer_pool, + ./p2p_test_helper + +proc resetMessageQueues(nodes: varargs[EthereumNode]) = + for node in nodes: + node.resetMessageQueue() + +let safeTTL = 5'u32 +let waitInterval = messageInterval + 150.milliseconds + +suite "Whisper connections": + var node1 = setupTestNode(Whisper) + var node2 = setupTestNode(Whisper) + node2.startListening() + waitFor node1.peerPool.connectToNode(newNode(initENode(node2.keys.pubKey, + node2.address))) + asyncTest "Two peers connected": + check: + node1.peerPool.connectedNodes.len() == 1 + + asyncTest "Filters with encryption and signing": + let encryptKeyPair = newKeyPair() + let signKeyPair = newKeyPair() + var symKey: SymKey + let topic = [byte 0x12, 0, 0, 0] + var filters: seq[string] = @[] + var payloads = [repeat(byte 1, 10), repeat(byte 2, 10), + repeat(byte 3, 10), repeat(byte 4, 10)] + var futures = [newFuture[int](), newFuture[int](), + newFuture[int](), newFuture[int]()] + + proc handler1(msg: ReceivedMessage) = + var count {.global.}: int + check msg.decoded.payload == payloads[0] or msg.decoded.payload == payloads[1] + count += 1 + if count == 2: futures[0].complete(1) + proc handler2(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[1] + futures[1].complete(1) + proc handler3(msg: ReceivedMessage) = + var count {.global.}: int + check msg.decoded.payload == payloads[2] or msg.decoded.payload == payloads[3] + count += 1 + if count == 2: futures[2].complete(1) + proc handler4(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[3] + futures[3].complete(1) + + # Filters + # filter for encrypted asym + filters.add(node1.subscribeFilter(newFilter(privateKey = some(encryptKeyPair.seckey), + topics = @[topic]), handler1)) + # filter for encrypted asym + signed + filters.add(node1.subscribeFilter(newFilter(some(signKeyPair.pubkey), + privateKey = some(encryptKeyPair.seckey), + topics = @[topic]), handler2)) + # filter for encrypted sym + 
filters.add(node1.subscribeFilter(newFilter(symKey = some(symKey), + topics = @[topic]), handler3)) + # filter for encrypted sym + signed + filters.add(node1.subscribeFilter(newFilter(some(signKeyPair.pubkey), + symKey = some(symKey), + topics = @[topic]), handler4)) + # Messages + check: + # encrypted asym + node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL, + topic = topic, payload = payloads[0]) == true + # encrypted asym + signed + node2.postMessage(some(encryptKeyPair.pubkey), + src = some(signKeyPair.seckey), ttl = safeTTL, + topic = topic, payload = payloads[1]) == true + # encrypted sym + node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic, + payload = payloads[2]) == true + # encrypted sym + signed + node2.postMessage(symKey = some(symKey), + src = some(signKeyPair.seckey), + ttl = safeTTL, topic = topic, + payload = payloads[3]) == true + + node2.protocolState(Whisper).queue.items.len == 4 + + check: + await allFutures(futures).withTimeout(waitInterval) + node1.protocolState(Whisper).queue.items.len == 4 + + for filter in filters: + check node1.unsubscribeFilter(filter) == true + + resetMessageQueues(node1, node2) + + asyncTest "Filters with topics": + let topic1 = [byte 0x12, 0, 0, 0] + let topic2 = [byte 0x34, 0, 0, 0] + var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)] + var futures = [newFuture[int](), newFuture[int]()] + proc handler1(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[0] + futures[0].complete(1) + proc handler2(msg: ReceivedMessage) = + check msg.decoded.payload == payloads[1] + futures[1].complete(1) + + var filter1 = node1.subscribeFilter(newFilter(topics = @[topic1]), handler1) + var filter2 = node1.subscribeFilter(newFilter(topics = @[topic2]), handler2) + + check: + node2.postMessage(ttl = safeTTL + 1, topic = topic1, + payload = payloads[0]) == true + node2.postMessage(ttl = safeTTL, topic = topic2, + payload = payloads[1]) == true + node2.protocolState(Whisper).queue.items.len == 2 + + await allFutures(futures).withTimeout(waitInterval) + node1.protocolState(Whisper).queue.items.len == 2 + + node1.unsubscribeFilter(filter1) == true + node1.unsubscribeFilter(filter2) == true + + resetMessageQueues(node1, node2) + + asyncTest "Filters with PoW": + let topic = [byte 0x12, 0, 0, 0] + var payload = repeat(byte 0, 10) + var futures = [newFuture[int](), newFuture[int]()] + proc handler1(msg: ReceivedMessage) = + check msg.decoded.payload == payload + futures[0].complete(1) + proc handler2(msg: ReceivedMessage) = + check msg.decoded.payload == payload + futures[1].complete(1) + + var filter1 = node1.subscribeFilter(newFilter(topics = @[topic], powReq = 0), + handler1) + var filter2 = node1.subscribeFilter(newFilter(topics = @[topic], + powReq = 1_000_000), handler2) + + check: + node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + + (await futures[0].withTimeout(waitInterval)) == true + (await futures[1].withTimeout(waitInterval)) == false + node1.protocolState(Whisper).queue.items.len == 1 + + node1.unsubscribeFilter(filter1) == true + node1.unsubscribeFilter(filter2) == true + + resetMessageQueues(node1, node2) + + asyncTest "Filters with queues": + let topic = [byte 0, 0, 0, 0] + let payload = repeat(byte 0, 10) + + var filter = node1.subscribeFilter(newFilter(topics = @[topic])) + for i in countdown(10, 1): + check node2.postMessage(ttl = safeTTL, topic = topic, + payload = payload) == true + + await sleepAsync(waitInterval) + check: + node1.getFilterMessages(filter).len() == 10 + 
node1.getFilterMessages(filter).len() == 0 + node1.unsubscribeFilter(filter) == true + + resetMessageQueues(node1, node2) + + asyncTest "Local filter notify": + let topic = [byte 0, 0, 0, 0] + + var filter = node1.subscribeFilter(newFilter(topics = @[topic])) + check: + node1.postMessage(ttl = safeTTL, topic = topic, + payload = repeat(byte 4, 10)) == true + node1.getFilterMessages(filter).len() == 1 + node1.unsubscribeFilter(filter) == true + + await sleepAsync(waitInterval) + resetMessageQueues(node1, node2) + + asyncTest "Bloomfilter blocking": + let sendTopic1 = [byte 0x12, 0, 0, 0] + let sendTopic2 = [byte 0x34, 0, 0, 0] + let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]] + let payload = repeat(byte 0, 10) + var f: Future[int] = newFuture[int]() + proc handler(msg: ReceivedMessage) = + check msg.decoded.payload == payload + f.complete(1) + var filter = node1.subscribeFilter(newFilter(topics = filterTopics), handler) + await node1.setBloomFilter(node1.filtersToBloom()) + + check: + node2.postMessage(ttl = safeTTL, topic = sendTopic1, + payload = payload) == true + node2.protocolState(Whisper).queue.items.len == 1 + + (await f.withTimeout(waitInterval)) == false + node1.protocolState(Whisper).queue.items.len == 0 + + resetMessageQueues(node1, node2) + + f = newFuture[int]() + + check: + node2.postMessage(ttl = safeTTL, topic = sendTopic2, + payload = payload) == true + node2.protocolState(Whisper).queue.items.len == 1 + + await f.withTimeout(waitInterval) + f.read() == 1 + node1.protocolState(Whisper).queue.items.len == 1 + + node1.unsubscribeFilter(filter) == true + + await node1.setBloomFilter(fullBloom()) + + resetMessageQueues(node1, node2) + + asyncTest "PoW blocking": + let topic = [byte 0, 0, 0, 0] + let payload = repeat(byte 0, 10) + + await node1.setPowRequirement(1_000_000) + check: + node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + node2.protocolState(Whisper).queue.items.len == 1 + await sleepAsync(waitInterval) + check: + node1.protocolState(Whisper).queue.items.len == 0 + + resetMessageQueues(node1, node2) + + await node1.setPowRequirement(0.0) + check: + node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + node2.protocolState(Whisper).queue.items.len == 1 + await sleepAsync(waitInterval) + check: + node1.protocolState(Whisper).queue.items.len == 1 + + resetMessageQueues(node1, node2) + + asyncTest "Queue pruning": + let topic = [byte 0, 0, 0, 0] + let payload = repeat(byte 0, 10) + # We need a minimum TTL of 2 as when set to 1 there is a small chance that + # it is already expired after messageInterval due to rounding down of float + # to uint32 in postMessage() + let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire + for i in countdown(10, 1): + check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload) == true + check node2.protocolState(Whisper).queue.items.len == 10 + + await sleepAsync(waitInterval) + check node1.protocolState(Whisper).queue.items.len == 10 + + await sleepAsync(milliseconds((lowerTTL+1)*1000)) + check node1.protocolState(Whisper).queue.items.len == 0 + check node2.protocolState(Whisper).queue.items.len == 0 + + resetMessageQueues(node1, node2) + + asyncTest "P2P post": + let topic = [byte 0, 0, 0, 0] + var f: Future[int] = newFuture[int]() + proc handler(msg: ReceivedMessage) = + check msg.decoded.payload == repeat(byte 4, 10) + f.complete(1) + + var filter = node1.subscribeFilter(newFilter(topics = @[topic], + allowP2P = true), handler) + check: + 
node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true + node2.postMessage(ttl = 10, topic = topic, + payload = repeat(byte 4, 10), + targetPeer = some(toNodeId(node1.keys.pubkey))) == true + + await f.withTimeout(waitInterval) + f.read() == 1 + node1.protocolState(Whisper).queue.items.len == 0 + node2.protocolState(Whisper).queue.items.len == 0 + + node1.unsubscribeFilter(filter) == true + + asyncTest "Light node posting": + var ln1 = setupTestNode(Whisper) + ln1.setLightNode(true) + + await ln1.peerPool.connectToNode(newNode(initENode(node2.keys.pubKey, + node2.address))) + + let topic = [byte 0, 0, 0, 0] + + check: + # normal post + ln1.postMessage(ttl = safeTTL, topic = topic, + payload = repeat(byte 0, 10)) == false + ln1.protocolState(Whisper).queue.items.len == 0 + # P2P post + ln1.postMessage(ttl = safeTTL, topic = topic, + payload = repeat(byte 0, 10), + targetPeer = some(toNodeId(node2.keys.pubkey))) == true + ln1.protocolState(Whisper).queue.items.len == 0 + + asyncTest "Connect two light nodes": + var ln1 = setupTestNode(Whisper) + var ln2 = setupTestNode(Whisper) + + ln1.setLightNode(true) + ln2.setLightNode(true) + + ln2.startListening() + let peer = await ln1.rlpxConnect(newNode(initENode(ln2.keys.pubKey, + ln2.address))) + check peer.isNil == true From 4c3400cf3fe2602b392403636bdf521c5ce495a5 Mon Sep 17 00:00:00 2001 From: Oskar Thoren Date: Fri, 15 Nov 2019 15:53:43 +0800 Subject: [PATCH 04/13] waku 0 replace whisper waku --- tests/p2p/test_waku.nim | 80 ++++++++++++++++----------------- tests/p2p/test_waku_connect.nim | 56 +++++++++++------------ 2 files changed, 68 insertions(+), 68 deletions(-) diff --git a/tests/p2p/test_waku.nim b/tests/p2p/test_waku.nim index 25aac941..37704bfd 100644 --- a/tests/p2p/test_waku.nim +++ b/tests/p2p/test_waku.nim @@ -11,14 +11,14 @@ import sequtils, options, unittest, times, tables, nimcrypto/hash, eth/[keys, rlp], - eth/p2p/rlpx_protocols/whisper_protocol as whisper + eth/p2p/rlpx_protocols/waku_protocol as waku -suite "Whisper payload": +suite "Waku payload": test "should roundtrip without keys": let payload = Payload(payload: @[byte 0, 1, 2]) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get()) + let decoded = waku.decode(encoded.get()) check: decoded.isSome() payload.payload == decoded.get().payload @@ -28,9 +28,9 @@ suite "Whisper payload": test "should roundtrip with symmetric encryption": var symKey: SymKey let payload = Payload(symKey: some(symKey), payload: @[byte 0, 1, 2]) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get(), symKey = some(symKey)) + let decoded = waku.decode(encoded.get(), symKey = some(symKey)) check: decoded.isSome() payload.payload == decoded.get().payload @@ -41,9 +41,9 @@ suite "Whisper payload": let privKey = keys.newPrivateKey() let payload = Payload(src: some(privKey), payload: @[byte 0, 1, 2]) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get()) + let decoded = waku.decode(encoded.get()) check: decoded.isSome() payload.payload == decoded.get().payload @@ -55,9 +55,9 @@ suite "Whisper payload": let payload = Payload(dst: some(privKey.getPublicKey()), payload: @[byte 0, 1, 2]) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get(), dst = some(privKey)) + let decoded = waku.decode(encoded.get(), dst = some(privKey)) check: 
decoded.isSome() payload.payload == decoded.get().payload @@ -65,7 +65,7 @@ suite "Whisper payload": decoded.get().padding.get().len == 251 # 256 -1 -1 -3 test "should return specified bloom": - # Geth test: https://github.com/ethersphere/go-ethereum/blob/d3441ebb563439bac0837d70591f92e2c6080303/whisper/whisperv6/whisper_test.go#L834 + # Geth test: https://github.com/ethersphere/go-ethereum/blob/d3441ebb563439bac0837d70591f92e2c6080303/waku/wakuv6/waku_test.go#L834 let top0 = [byte 0, 0, 255, 6] var x: Bloom x[0] = byte 1 @@ -73,12 +73,12 @@ suite "Whisper payload": x[^1] = byte 128 check @(top0.topicBloom) == @x -suite "Whisper payload padding": +suite "Waku payload padding": test "should do max padding": let payload = Payload(payload: repeat(byte 1, 254)) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get()) + let decoded = waku.decode(encoded.get()) check: decoded.isSome() payload.payload == decoded.get().payload @@ -89,9 +89,9 @@ suite "Whisper payload padding": let privKey = keys.newPrivateKey() let payload = Payload(src: some(privKey), payload: repeat(byte 1, 189)) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get()) + let decoded = waku.decode(encoded.get()) check: decoded.isSome() payload.payload == decoded.get().payload @@ -101,9 +101,9 @@ suite "Whisper payload padding": test "should do min padding": let payload = Payload(payload: repeat(byte 1, 253)) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get()) + let decoded = waku.decode(encoded.get()) check: decoded.isSome() payload.payload == decoded.get().payload @@ -114,9 +114,9 @@ suite "Whisper payload padding": let privKey = keys.newPrivateKey() let payload = Payload(src: some(privKey), payload: repeat(byte 1, 188)) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get()) + let decoded = waku.decode(encoded.get()) check: decoded.isSome() payload.payload == decoded.get().payload @@ -127,9 +127,9 @@ suite "Whisper payload padding": test "should roundtrip custom padding": let payload = Payload(payload: repeat(byte 1, 10), padding: some(repeat(byte 2, 100))) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get()) + let decoded = waku.decode(encoded.get()) check: decoded.isSome() payload.payload == decoded.get().payload @@ -140,9 +140,9 @@ suite "Whisper payload padding": let padding: seq[byte] = @[] let payload = Payload(payload: repeat(byte 1, 10), padding: some(padding)) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get()) + let decoded = waku.decode(encoded.get()) check: decoded.isSome() payload.payload == decoded.get().payload @@ -152,9 +152,9 @@ suite "Whisper payload padding": let privKey = keys.newPrivateKey() let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10), padding: some(repeat(byte 2, 100))) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get()) + let decoded = waku.decode(encoded.get()) check: decoded.isSome() payload.payload == decoded.get().payload @@ -167,16 +167,16 @@ suite "Whisper payload padding": let privKey = keys.newPrivateKey() let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10), padding: some(padding)) 
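The payload lengths chosen across these padding tests (254, 253, 189, 188 and the "256 -1 -1 -3" comment) are consistent with the encoded data being 1 flags byte, 1 payload-length byte (for payloads under 256 bytes), the payload itself, plus a 65-byte recoverable signature when signing, padded up to the next 256-byte boundary, with a full extra 256-byte pad when the data is already aligned. A small arithmetic check of that reading, with the byte counts treated as assumptions rather than taken from the implementation:

const
  flagsByte = 1
  lenOfLen = 1   # length-of-length byte for payloads shorter than 256 bytes
  sigLen = 65    # recoverable secp256k1 signature

doAssert flagsByte + lenOfLen + 189 + sigLen == 256  # signed "max padding" -> an extra 256 pad bytes
doAssert flagsByte + lenOfLen + 188 + sigLen == 255  # signed "min padding" -> 1 pad byte
doAssert flagsByte + lenOfLen + 253 == 255           # unsigned "min padding" -> 1 pad byte
doAssert 256 - (flagsByte + lenOfLen + 3) == 251     # the "256 -1 -1 -3" padding length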
- let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) - let decoded = whisper.decode(encoded.get()) + let decoded = waku.decode(encoded.get()) check: decoded.isSome() payload.payload == decoded.get().payload privKey.getPublicKey() == decoded.get().src.get() decoded.get().padding.isNone() -# example from https://github.com/paritytech/parity-ethereum/blob/93e1040d07e385d1219d00af71c46c720b0a1acf/whisper/src/message.rs#L439 +# example from https://github.com/paritytech/parity-ethereum/blob/93e1040d07e385d1219d00af71c46c720b0a1acf/waku/src/message.rs#L439 let env0 = Envelope( expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0], @@ -185,12 +185,12 @@ let expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0], data: repeat(byte 9, 256), nonce: 1010102) -suite "Whisper envelope": +suite "Waku envelope": proc hashAndPow(env: Envelope): (string, float64) = # This is the current implementation of go-ethereum let size = env.toShortRlp().len().uint32 - # This is our current implementation in `whisper_protocol.nim` + # This is our current implementation in `waku_protocol.nim` # let size = env.len().uint32 # This is the EIP-627 specification # let size = env.toRlp().len().uint32 @@ -214,7 +214,7 @@ suite "Whisper envelope": 57896044618658100000000000000000000000000000000000000000000000000000000000000.0 calcPow(1, 1, Hash.fromHex(testHashes[2])) == 1.0 - # Test values from go-ethereum whisperv6 in envelope_test + # Test values from go-ethereum wakuv6 in envelope_test var env = Envelope(ttl: 1, data: @[byte 0xde, 0xad, 0xbe, 0xef]) # PoW calculation with no leading zeroes env.nonce = 100000 @@ -228,7 +228,7 @@ suite "Whisper envelope": test "should validate and allow envelope according to config": let ttl = 1'u32 let topic = [byte 1, 2, 3, 4] - let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + let config = WakuConfig(powRequirement: 0, bloom: topic.topicBloom(), isLightNode: false, maxMsgSize: defaultMaxMsgSize) let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic, @@ -241,7 +241,7 @@ suite "Whisper envelope": test "should invalidate envelope due to ttl 0": let ttl = 0'u32 let topic = [byte 1, 2, 3, 4] - let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + let config = WakuConfig(powRequirement: 0, bloom: topic.topicBloom(), isLightNode: false, maxMsgSize: defaultMaxMsgSize) let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic, @@ -251,7 +251,7 @@ suite "Whisper envelope": test "should invalidate envelope due to expired": let ttl = 1'u32 let topic = [byte 1, 2, 3, 4] - let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + let config = WakuConfig(powRequirement: 0, bloom: topic.topicBloom(), isLightNode: false, maxMsgSize: defaultMaxMsgSize) let env = Envelope(expiry:epochTime().uint32, ttl: ttl, topic: topic, @@ -261,7 +261,7 @@ suite "Whisper envelope": test "should invalidate envelope due to in the future": let ttl = 1'u32 let topic = [byte 1, 2, 3, 4] - let config = WhisperConfig(powRequirement: 0, bloom: topic.topicBloom(), + let config = WakuConfig(powRequirement: 0, bloom: topic.topicBloom(), isLightNode: false, maxMsgSize: defaultMaxMsgSize) # there is currently a 2 second tolerance, hence the + 3 @@ -272,7 +272,7 @@ suite "Whisper envelope": test "should not allow envelope due to bloom filter": let topic = [byte 1, 2, 3, 4] let wrongTopic = [byte 9, 8, 7, 6] - let config = WhisperConfig(powRequirement: 0, bloom: wrongTopic.topicBloom(), + let config = 
WakuConfig(powRequirement: 0, bloom: wrongTopic.topicBloom(), isLightNode: false, maxMsgSize: defaultMaxMsgSize) let env = Envelope(expiry:100000 , ttl: 30, topic: topic, @@ -282,7 +282,7 @@ suite "Whisper envelope": check msg.allowed(config) == false -suite "Whisper queue": +suite "Waku queue": test "should throw out lower proof-of-work item when full": var queue = initQueue(1) @@ -316,12 +316,12 @@ proc prepFilterTestMsg(pubKey = none[PublicKey](), symKey = none[SymKey](), padding = none[seq[byte]]()): Message = let payload = Payload(dst: pubKey, symKey: symKey, src: src, payload: @[byte 0, 1, 2], padding: padding) - let encoded = whisper.encode(payload) + let encoded = waku.encode(payload) let env = Envelope(expiry: 1, ttl: 1, topic: topic, data: encoded.get(), nonce: 0) result = initMessage(env) -suite "Whisper filter": +suite "Waku filter": test "should notify filter on message with symmetric encryption": var symKey: SymKey let topic = [byte 0, 0, 0, 0] diff --git a/tests/p2p/test_waku_connect.nim b/tests/p2p/test_waku_connect.nim index 913712bc..7db360ef 100644 --- a/tests/p2p/test_waku_connect.nim +++ b/tests/p2p/test_waku_connect.nim @@ -9,7 +9,7 @@ import sequtils, options, unittest, tables, chronos, eth/[keys, p2p], - eth/p2p/rlpx_protocols/whisper_protocol, eth/p2p/peer_pool, + eth/p2p/rlpx_protocols/waku_protocol, eth/p2p/peer_pool, ./p2p_test_helper proc resetMessageQueues(nodes: varargs[EthereumNode]) = @@ -19,9 +19,9 @@ proc resetMessageQueues(nodes: varargs[EthereumNode]) = let safeTTL = 5'u32 let waitInterval = messageInterval + 150.milliseconds -suite "Whisper connections": - var node1 = setupTestNode(Whisper) - var node2 = setupTestNode(Whisper) +suite "Waku connections": + var node1 = setupTestNode(Waku) + var node2 = setupTestNode(Waku) node2.startListening() waitFor node1.peerPool.connectToNode(newNode(initENode(node2.keys.pubKey, node2.address))) @@ -90,11 +90,11 @@ suite "Whisper connections": ttl = safeTTL, topic = topic, payload = payloads[3]) == true - node2.protocolState(Whisper).queue.items.len == 4 + node2.protocolState(Waku).queue.items.len == 4 check: await allFutures(futures).withTimeout(waitInterval) - node1.protocolState(Whisper).queue.items.len == 4 + node1.protocolState(Waku).queue.items.len == 4 for filter in filters: check node1.unsubscribeFilter(filter) == true @@ -121,10 +121,10 @@ suite "Whisper connections": payload = payloads[0]) == true node2.postMessage(ttl = safeTTL, topic = topic2, payload = payloads[1]) == true - node2.protocolState(Whisper).queue.items.len == 2 + node2.protocolState(Waku).queue.items.len == 2 await allFutures(futures).withTimeout(waitInterval) - node1.protocolState(Whisper).queue.items.len == 2 + node1.protocolState(Waku).queue.items.len == 2 node1.unsubscribeFilter(filter1) == true node1.unsubscribeFilter(filter2) == true @@ -152,7 +152,7 @@ suite "Whisper connections": (await futures[0].withTimeout(waitInterval)) == true (await futures[1].withTimeout(waitInterval)) == false - node1.protocolState(Whisper).queue.items.len == 1 + node1.protocolState(Waku).queue.items.len == 1 node1.unsubscribeFilter(filter1) == true node1.unsubscribeFilter(filter2) == true @@ -204,10 +204,10 @@ suite "Whisper connections": check: node2.postMessage(ttl = safeTTL, topic = sendTopic1, payload = payload) == true - node2.protocolState(Whisper).queue.items.len == 1 + node2.protocolState(Waku).queue.items.len == 1 (await f.withTimeout(waitInterval)) == false - node1.protocolState(Whisper).queue.items.len == 0 + 
node1.protocolState(Waku).queue.items.len == 0 resetMessageQueues(node1, node2) @@ -216,11 +216,11 @@ suite "Whisper connections": check: node2.postMessage(ttl = safeTTL, topic = sendTopic2, payload = payload) == true - node2.protocolState(Whisper).queue.items.len == 1 + node2.protocolState(Waku).queue.items.len == 1 await f.withTimeout(waitInterval) f.read() == 1 - node1.protocolState(Whisper).queue.items.len == 1 + node1.protocolState(Waku).queue.items.len == 1 node1.unsubscribeFilter(filter) == true @@ -235,20 +235,20 @@ suite "Whisper connections": await node1.setPowRequirement(1_000_000) check: node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true - node2.protocolState(Whisper).queue.items.len == 1 + node2.protocolState(Waku).queue.items.len == 1 await sleepAsync(waitInterval) check: - node1.protocolState(Whisper).queue.items.len == 0 + node1.protocolState(Waku).queue.items.len == 0 resetMessageQueues(node1, node2) await node1.setPowRequirement(0.0) check: node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true - node2.protocolState(Whisper).queue.items.len == 1 + node2.protocolState(Waku).queue.items.len == 1 await sleepAsync(waitInterval) check: - node1.protocolState(Whisper).queue.items.len == 1 + node1.protocolState(Waku).queue.items.len == 1 resetMessageQueues(node1, node2) @@ -261,14 +261,14 @@ suite "Whisper connections": let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire for i in countdown(10, 1): check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload) == true - check node2.protocolState(Whisper).queue.items.len == 10 + check node2.protocolState(Waku).queue.items.len == 10 await sleepAsync(waitInterval) - check node1.protocolState(Whisper).queue.items.len == 10 + check node1.protocolState(Waku).queue.items.len == 10 await sleepAsync(milliseconds((lowerTTL+1)*1000)) - check node1.protocolState(Whisper).queue.items.len == 0 - check node2.protocolState(Whisper).queue.items.len == 0 + check node1.protocolState(Waku).queue.items.len == 0 + check node2.protocolState(Waku).queue.items.len == 0 resetMessageQueues(node1, node2) @@ -289,13 +289,13 @@ suite "Whisper connections": await f.withTimeout(waitInterval) f.read() == 1 - node1.protocolState(Whisper).queue.items.len == 0 - node2.protocolState(Whisper).queue.items.len == 0 + node1.protocolState(Waku).queue.items.len == 0 + node2.protocolState(Waku).queue.items.len == 0 node1.unsubscribeFilter(filter) == true asyncTest "Light node posting": - var ln1 = setupTestNode(Whisper) + var ln1 = setupTestNode(Waku) ln1.setLightNode(true) await ln1.peerPool.connectToNode(newNode(initENode(node2.keys.pubKey, @@ -307,16 +307,16 @@ suite "Whisper connections": # normal post ln1.postMessage(ttl = safeTTL, topic = topic, payload = repeat(byte 0, 10)) == false - ln1.protocolState(Whisper).queue.items.len == 0 + ln1.protocolState(Waku).queue.items.len == 0 # P2P post ln1.postMessage(ttl = safeTTL, topic = topic, payload = repeat(byte 0, 10), targetPeer = some(toNodeId(node2.keys.pubkey))) == true - ln1.protocolState(Whisper).queue.items.len == 0 + ln1.protocolState(Waku).queue.items.len == 0 asyncTest "Connect two light nodes": - var ln1 = setupTestNode(Whisper) - var ln2 = setupTestNode(Whisper) + var ln1 = setupTestNode(Waku) + var ln2 = setupTestNode(Waku) ln1.setLightNode(true) ln2.setLightNode(true) From dcb8b52b418bdb38f9171de78cd4825e6a452664 Mon Sep 17 00:00:00 2001 From: Oskar Thoren Date: Fri, 15 Nov 2019 16:56:56 +0800 Subject: [PATCH 05/13] Waku: rename 
capability to wkk According to `doc/p2p.md` capability should be three letters. I can't see this in https://github.com/ethereum/devp2p/blob/master/rlpx.md#capability-messaging though, but better safe than sorry. This also dismabiguates and stays somewhat consistent in vocabulary: Whisper -> Waku whisper -> waku shh -> wkk --- eth/p2p/rlpx_protocols/waku_protocol.nim | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/eth/p2p/rlpx_protocols/waku_protocol.nim b/eth/p2p/rlpx_protocols/waku_protocol.nim index cfe94b67..245b82d9 100644 --- a/eth/p2p/rlpx_protocols/waku_protocol.nim +++ b/eth/p2p/rlpx_protocols/waku_protocol.nim @@ -521,7 +521,7 @@ proc initMessage*(env: Envelope, powCalc = true): Message = proc hash*(msg: Message): hashes.Hash = hash(msg.hash.data) proc allowed*(msg: Message, config: WakuConfig): bool = - # Check max msg size, already happens in RLPx but there is a specific waku + # Check max msg size, already happens in RLPx but there is a specific wkk # max msg size which should always be < RLPx max msg size if msg.size > config.maxMsgSize: warn "Message size too large", size = msg.size @@ -747,9 +747,9 @@ proc initProtocolState*(network: WakuNetwork, node: EthereumNode) {.gcsafe.} = asyncCheck node.run(network) p2pProtocol Waku(version = wakuVersion, - rlpxName = "waku", - peerState = WakuPeer, - networkState = WakuNetwork): + rlpxName = "wkk", + peerState = WakuPeer, + networkState = WakuNetwork): onPeerConnected do (peer: Peer): trace "onPeerConnected Waku" From bd191be19531a77ddb51ffa8282e3b2a30b5e067 Mon Sep 17 00:00:00 2001 From: Oskar Thoren Date: Fri, 15 Nov 2019 16:59:55 +0800 Subject: [PATCH 06/13] Waku Fix crash due to zero version --- eth/p2p/rlpx_protocols/waku_protocol.nim | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/eth/p2p/rlpx_protocols/waku_protocol.nim b/eth/p2p/rlpx_protocols/waku_protocol.nim index 245b82d9..015703da 100644 --- a/eth/p2p/rlpx_protocols/waku_protocol.nim +++ b/eth/p2p/rlpx_protocols/waku_protocol.nim @@ -53,7 +53,8 @@ const bloomSize = 512 div 8 defaultQueueCapacity = 256 defaultFilterQueueCapacity = 64 - wakuVersion* = 0 ## Waku version. + # XXX: Crashes if version is set to 0, P2P DSL bug or spec concern? + wakuVersion* = 1 ## Waku version. wakuVersionStr* = $wakuVersion ## Waku version. defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node. 
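# For context: in devp2p each capability is advertised in the RLPx Hello message
# as a (name, version) pair, i.e. (rlpxName, wakuVersion) for this protocol, so
# whatever version is chosen here is what peers see during the handshake.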
defaultMaxMsgSize* = 1024'u32 * 1024'u32 ## The current default and max From 53cd0847ba026bad470527d605ecc7cd15d0dd81 Mon Sep 17 00:00:00 2001 From: Oskar Thoren Date: Sat, 16 Nov 2019 16:23:36 +0800 Subject: [PATCH 07/13] Fix waku connect tests - WakuWhisper and Whisper peers connect - WakuWhisper node picks up Whisper messages --- tests/p2p/test_waku_connect.nim | 631 ++++++++++++++++++-------------- 1 file changed, 347 insertions(+), 284 deletions(-) diff --git a/tests/p2p/test_waku_connect.nim b/tests/p2p/test_waku_connect.nim index 7db360ef..88ad58f7 100644 --- a/tests/p2p/test_waku_connect.nim +++ b/tests/p2p/test_waku_connect.nim @@ -9,15 +9,17 @@ import sequtils, options, unittest, tables, chronos, eth/[keys, p2p], - eth/p2p/rlpx_protocols/waku_protocol, eth/p2p/peer_pool, - ./p2p_test_helper + eth/p2p/peer_pool, ./p2p_test_helper -proc resetMessageQueues(nodes: varargs[EthereumNode]) = - for node in nodes: - node.resetMessageQueue() +import eth/p2p/rlpx_protocols/waku_protocol as waku +import eth/p2p/rlpx_protocols/whisper_protocol as whisper + +# proc resetMessageQueues(nodes: varargs[EthereumNode]) = +# for node in nodes: +# resetMessageQueue(node) let safeTTL = 5'u32 -let waitInterval = messageInterval + 150.milliseconds +let waitInterval = waku.messageInterval + 150.milliseconds suite "Waku connections": var node1 = setupTestNode(Waku) @@ -25,303 +27,364 @@ suite "Waku connections": node2.startListening() waitFor node1.peerPool.connectToNode(newNode(initENode(node2.keys.pubKey, node2.address))) - asyncTest "Two peers connected": - check: - node1.peerPool.connectedNodes.len() == 1 - - asyncTest "Filters with encryption and signing": - let encryptKeyPair = newKeyPair() - let signKeyPair = newKeyPair() - var symKey: SymKey - let topic = [byte 0x12, 0, 0, 0] - var filters: seq[string] = @[] - var payloads = [repeat(byte 1, 10), repeat(byte 2, 10), - repeat(byte 3, 10), repeat(byte 4, 10)] - var futures = [newFuture[int](), newFuture[int](), - newFuture[int](), newFuture[int]()] - - proc handler1(msg: ReceivedMessage) = - var count {.global.}: int - check msg.decoded.payload == payloads[0] or msg.decoded.payload == payloads[1] - count += 1 - if count == 2: futures[0].complete(1) - proc handler2(msg: ReceivedMessage) = - check msg.decoded.payload == payloads[1] - futures[1].complete(1) - proc handler3(msg: ReceivedMessage) = - var count {.global.}: int - check msg.decoded.payload == payloads[2] or msg.decoded.payload == payloads[3] - count += 1 - if count == 2: futures[2].complete(1) - proc handler4(msg: ReceivedMessage) = - check msg.decoded.payload == payloads[3] - futures[3].complete(1) - - # Filters - # filter for encrypted asym - filters.add(node1.subscribeFilter(newFilter(privateKey = some(encryptKeyPair.seckey), - topics = @[topic]), handler1)) - # filter for encrypted asym + signed - filters.add(node1.subscribeFilter(newFilter(some(signKeyPair.pubkey), - privateKey = some(encryptKeyPair.seckey), - topics = @[topic]), handler2)) - # filter for encrypted sym - filters.add(node1.subscribeFilter(newFilter(symKey = some(symKey), - topics = @[topic]), handler3)) - # filter for encrypted sym + signed - filters.add(node1.subscribeFilter(newFilter(some(signKeyPair.pubkey), - symKey = some(symKey), - topics = @[topic]), handler4)) - # Messages - check: - # encrypted asym - node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL, - topic = topic, payload = payloads[0]) == true - # encrypted asym + signed - node2.postMessage(some(encryptKeyPair.pubkey), - src = 
some(signKeyPair.seckey), ttl = safeTTL, - topic = topic, payload = payloads[1]) == true - # encrypted sym - node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic, - payload = payloads[2]) == true - # encrypted sym + signed - node2.postMessage(symKey = some(symKey), - src = some(signKeyPair.seckey), - ttl = safeTTL, topic = topic, - payload = payloads[3]) == true - - node2.protocolState(Waku).queue.items.len == 4 + # Waku Whisper has both capabilities and listens to Whisper, then relays traffic + var nodeWakuWhisper = setupTestNode(Waku, Whisper) + # XXX: Assuming we added Whisper capability here + var nodeWhisper = setupTestNode(Whisper) + # TODO: Connect them + nodeWakuWhisper.startListening() + waitFor nodeWhisper.peerPool.connectToNode(newNode(initENode(nodeWakuWhisper.keys.pubKey, + nodeWakuWhisper.address))) + + # NOTE: Commented out Whisper equivalent tests + # To enable, fully qualify nodes + + # asyncTest "Two peers connected": + # check: + # node1.peerPool.connectedNodes.len() == 1 + + # asyncTest "Filters with encryption and signing": + # let encryptKeyPair = newKeyPair() + # let signKeyPair = newKeyPair() + # var symKey: SymKey + # let topic = [byte 0x12, 0, 0, 0] + # var filters: seq[string] = @[] + # var payloads = [repeat(byte 1, 10), repeat(byte 2, 10), + # repeat(byte 3, 10), repeat(byte 4, 10)] + # var futures = [newFuture[int](), newFuture[int](), + # newFuture[int](), newFuture[int]()] + + # proc handler1(msg: ReceivedMessage) = + # var count {.global.}: int + # check msg.decoded.payload == payloads[0] or msg.decoded.payload == payloads[1] + # count += 1 + # if count == 2: futures[0].complete(1) + # proc handler2(msg: ReceivedMessage) = + # check msg.decoded.payload == payloads[1] + # futures[1].complete(1) + # proc handler3(msg: ReceivedMessage) = + # var count {.global.}: int + # check msg.decoded.payload == payloads[2] or msg.decoded.payload == payloads[3] + # count += 1 + # if count == 2: futures[2].complete(1) + # proc handler4(msg: ReceivedMessage) = + # check msg.decoded.payload == payloads[3] + # futures[3].complete(1) + + # # Filters + # # filter for encrypted asym + # filters.add(node1.subscribeFilter(newFilter(privateKey = some(encryptKeyPair.seckey), + # topics = @[topic]), handler1)) + # # filter for encrypted asym + signed + # filters.add(node1.subscribeFilter(newFilter(some(signKeyPair.pubkey), + # privateKey = some(encryptKeyPair.seckey), + # topics = @[topic]), handler2)) + # # filter for encrypted sym + # filters.add(node1.subscribeFilter(newFilter(symKey = some(symKey), + # topics = @[topic]), handler3)) + # # filter for encrypted sym + signed + # filters.add(node1.subscribeFilter(newFilter(some(signKeyPair.pubkey), + # symKey = some(symKey), + # topics = @[topic]), handler4)) + # # Messages + # check: + # # encrypted asym + # node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL, + # topic = topic, payload = payloads[0]) == true + # # encrypted asym + signed + # node2.postMessage(some(encryptKeyPair.pubkey), + # src = some(signKeyPair.seckey), ttl = safeTTL, + # topic = topic, payload = payloads[1]) == true + # # encrypted sym + # node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic, + # payload = payloads[2]) == true + # # encrypted sym + signed + # node2.postMessage(symKey = some(symKey), + # src = some(signKeyPair.seckey), + # ttl = safeTTL, topic = topic, + # payload = payloads[3]) == true + + # node2.protocolState(Waku).queue.items.len == 4 + + # check: + # await 
allFutures(futures).withTimeout(waitInterval) + # node1.protocolState(Waku).queue.items.len == 4 + + # for filter in filters: + # check node1.unsubscribeFilter(filter) == true + + # resetMessageQueues(node1, node2) + + # asyncTest "Filters with topics": + # let topic1 = [byte 0x12, 0, 0, 0] + # let topic2 = [byte 0x34, 0, 0, 0] + # var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)] + # var futures = [newFuture[int](), newFuture[int]()] + # proc handler1(msg: ReceivedMessage) = + # check msg.decoded.payload == payloads[0] + # futures[0].complete(1) + # proc handler2(msg: ReceivedMessage) = + # check msg.decoded.payload == payloads[1] + # futures[1].complete(1) + + # var filter1 = node1.subscribeFilter(newFilter(topics = @[topic1]), handler1) + # var filter2 = node1.subscribeFilter(newFilter(topics = @[topic2]), handler2) + + # check: + # node2.postMessage(ttl = safeTTL + 1, topic = topic1, + # payload = payloads[0]) == true + # node2.postMessage(ttl = safeTTL, topic = topic2, + # payload = payloads[1]) == true + # node2.protocolState(Waku).queue.items.len == 2 + + # await allFutures(futures).withTimeout(waitInterval) + # node1.protocolState(Waku).queue.items.len == 2 + + # node1.unsubscribeFilter(filter1) == true + # node1.unsubscribeFilter(filter2) == true + + # resetMessageQueues(node1, node2) + + # asyncTest "Filters with PoW": + # let topic = [byte 0x12, 0, 0, 0] + # var payload = repeat(byte 0, 10) + # var futures = [newFuture[int](), newFuture[int]()] + # proc handler1(msg: ReceivedMessage) = + # check msg.decoded.payload == payload + # futures[0].complete(1) + # proc handler2(msg: ReceivedMessage) = + # check msg.decoded.payload == payload + # futures[1].complete(1) + + # var filter1 = node1.subscribeFilter(newFilter(topics = @[topic], powReq = 0), + # handler1) + # var filter2 = node1.subscribeFilter(newFilter(topics = @[topic], + # powReq = 1_000_000), handler2) + + # check: + # node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + + # (await futures[0].withTimeout(waitInterval)) == true + # (await futures[1].withTimeout(waitInterval)) == false + # node1.protocolState(Waku).queue.items.len == 1 + + # node1.unsubscribeFilter(filter1) == true + # node1.unsubscribeFilter(filter2) == true + + # resetMessageQueues(node1, node2) + + # asyncTest "Filters with queues": + # let topic = [byte 0, 0, 0, 0] + # let payload = repeat(byte 0, 10) + + # var filter = node1.subscribeFilter(newFilter(topics = @[topic])) + # for i in countdown(10, 1): + # check node2.postMessage(ttl = safeTTL, topic = topic, + # payload = payload) == true + + # await sleepAsync(waitInterval) + # check: + # node1.getFilterMessages(filter).len() == 10 + # node1.getFilterMessages(filter).len() == 0 + # node1.unsubscribeFilter(filter) == true + + # resetMessageQueues(node1, node2) + + # asyncTest "Local filter notify": + # let topic = [byte 0, 0, 0, 0] + + # var filter = node1.subscribeFilter(newFilter(topics = @[topic])) + # check: + # node1.postMessage(ttl = safeTTL, topic = topic, + # payload = repeat(byte 4, 10)) == true + # node1.getFilterMessages(filter).len() == 1 + # node1.unsubscribeFilter(filter) == true + + # await sleepAsync(waitInterval) + # resetMessageQueues(node1, node2) + + # asyncTest "Bloomfilter blocking": + # let sendTopic1 = [byte 0x12, 0, 0, 0] + # let sendTopic2 = [byte 0x34, 0, 0, 0] + # let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]] + # let payload = repeat(byte 0, 10) + # var f: Future[int] = newFuture[int]() + # proc handler(msg: ReceivedMessage) = + 
# check msg.decoded.payload == payload + # f.complete(1) + # var filter = node1.subscribeFilter(newFilter(topics = filterTopics), handler) + # await node1.setBloomFilter(node1.filtersToBloom()) + + # check: + # node2.postMessage(ttl = safeTTL, topic = sendTopic1, + # payload = payload) == true + # node2.protocolState(Waku).queue.items.len == 1 + + # (await f.withTimeout(waitInterval)) == false + # node1.protocolState(Waku).queue.items.len == 0 + + # resetMessageQueues(node1, node2) + + # f = newFuture[int]() + + # check: + # node2.postMessage(ttl = safeTTL, topic = sendTopic2, + # payload = payload) == true + # node2.protocolState(Waku).queue.items.len == 1 + + # await f.withTimeout(waitInterval) + # f.read() == 1 + # node1.protocolState(Waku).queue.items.len == 1 + + # node1.unsubscribeFilter(filter) == true + + # await node1.setBloomFilter(fullBloom()) + + # resetMessageQueues(node1, node2) + + # asyncTest "PoW blocking": + # let topic = [byte 0, 0, 0, 0] + # let payload = repeat(byte 0, 10) + + # await node1.setPowRequirement(1_000_000) + # check: + # node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + # node2.protocolState(Waku).queue.items.len == 1 + # await sleepAsync(waitInterval) + # check: + # node1.protocolState(Waku).queue.items.len == 0 + + # resetMessageQueues(node1, node2) + + # await node1.setPowRequirement(0.0) + # check: + # node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true + # node2.protocolState(Waku).queue.items.len == 1 + # await sleepAsync(waitInterval) + # check: + # node1.protocolState(Waku).queue.items.len == 1 + + # resetMessageQueues(node1, node2) + + # asyncTest "Queue pruning": + # let topic = [byte 0, 0, 0, 0] + # let payload = repeat(byte 0, 10) + # # We need a minimum TTL of 2 as when set to 1 there is a small chance that + # # it is already expired after messageInterval due to rounding down of float + # # to uint32 in postMessage() + # let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire + # for i in countdown(10, 1): + # check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload) == true + # check node2.protocolState(Waku).queue.items.len == 10 + + # await sleepAsync(waitInterval) + # check node1.protocolState(Waku).queue.items.len == 10 + + # await sleepAsync(milliseconds((lowerTTL+1)*1000)) + # check node1.protocolState(Waku).queue.items.len == 0 + # check node2.protocolState(Waku).queue.items.len == 0 + + # resetMessageQueues(node1, node2) + + # asyncTest "P2P post": + # let topic = [byte 0, 0, 0, 0] + # var f: Future[int] = newFuture[int]() + # proc handler(msg: ReceivedMessage) = + # check msg.decoded.payload == repeat(byte 4, 10) + # f.complete(1) + + # var filter = node1.subscribeFilter(newFilter(topics = @[topic], + # allowP2P = true), handler) + # check: + # node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true + # node2.postMessage(ttl = 10, topic = topic, + # payload = repeat(byte 4, 10), + # targetPeer = some(toNodeId(node1.keys.pubkey))) == true + + # await f.withTimeout(waitInterval) + # f.read() == 1 + # node1.protocolState(Waku).queue.items.len == 0 + # node2.protocolState(Waku).queue.items.len == 0 + + # node1.unsubscribeFilter(filter) == true + + # asyncTest "Light node posting": + # var ln1 = setupTestNode(Waku) + # ln1.setLightNode(true) + + # await ln1.peerPool.connectToNode(newNode(initENode(node2.keys.pubKey, + # node2.address))) + + # let topic = [byte 0, 0, 0, 0] + + # check: + # # normal post + # ln1.postMessage(ttl = safeTTL, topic = topic, 
+ # payload = repeat(byte 0, 10)) == false + # ln1.protocolState(Waku).queue.items.len == 0 + # # P2P post + # ln1.postMessage(ttl = safeTTL, topic = topic, + # payload = repeat(byte 0, 10), + # targetPeer = some(toNodeId(node2.keys.pubkey))) == true + # ln1.protocolState(Waku).queue.items.len == 0 + + # asyncTest "Connect two light nodes": + # var ln1 = setupTestNode(Waku) + # var ln2 = setupTestNode(Waku) + + # ln1.setLightNode(true) + # ln2.setLightNode(true) + + # ln2.startListening() + # let peer = await ln1.rlpxConnect(newNode(initENode(ln2.keys.pubKey, + # ln2.address))) + # check peer.isNil == true + + asyncTest "WakuWhisper and Whisper peers connected": check: - await allFutures(futures).withTimeout(waitInterval) - node1.protocolState(Waku).queue.items.len == 4 - - for filter in filters: - check node1.unsubscribeFilter(filter) == true + nodeWakuWhisper.peerPool.connectedNodes.len() == 1 - resetMessageQueues(node1, node2) - - asyncTest "Filters with topics": + asyncTest "WhisperWaku and Whisper filters with topics": let topic1 = [byte 0x12, 0, 0, 0] let topic2 = [byte 0x34, 0, 0, 0] var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)] var futures = [newFuture[int](), newFuture[int]()] - proc handler1(msg: ReceivedMessage) = + + proc handler1(msg: whisper.ReceivedMessage) = check msg.decoded.payload == payloads[0] futures[0].complete(1) - proc handler2(msg: ReceivedMessage) = + proc handler2(msg: whisper.ReceivedMessage) = check msg.decoded.payload == payloads[1] futures[1].complete(1) - var filter1 = node1.subscribeFilter(newFilter(topics = @[topic1]), handler1) - var filter2 = node1.subscribeFilter(newFilter(topics = @[topic2]), handler2) + var filter1 = nodeWakuWhisper.subscribeFilter(whisper.newFilter(topics = @[topic1]), handler1) + var filter2 = nodeWakuWhisper.subscribeFilter(whisper.newFilter(topics = @[topic2]), handler2) check: - node2.postMessage(ttl = safeTTL + 1, topic = topic1, - payload = payloads[0]) == true - node2.postMessage(ttl = safeTTL, topic = topic2, - payload = payloads[1]) == true - node2.protocolState(Waku).queue.items.len == 2 + whisper.postMessage(nodeWhisper, ttl = safeTTL + 1, topic = topic1, + payload = payloads[0]) == true + whisper.postMessage(nodeWhisper, ttl = safeTTL, topic = topic2, + payload = payloads[1]) == true + nodeWhisper.protocolState(Whisper).queue.items.len == 2 await allFutures(futures).withTimeout(waitInterval) - node1.protocolState(Waku).queue.items.len == 2 - - node1.unsubscribeFilter(filter1) == true - node1.unsubscribeFilter(filter2) == true - - resetMessageQueues(node1, node2) - - asyncTest "Filters with PoW": - let topic = [byte 0x12, 0, 0, 0] - var payload = repeat(byte 0, 10) - var futures = [newFuture[int](), newFuture[int]()] - proc handler1(msg: ReceivedMessage) = - check msg.decoded.payload == payload - futures[0].complete(1) - proc handler2(msg: ReceivedMessage) = - check msg.decoded.payload == payload - futures[1].complete(1) - - var filter1 = node1.subscribeFilter(newFilter(topics = @[topic], powReq = 0), - handler1) - var filter2 = node1.subscribeFilter(newFilter(topics = @[topic], - powReq = 1_000_000), handler2) - - check: - node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true - - (await futures[0].withTimeout(waitInterval)) == true - (await futures[1].withTimeout(waitInterval)) == false - node1.protocolState(Waku).queue.items.len == 1 - - node1.unsubscribeFilter(filter1) == true - node1.unsubscribeFilter(filter2) == true - - resetMessageQueues(node1, node2) - - asyncTest "Filters with 
queues": - let topic = [byte 0, 0, 0, 0] - let payload = repeat(byte 0, 10) - - var filter = node1.subscribeFilter(newFilter(topics = @[topic])) - for i in countdown(10, 1): - check node2.postMessage(ttl = safeTTL, topic = topic, - payload = payload) == true - - await sleepAsync(waitInterval) - check: - node1.getFilterMessages(filter).len() == 10 - node1.getFilterMessages(filter).len() == 0 - node1.unsubscribeFilter(filter) == true - resetMessageQueues(node1, node2) + # This shows WakuWhisper can receive Whisper messages + # TODO: This should also make its way to Waku state! Where? + nodeWakuWhisper.protocolState(Whisper).queue.items.len == 2 - asyncTest "Local filter notify": - let topic = [byte 0, 0, 0, 0] + # XXX: How does this look with protocol state for waku and whisper? + whisper.unsubscribeFilter(nodeWakuWhisper, filter1) == true + whisper.unsubscribeFilter(nodeWakuWhisper, filter2) == true - var filter = node1.subscribeFilter(newFilter(topics = @[topic])) - check: - node1.postMessage(ttl = safeTTL, topic = topic, - payload = repeat(byte 4, 10)) == true - node1.getFilterMessages(filter).len() == 1 - node1.unsubscribeFilter(filter) == true - - await sleepAsync(waitInterval) - resetMessageQueues(node1, node2) - - asyncTest "Bloomfilter blocking": - let sendTopic1 = [byte 0x12, 0, 0, 0] - let sendTopic2 = [byte 0x34, 0, 0, 0] - let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]] - let payload = repeat(byte 0, 10) - var f: Future[int] = newFuture[int]() - proc handler(msg: ReceivedMessage) = - check msg.decoded.payload == payload - f.complete(1) - var filter = node1.subscribeFilter(newFilter(topics = filterTopics), handler) - await node1.setBloomFilter(node1.filtersToBloom()) + # XXX: This reads a bit weird, but eh + waku.resetMessageQueue(nodeWakuWhisper) + whisper.resetMessageQueue(nodeWakuWhisper) + whisper.resetMessageQueue(nodeWhisper) check: - node2.postMessage(ttl = safeTTL, topic = sendTopic1, - payload = payload) == true - node2.protocolState(Waku).queue.items.len == 1 - - (await f.withTimeout(waitInterval)) == false - node1.protocolState(Waku).queue.items.len == 0 - - resetMessageQueues(node1, node2) - - f = newFuture[int]() - - check: - node2.postMessage(ttl = safeTTL, topic = sendTopic2, - payload = payload) == true - node2.protocolState(Waku).queue.items.len == 1 - - await f.withTimeout(waitInterval) - f.read() == 1 - node1.protocolState(Waku).queue.items.len == 1 + nodeWhisper.protocolState(Whisper).queue.items.len == 0 + nodeWakuWhisper.protocolState(Whisper).queue.items.len == 0 - node1.unsubscribeFilter(filter) == true - - await node1.setBloomFilter(fullBloom()) - - resetMessageQueues(node1, node2) - - asyncTest "PoW blocking": - let topic = [byte 0, 0, 0, 0] - let payload = repeat(byte 0, 10) - - await node1.setPowRequirement(1_000_000) - check: - node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true - node2.protocolState(Waku).queue.items.len == 1 - await sleepAsync(waitInterval) - check: - node1.protocolState(Waku).queue.items.len == 0 - - resetMessageQueues(node1, node2) - - await node1.setPowRequirement(0.0) - check: - node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true - node2.protocolState(Waku).queue.items.len == 1 - await sleepAsync(waitInterval) - check: - node1.protocolState(Waku).queue.items.len == 1 - - resetMessageQueues(node1, node2) - - asyncTest "Queue pruning": - let topic = [byte 0, 0, 0, 0] - let payload = repeat(byte 0, 10) - # We need a minimum TTL of 2 as when set to 1 there is a small 
chance that - # it is already expired after messageInterval due to rounding down of float - # to uint32 in postMessage() - let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire - for i in countdown(10, 1): - check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload) == true - check node2.protocolState(Waku).queue.items.len == 10 - - await sleepAsync(waitInterval) - check node1.protocolState(Waku).queue.items.len == 10 - - await sleepAsync(milliseconds((lowerTTL+1)*1000)) - check node1.protocolState(Waku).queue.items.len == 0 - check node2.protocolState(Waku).queue.items.len == 0 - - resetMessageQueues(node1, node2) - - asyncTest "P2P post": - let topic = [byte 0, 0, 0, 0] - var f: Future[int] = newFuture[int]() - proc handler(msg: ReceivedMessage) = - check msg.decoded.payload == repeat(byte 4, 10) - f.complete(1) - - var filter = node1.subscribeFilter(newFilter(topics = @[topic], - allowP2P = true), handler) - check: - node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true - node2.postMessage(ttl = 10, topic = topic, - payload = repeat(byte 4, 10), - targetPeer = some(toNodeId(node1.keys.pubkey))) == true - - await f.withTimeout(waitInterval) - f.read() == 1 - node1.protocolState(Waku).queue.items.len == 0 - node2.protocolState(Waku).queue.items.len == 0 - - node1.unsubscribeFilter(filter) == true - - asyncTest "Light node posting": - var ln1 = setupTestNode(Waku) - ln1.setLightNode(true) - - await ln1.peerPool.connectToNode(newNode(initENode(node2.keys.pubKey, - node2.address))) - - let topic = [byte 0, 0, 0, 0] - - check: - # normal post - ln1.postMessage(ttl = safeTTL, topic = topic, - payload = repeat(byte 0, 10)) == false - ln1.protocolState(Waku).queue.items.len == 0 - # P2P post - ln1.postMessage(ttl = safeTTL, topic = topic, - payload = repeat(byte 0, 10), - targetPeer = some(toNodeId(node2.keys.pubkey))) == true - ln1.protocolState(Waku).queue.items.len == 0 - - asyncTest "Connect two light nodes": - var ln1 = setupTestNode(Waku) - var ln2 = setupTestNode(Waku) - - ln1.setLightNode(true) - ln2.setLightNode(true) - - ln2.startListening() - let peer = await ln1.rlpxConnect(newNode(initENode(ln2.keys.pubKey, - ln2.address))) - check peer.isNil == true + # TODO: Add test for Waku node also listening on Whisper topic From fc718035087470776edf0a0140b04c147860284a Mon Sep 17 00:00:00 2001 From: Oskar Thoren Date: Tue, 19 Nov 2019 11:13:00 +0800 Subject: [PATCH 08/13] Waku version 0 (after fix-119) --- eth/p2p/rlpx_protocols/waku_protocol.nim | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/eth/p2p/rlpx_protocols/waku_protocol.nim b/eth/p2p/rlpx_protocols/waku_protocol.nim index 015703da..5d96ec69 100644 --- a/eth/p2p/rlpx_protocols/waku_protocol.nim +++ b/eth/p2p/rlpx_protocols/waku_protocol.nim @@ -54,7 +54,7 @@ const defaultQueueCapacity = 256 defaultFilterQueueCapacity = 64 # XXX: Crashes if version is set to 0, P2P DSL bug or spec concern? - wakuVersion* = 1 ## Waku version. + wakuVersion* = 0 ## Waku version. wakuVersionStr* = $wakuVersion ## Waku version. defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node. 
defaultMaxMsgSize* = 1024'u32 * 1024'u32 ## The current default and max From eb06230e7a6b193464c6158cd945461aab480e92 Mon Sep 17 00:00:00 2001 From: Oskar Thoren Date: Tue, 19 Nov 2019 11:14:43 +0800 Subject: [PATCH 09/13] wkk -> waku rlpx based on discussion --- eth/p2p/rlpx_protocols/waku_protocol.nim | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/eth/p2p/rlpx_protocols/waku_protocol.nim b/eth/p2p/rlpx_protocols/waku_protocol.nim index 5d96ec69..8c22c73c 100644 --- a/eth/p2p/rlpx_protocols/waku_protocol.nim +++ b/eth/p2p/rlpx_protocols/waku_protocol.nim @@ -522,7 +522,7 @@ proc initMessage*(env: Envelope, powCalc = true): Message = proc hash*(msg: Message): hashes.Hash = hash(msg.hash.data) proc allowed*(msg: Message, config: WakuConfig): bool = - # Check max msg size, already happens in RLPx but there is a specific wkk + # Check max msg size, already happens in RLPx but there is a specific waku # max msg size which should always be < RLPx max msg size if msg.size > config.maxMsgSize: warn "Message size too large", size = msg.size @@ -748,7 +748,7 @@ proc initProtocolState*(network: WakuNetwork, node: EthereumNode) {.gcsafe.} = asyncCheck node.run(network) p2pProtocol Waku(version = wakuVersion, - rlpxName = "wkk", + rlpxName = "waku", peerState = WakuPeer, networkState = WakuNetwork): From 3efcdf5c351395164e629f3752b71ba0cfb6dc80 Mon Sep 17 00:00:00 2001 From: kdeme Date: Tue, 19 Nov 2019 13:53:38 +0100 Subject: [PATCH 10/13] Remove duplicated code in waku_protocol by importing whisper_types --- eth/p2p/rlpx_protocols/waku_protocol.nim | 673 +---------------------- 1 file changed, 16 insertions(+), 657 deletions(-) diff --git a/eth/p2p/rlpx_protocols/waku_protocol.nim b/eth/p2p/rlpx_protocols/waku_protocol.nim index 8c22c73c..96cf1ead 100644 --- a/eth/p2p/rlpx_protocols/waku_protocol.nim +++ b/eth/p2p/rlpx_protocols/waku_protocol.nim @@ -35,25 +35,17 @@ ## else there will be no peers to send and receive messages from. import - algorithm, bitops, math, options, sequtils, strutils, tables, times, chronos, - secp256k1, chronicles, hashes, stew/[byteutils, endians2], - nimcrypto/[bcmode, hash, keccak, rijndael, sysrand], - eth/common/eth_types, eth/[keys, rlp, async_utils, p2p], eth/p2p/ecies + options, tables, times, chronos, chronicles, + eth/[keys, async_utils, p2p], whisper/whisper_types + +export + whisper_types logScope: topics = "waku" const - flagsLen = 1 ## payload flags field length, bytes - gcmIVLen = 12 ## Length of IV (seed) used for AES - gcmTagLen = 16 ## Length of tag used to authenticate AES-GCM-encrypted message - padMaxLen = 256 ## payload will be padded to multiples of this by default - payloadLenLenBits = 0b11'u8 ## payload flags length-of-length mask - signatureBits = 0b100'u8 ## payload flags signature mask - bloomSize = 512 div 8 defaultQueueCapacity = 256 - defaultFilterQueueCapacity = 64 - # XXX: Crashes if version is set to 0, P2P DSL bug or spec concern? wakuVersion* = 0 ## Waku version. wakuVersionStr* = $wakuVersion ## Waku version. defaultMinPow* = 0.2'f64 ## The default minimum PoW requirement for this node. @@ -65,461 +57,25 @@ const ## queue is pruned, in ms. type - Hash* = MDigest[256] - SymKey* = array[256 div 8, byte] ## AES256 key. - Topic* = array[4, byte] ## 4 bytes that can be used to filter messages on. - Bloom* = array[bloomSize, byte] ## A bloom filter that can be used to identify - ## a number of topics that a peer is interested in. - # XXX: nim-eth-bloom has really quirky API and fixed - # bloom size. 
- # stint is massive overkill / poor fit - a bloom filter is an array of bits, - # not a number - - Payload* = object - ## Payload is what goes in the data field of the Envelope. - - src*: Option[PrivateKey] ## Optional key used for signing message - dst*: Option[PublicKey] ## Optional key used for asymmetric encryption - symKey*: Option[SymKey] ## Optional key used for symmetric encryption - payload*: Bytes ## Application data / message contents - padding*: Option[Bytes] ## Padding - if unset, will automatically pad up to - ## nearest maxPadLen-byte boundary - DecodedPayload* = object - ## The decoded payload of a received message. - - src*: Option[PublicKey] ## If the message was signed, this is the public key - ## of the source - payload*: Bytes ## Application data / message contents - padding*: Option[Bytes] ## Message padding - - Envelope* = object - ## What goes on the wire in the waku protocol - a payload and some - ## book-keeping - # Don't touch field order, there's lots of macro magic that depends on it - expiry*: uint32 ## Unix timestamp when message expires - ttl*: uint32 ## Time-to-live, seconds - message was created at (expiry - ttl) - topic*: Topic - data*: Bytes ## Payload, as given by user - nonce*: uint64 ## Nonce used for proof-of-work calculation - - Message* = object - ## An Envelope with a few cached properties - - env*: Envelope - hash*: Hash ## Hash, as calculated for proof-of-work - size*: uint32 ## RLP-encoded size of message - pow*: float64 ## Calculated proof-of-work - bloom*: Bloom ## Filter sent to direct peers for topic-based filtering - isP2P: bool - - ReceivedMessage* = object - ## A received message that matched a filter and was possible to decrypt. - ## Contains the decoded payload and additional information. - decoded*: DecodedPayload - timestamp*: uint32 - ttl*: uint32 - topic*: Topic - pow*: float64 - hash*: Hash - dst*: Option[PublicKey] - - Queue* = object - ## Bounded message repository - ## - ## Waku uses proof-of-work to judge the usefulness of a message staying - ## in the "cloud" - messages with low proof-of-work will be removed to make - ## room for those with higher pow, even if they haven't expired yet. - ## Larger messages and those with high time-to-live will require more pow. - items*: seq[Message] ## Sorted by proof-of-work - itemHashes*: HashSet[Message] ## For easy duplication checking - # XXX: itemHashes is added for easy message duplication checking and for - # easy pruning of the peer received message sets. It does have an impact on - # adding and pruning of items however. - # Need to give it some more thought and check where most time is lost in - # typical cases, perhaps we are better of with one hash table (lose PoW - # sorting however), or perhaps there is a simpler solution... - - capacity*: int ## Max messages to keep. 
\ - ## XXX: really big messages can cause excessive mem usage when using msg \ - ## count - - FilterMsgHandler* = proc(msg: ReceivedMessage) {.gcsafe, closure.} - - Filter* = object - src*: Option[PublicKey] - privateKey*: Option[PrivateKey] - symKey*: Option[SymKey] - topics*: seq[Topic] - powReq*: float64 - allowP2P*: bool - - bloom: Bloom # Cached bloom filter of all topics of filter - handler: FilterMsgHandler - queue: seq[ReceivedMessage] - - Filters* = Table[string, Filter] - WakuConfig* = object powRequirement*: float64 bloom*: Bloom isLightNode*: bool maxMsgSize*: uint32 -# Utilities -------------------------------------------------------------------- - -proc leadingZeroBits(hash: MDigest): int = - ## Number of most significant zero bits before the first one - for h in hash.data: - static: doAssert sizeof(h) == 1 - if h == 0: - result += 8 - else: - result += countLeadingZeroBits(h) - break - -proc calcPow*(size, ttl: uint64, hash: Hash): float64 = - ## Waku proof-of-work is defined as the best bit of a hash divided by - ## encoded size and time-to-live, such that large and long-lived messages get - ## penalized - - let bits = leadingZeroBits(hash) - return pow(2.0, bits.float64) / (size.float64 * ttl.float64) - -proc topicBloom*(topic: Topic): Bloom = - ## Waku uses 512-bit bloom filters meaning 9 bits of indexing - 3 9-bit - ## indexes into the bloom are created using the first 3 bytes of the topic and - ## complementing each byte with an extra bit from the last topic byte - for i in 0..<3: - var idx = uint16(topic[i]) - if (topic[3] and byte(1 shl i)) != 0: # fetch the 9'th bit from the last byte - idx = idx + 256 - - doAssert idx <= 511 - result[idx div 8] = result[idx div 8] or byte(1 shl (idx and 7'u16)) - -proc generateRandomID*(): string = - var bytes: array[256 div 8, byte] - while true: # XXX: error instead of looping? - if randomBytes(bytes) == 256 div 8: - result = toHex(bytes) - break - -proc `or`(a, b: Bloom): Bloom = - for i in 0..= 256*256*256: - notice "Payload exceeds max length", len = self.payload.len - return - - # length of the payload length field :) - let payloadLenLen = - if self.payload.len >= 256*256: 3'u8 - elif self.payload.len >= 256: 2'u8 - else: 1'u8 - - let signatureLen = - if self.src.isSome(): keys.RawSignatureSize - else: 0 - - # useful data length - let dataLen = flagsLen + payloadLenLen.int + self.payload.len + signatureLen - - let padLen = - if self.padding.isSome(): self.padding.get().len - # is there a reason why 256 bytes are padded when the dataLen is 256? - else: padMaxLen - (dataLen mod padMaxLen) - - # buffer space that we need to allocate - let totalLen = dataLen + padLen - - var plain = newSeqOfCap[byte](totalLen) - - let signatureFlag = - if self.src.isSome(): signatureBits - else: 0'u8 - - # byte 0: flags with payload length length and presence of signature - plain.add payloadLenLen or signatureFlag - - # next, length of payload - little endian (who comes up with this stuff? why - # can't the world just settle on one endian?) - let payloadLenLE = self.payload.len.uint32.toBytesLE - - # No, I have no love for nim closed ranges - such a mess to remember the extra - # < or risk off-by-ones when working with lengths.. - plain.add payloadLenLE[0.. pos + keys.RawSignatureSize: - res.padding = some(plain[pos .. ^(keys.RawSignatureSize+1)]) - else: - if plain.len > pos: - res.padding = some(plain[pos .. 
^1]) - - return some(res) - -# Envelopes -------------------------------------------------------------------- - -proc valid*(self: Envelope, now = epochTime()): bool = - if self.expiry.float64 < now: return false # expired - if self.ttl <= 0: return false # this would invalidate pow calculation - - let created = self.expiry - self.ttl - if created.float64 > (now + 2.0): return false # created in the future - - return true - -proc len(self: Envelope): int = 20 + self.data.len - -proc toShortRlp*(self: Envelope): Bytes = - ## RLP-encoded message without nonce is used during proof-of-work calculations - rlp.encodeList(self.expiry, self.ttl, self.topic, self.data) - -proc toRlp(self: Envelope): Bytes = - ## What gets sent out over the wire includes the nonce - rlp.encode(self) - -proc minePow*(self: Envelope, seconds: float, bestBitTarget: int = 0): (uint64, Hash) = - ## For the given envelope, spend millis milliseconds to find the - ## best proof-of-work and return the nonce - let bytes = self.toShortRlp() - - var ctx: keccak256 - ctx.init() - ctx.update(bytes) - - var bestBit: int = 0 - - let mineEnd = epochTime() + seconds - - var i: uint64 - while epochTime() < mineEnd or bestBit == 0: # At least one round - var tmp = ctx # copy hash calculated so far - we'll reuse that for each iter - tmp.update(i.toBytesBE()) - # XXX:a random nonce here would not leak number of iters - let hash = tmp.finish() - let zeroBits = leadingZeroBits(hash) - if zeroBits > bestBit: # XXX: could also compare hashes as numbers instead - bestBit = zeroBits - result = (i, hash) - if bestBitTarget > 0 and bestBit >= bestBitTarget: - break - - i.inc - -proc calcPowHash*(self: Envelope): Hash = - ## Calculate the message hash, as done during mining - this can be used to - ## verify proof-of-work - - let bytes = self.toShortRlp() - - var ctx: keccak256 - ctx.init() - ctx.update(bytes) - ctx.update(self.nonce.toBytesBE()) - return ctx.finish() - -# Messages --------------------------------------------------------------------- - -proc cmpPow(a, b: Message): int = - ## Biggest pow first, lowest at the end (for easy popping) - if a.pow > b.pow: 1 - elif a.pow == b.pow: 0 - else: -1 + WakuPeer = ref object + initialized: bool # when successfully completed the handshake + powRequirement*: float64 + bloom*: Bloom + isLightNode*: bool + trusted*: bool + received: HashSet[Message] -proc initMessage*(env: Envelope, powCalc = true): Message = - result.env = env - result.size = env.toRlp().len().uint32 # XXX: calc len without creating RLP - result.bloom = topicBloom(env.topic) - if powCalc: - result.hash = env.calcPowHash() - result.pow = calcPow(result.env.len.uint32, result.env.ttl, result.hash) - trace "Message PoW", pow = result.pow.formatFloat(ffScientific) + WakuNetwork = ref object + queue*: Queue + filters*: Filters + config*: WakuConfig -proc hash*(msg: Message): hashes.Hash = hash(msg.hash.data) proc allowed*(msg: Message, config: WakuConfig): bool = # Check max msg size, already happens in RLPx but there is a specific waku @@ -538,203 +94,6 @@ proc allowed*(msg: Message, config: WakuConfig): bool = return true -# NOTE: Hashing and leading zeroes calculation is now the same between geth, -# parity and this implementation. -# However, there is still a difference in the size calculation. -# See also here: https://github.com/ethereum/go-ethereum/pull/19753 -# This implementation is not conform EIP-627 as we do not use the size of the -# RLP-encoded envelope, but the size of the envelope object itself. 
-# This is done to be able to correctly calculate the bestBitTarget. -# Other options would be: -# - work directly with powTarget in minePow, but this requires recalculation of -# rlp size + calcPow -# - Use worst case size of envelope nonce -# - Mine PoW for x interval, calcPow of best result, if target not met .. repeat -proc sealEnvelope(msg: var Message, powTime: float, powTarget: float): bool = - let size = msg.env.len - if powTarget > 0: - let x = powTarget * size.float * msg.env.ttl.float - var bestBitTarget: int - if x <= 1: # log() would return negative numbers or 0 - bestBitTarget = 1 - else: - bestBitTarget = ceil(log(x, 2)).int - (msg.env.nonce, msg.hash) = msg.env.minePow(powTime, bestBitTarget) - else: - # If no target is set, we are certain of executed powTime - msg.env.expiry += powTime.uint32 - (msg.env.nonce, msg.hash) = msg.env.minePow(powTime) - - msg.pow = calcPow(size.uint32, msg.env.ttl, msg.hash) - trace "Message PoW", pow = msg.pow - if msg.pow < powTarget: - return false - - return true - -# Queues ----------------------------------------------------------------------- - -proc initQueue*(capacity: int): Queue = - result.items = newSeqOfCap[Message](capacity) - result.capacity = capacity - result.itemHashes.init() - -proc prune(self: var Queue) {.raises: [].} = - ## Remove items that are past their expiry time - let now = epochTime().uint32 - - # keepIf code + pruning of hashset - var pos = 0 - for i in 0 ..< len(self.items): - if self.items[i].env.expiry > now: - if pos != i: - shallowCopy(self.items[pos], self.items[i]) - inc(pos) - else: self.itemHashes.excl(self.items[i]) - setLen(self.items, pos) - -proc add*(self: var Queue, msg: Message): bool = - ## Add a message to the queue. - ## If we're at capacity, we will be removing, in order: - ## * expired messages - ## * lowest proof-of-work message - this may be `msg` itself! - - if self.items.len >= self.capacity: - self.prune() # Only prune if needed - - if self.items.len >= self.capacity: - # Still no room - go by proof-of-work quantity - let last = self.items[^1] - - if last.pow > msg.pow or - (last.pow == msg.pow and last.env.expiry > msg.env.expiry): - # The new message has less pow or will expire earlier - drop it - return false - - self.items.del(self.items.len() - 1) - self.itemHashes.excl(last) - - # check for duplicate - if self.itemHashes.containsOrIncl(msg): - return false - else: - self.items.insert(msg, self.items.lowerBound(msg, cmpPow)) - return true - -# Filters ---------------------------------------------------------------------- -proc newFilter*(src = none[PublicKey](), privateKey = none[PrivateKey](), - symKey = none[SymKey](), topics: seq[Topic] = @[], - powReq = 0.0, allowP2P = false): Filter = - # Zero topics will give an empty bloom filter which is fine as this bloom - # filter is only used to `or` with existing/other bloom filters. Not to do - # matching. - Filter(src: src, privateKey: privateKey, symKey: symKey, topics: topics, - powReq: powReq, allowP2P: allowP2P, bloom: toBloom(topics)) - -proc subscribeFilter*(filters: var Filters, filter: Filter, - handler:FilterMsgHandler = nil): string = - # NOTE: Should we allow a filter without a key? Encryption is mandatory in v6? - # Check if asymmetric _and_ symmetric key? Now asymmetric just has precedence. 
- let id = generateRandomID() - var filter = filter - if handler.isNil(): - filter.queue = newSeqOfCap[ReceivedMessage](defaultFilterQueueCapacity) - else: - filter.handler = handler - - filters.add(id, filter) - debug "Filter added", filter = id - return id - -proc notify*(filters: var Filters, msg: Message) {.gcsafe.} = - var decoded: Option[DecodedPayload] - var keyHash: Hash - var dst: Option[PublicKey] - - for filter in filters.mvalues: - if not filter.allowP2P and msg.isP2P: - continue - - # if message is direct p2p PoW doesn't matter - if msg.pow < filter.powReq and not msg.isP2P: - continue - - if filter.topics.len > 0: - if msg.env.topic notin filter.topics: - continue - - # Decode, if already decoded previously check if hash of key matches - if decoded.isNone(): - decoded = decode(msg.env.data, dst = filter.privateKey, - symKey = filter.symKey) - if decoded.isNone(): - continue - if filter.privateKey.isSome(): - keyHash = keccak256.digest(filter.privateKey.get().data) - # TODO: Get rid of the hash and just use pubkey to compare? - dst = some(getPublicKey(filter.privateKey.get())) - elif filter.symKey.isSome(): - keyHash = keccak256.digest(filter.symKey.get()) - # else: - # NOTE: In this case the message was not encrypted - else: - if filter.privateKey.isSome(): - if keyHash != keccak256.digest(filter.privateKey.get().data): - continue - elif filter.symKey.isSome(): - if keyHash != keccak256.digest(filter.symKey.get()): - continue - # else: - # NOTE: In this case the message was not encrypted - - # When decoding is done we can check the src (signature) - if filter.src.isSome(): - let src: Option[PublicKey] = decoded.get().src - if not src.isSome(): - continue - elif src.get() != filter.src.get(): - continue - - let receivedMsg = ReceivedMessage(decoded: decoded.get(), - timestamp: msg.env.expiry - msg.env.ttl, - ttl: msg.env.ttl, - topic: msg.env.topic, - pow: msg.pow, - hash: msg.hash, - dst: dst) - # Either run callback or add to queue - if filter.handler.isNil(): - filter.queue.insert(receivedMsg) - else: - filter.handler(receivedMsg) - -proc getFilterMessages*(filters: var Filters, filterId: string): seq[ReceivedMessage] = - result = @[] - if filters.contains(filterId): - if filters[filterId].handler.isNil(): - shallowCopy(result, filters[filterId].queue) - filters[filterId].queue = - newSeqOfCap[ReceivedMessage](defaultFilterQueueCapacity) - -proc toBloom*(filters: Filters): Bloom = - for filter in filters.values: - if filter.topics.len > 0: - result = result or filter.bloom - -type - WakuPeer = ref object - initialized: bool # when successfully completed the handshake - powRequirement*: float64 - bloom*: Bloom - isLightNode*: bool - trusted*: bool - received: HashSet[Message] - - WakuNetwork = ref object - queue*: Queue - filters*: Filters - config*: WakuConfig - proc run(peer: Peer) {.gcsafe, async.} proc run(node: EthereumNode, network: WakuNetwork) {.gcsafe, async.} From ffbc42aee52fcf4c766d293d8690da4ec7afad6d Mon Sep 17 00:00:00 2001 From: kdeme Date: Tue, 19 Nov 2019 17:22:35 +0100 Subject: [PATCH 11/13] Implement quick Waku - Whisper bridge by sharing the queue + adjust test --- eth.nimble | 1 + eth/p2p/rlpx_protocols/waku_protocol.nim | 18 +- eth/p2p/rlpx_protocols/whisper_protocol.nim | 13 +- tests/p2p/test_waku.nim | 416 -------------------- tests/p2p/test_waku_bridge.nim | 93 +++++ tests/p2p/test_waku_connect.nim | 390 ------------------ 6 files changed, 113 insertions(+), 818 deletions(-) delete mode 100644 tests/p2p/test_waku.nim create mode 100644 
tests/p2p/test_waku_bridge.nim delete mode 100644 tests/p2p/test_waku_connect.nim diff --git a/eth.nimble b/eth.nimble index 296b3cd7..6215f14c 100644 --- a/eth.nimble +++ b/eth.nimble @@ -52,6 +52,7 @@ proc runP2pTests() = "test_shh", "test_shh_config", "test_shh_connect", + "test_waku_bridge", "test_protocol_handlers", ]: runTest("tests/p2p/" & filename) diff --git a/eth/p2p/rlpx_protocols/waku_protocol.nim b/eth/p2p/rlpx_protocols/waku_protocol.nim index 96cf1ead..ff048863 100644 --- a/eth/p2p/rlpx_protocols/waku_protocol.nim +++ b/eth/p2p/rlpx_protocols/waku_protocol.nim @@ -38,6 +38,8 @@ import options, tables, times, chronos, chronicles, eth/[keys, async_utils, p2p], whisper/whisper_types +import eth/p2p/rlpx_protocols/whisper_protocol + export whisper_types @@ -72,7 +74,7 @@ type received: HashSet[Message] WakuNetwork = ref object - queue*: Queue + queue*: ref Queue filters*: Filters config*: WakuConfig @@ -98,7 +100,11 @@ proc run(peer: Peer) {.gcsafe, async.} proc run(node: EthereumNode, network: WakuNetwork) {.gcsafe, async.} proc initProtocolState*(network: WakuNetwork, node: EthereumNode) {.gcsafe.} = - network.queue = initQueue(defaultQueueCapacity) + if node.protocolState(Whisper).isNil: + new(network.queue) + network.queue[] = initQueue(defaultQueueCapacity) + else: + network.queue = node.protocolState(Whisper).queue network.filters = initTable[string, Filter]() network.config.bloom = fullBloom() network.config.powRequirement = defaultMinPow @@ -195,7 +201,7 @@ p2pProtocol Waku(version = wakuVersion, # This can still be a duplicate message, but from another peer than # the peer who send the message. - if peer.networkState.queue.add(msg): + if peer.networkState.queue[].add(msg): # notify filters of this message peer.networkState.filters.notify(msg) @@ -294,7 +300,7 @@ proc run(node: EthereumNode, network: WakuNetwork) {.async.} = while true: # prune message queue every second # TTL unit is in seconds, so this should be sufficient? - network.queue.prune() + network.queue[].prune() # pruning the received sets is not necessary for correct workings # but simply from keeping the sets growing indefinitely node.pruneReceived() @@ -317,7 +323,7 @@ proc queueMessage(node: EthereumNode, msg: Message): bool = return false trace "Adding message to queue" - if wakuNet.queue.add(msg): + if wakuNet.queue[].add(msg): # Also notify our own filters of the message we are sending, # e.g. msg from local Dapp to Dapp wakuNet.filters.notify(msg) @@ -459,4 +465,4 @@ proc resetMessageQueue*(node: EthereumNode) = ## Full reset of the message queue. ## ## NOTE: Not something that should be run in normal circumstances. 
- node.protocolState(Waku).queue = initQueue(defaultQueueCapacity) + node.protocolState(Waku).queue[] = initQueue(defaultQueueCapacity) diff --git a/eth/p2p/rlpx_protocols/whisper_protocol.nim b/eth/p2p/rlpx_protocols/whisper_protocol.nim index c23d4e55..d92919d8 100644 --- a/eth/p2p/rlpx_protocols/whisper_protocol.nim +++ b/eth/p2p/rlpx_protocols/whisper_protocol.nim @@ -70,7 +70,7 @@ type received: HashSet[Message] WhisperNetwork = ref object - queue*: Queue + queue*: ref Queue filters*: Filters config*: WhisperConfig @@ -95,7 +95,8 @@ proc run(peer: Peer) {.gcsafe, async.} proc run(node: EthereumNode, network: WhisperNetwork) {.gcsafe, async.} proc initProtocolState*(network: WhisperNetwork, node: EthereumNode) {.gcsafe.} = - network.queue = initQueue(defaultQueueCapacity) + new(network.queue) + network.queue[] = initQueue(defaultQueueCapacity) network.filters = initTable[string, Filter]() network.config.bloom = fullBloom() network.config.powRequirement = defaultMinPow @@ -192,7 +193,7 @@ p2pProtocol Whisper(version = whisperVersion, # This can still be a duplicate message, but from another peer than # the peer who send the message. - if peer.networkState.queue.add(msg): + if peer.networkState.queue[].add(msg): # notify filters of this message peer.networkState.filters.notify(msg) @@ -291,7 +292,7 @@ proc run(node: EthereumNode, network: WhisperNetwork) {.async.} = while true: # prune message queue every second # TTL unit is in seconds, so this should be sufficient? - network.queue.prune() + network.queue[].prune() # pruning the received sets is not necessary for correct workings # but simply from keeping the sets growing indefinitely node.pruneReceived() @@ -314,7 +315,7 @@ proc queueMessage(node: EthereumNode, msg: Message): bool = return false trace "Adding message to queue" - if whisperNet.queue.add(msg): + if whisperNet.queue[].add(msg): # Also notify our own filters of the message we are sending, # e.g. msg from local Dapp to Dapp whisperNet.filters.notify(msg) @@ -456,4 +457,4 @@ proc resetMessageQueue*(node: EthereumNode) = ## Full reset of the message queue. ## ## NOTE: Not something that should be run in normal circumstances. 
- node.protocolState(Whisper).queue = initQueue(defaultQueueCapacity) + node.protocolState(Whisper).queue[] = initQueue(defaultQueueCapacity) diff --git a/tests/p2p/test_waku.nim b/tests/p2p/test_waku.nim deleted file mode 100644 index 37704bfd..00000000 --- a/tests/p2p/test_waku.nim +++ /dev/null @@ -1,416 +0,0 @@ -# -# Ethereum P2P -# (c) Copyright 2018 -# Status Research & Development GmbH -# -# Licensed under either of -# Apache License, version 2.0, (LICENSE-APACHEv2) -# MIT license (LICENSE-MIT) - -import - sequtils, options, unittest, times, tables, - nimcrypto/hash, - eth/[keys, rlp], - eth/p2p/rlpx_protocols/waku_protocol as waku - -suite "Waku payload": - test "should roundtrip without keys": - let payload = Payload(payload: @[byte 0, 1, 2]) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get()) - check: - decoded.isSome() - payload.payload == decoded.get().payload - decoded.get().src.isNone() - decoded.get().padding.get().len == 251 # 256 -1 -1 -3 - - test "should roundtrip with symmetric encryption": - var symKey: SymKey - let payload = Payload(symKey: some(symKey), payload: @[byte 0, 1, 2]) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get(), symKey = some(symKey)) - check: - decoded.isSome() - payload.payload == decoded.get().payload - decoded.get().src.isNone() - decoded.get().padding.get().len == 251 # 256 -1 -1 -3 - - test "should roundtrip with signature": - let privKey = keys.newPrivateKey() - - let payload = Payload(src: some(privKey), payload: @[byte 0, 1, 2]) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get()) - check: - decoded.isSome() - payload.payload == decoded.get().payload - privKey.getPublicKey() == decoded.get().src.get() - decoded.get().padding.get().len == 186 # 256 -1 -1 -3 -65 - - test "should roundtrip with asymmetric encryption": - let privKey = keys.newPrivateKey() - - let payload = Payload(dst: some(privKey.getPublicKey()), - payload: @[byte 0, 1, 2]) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get(), dst = some(privKey)) - check: - decoded.isSome() - payload.payload == decoded.get().payload - decoded.get().src.isNone() - decoded.get().padding.get().len == 251 # 256 -1 -1 -3 - - test "should return specified bloom": - # Geth test: https://github.com/ethersphere/go-ethereum/blob/d3441ebb563439bac0837d70591f92e2c6080303/waku/wakuv6/waku_test.go#L834 - let top0 = [byte 0, 0, 255, 6] - var x: Bloom - x[0] = byte 1 - x[32] = byte 1 - x[^1] = byte 128 - check @(top0.topicBloom) == @x - -suite "Waku payload padding": - test "should do max padding": - let payload = Payload(payload: repeat(byte 1, 254)) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get()) - check: - decoded.isSome() - payload.payload == decoded.get().payload - decoded.get().padding.isSome() - decoded.get().padding.get().len == 256 # as dataLen == 256 - - test "should do max padding with signature": - let privKey = keys.newPrivateKey() - - let payload = Payload(src: some(privKey), payload: repeat(byte 1, 189)) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get()) - check: - decoded.isSome() - payload.payload == decoded.get().payload - privKey.getPublicKey() == decoded.get().src.get() - decoded.get().padding.isSome() - decoded.get().padding.get().len == 256 # as dataLen == 256 - - test "should do min padding": - let payload = Payload(payload: repeat(byte 1, 253)) - let encoded = waku.encode(payload) - - let decoded = 
waku.decode(encoded.get()) - check: - decoded.isSome() - payload.payload == decoded.get().payload - decoded.get().padding.isSome() - decoded.get().padding.get().len == 1 # as dataLen == 255 - - test "should do min padding with signature": - let privKey = keys.newPrivateKey() - - let payload = Payload(src: some(privKey), payload: repeat(byte 1, 188)) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get()) - check: - decoded.isSome() - payload.payload == decoded.get().payload - privKey.getPublicKey() == decoded.get().src.get() - decoded.get().padding.isSome() - decoded.get().padding.get().len == 1 # as dataLen == 255 - - test "should roundtrip custom padding": - let payload = Payload(payload: repeat(byte 1, 10), - padding: some(repeat(byte 2, 100))) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get()) - check: - decoded.isSome() - payload.payload == decoded.get().payload - decoded.get().padding.isSome() - payload.padding.get() == decoded.get().padding.get() - - test "should roundtrip custom 0 padding": - let padding: seq[byte] = @[] - let payload = Payload(payload: repeat(byte 1, 10), - padding: some(padding)) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get()) - check: - decoded.isSome() - payload.payload == decoded.get().payload - decoded.get().padding.isNone() - - test "should roundtrip custom padding with signature": - let privKey = keys.newPrivateKey() - let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10), - padding: some(repeat(byte 2, 100))) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get()) - check: - decoded.isSome() - payload.payload == decoded.get().payload - privKey.getPublicKey() == decoded.get().src.get() - decoded.get().padding.isSome() - payload.padding.get() == decoded.get().padding.get() - - test "should roundtrip custom 0 padding with signature": - let padding: seq[byte] = @[] - let privKey = keys.newPrivateKey() - let payload = Payload(src: some(privKey), payload: repeat(byte 1, 10), - padding: some(padding)) - let encoded = waku.encode(payload) - - let decoded = waku.decode(encoded.get()) - check: - decoded.isSome() - payload.payload == decoded.get().payload - privKey.getPublicKey() == decoded.get().src.get() - decoded.get().padding.isNone() - -# example from https://github.com/paritytech/parity-ethereum/blob/93e1040d07e385d1219d00af71c46c720b0a1acf/waku/src/message.rs#L439 -let - env0 = Envelope( - expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0], - data: repeat(byte 9, 256), nonce: 1010101) - env1 = Envelope( - expiry:100000, ttl: 30, topic: [byte 0, 0, 0, 0], - data: repeat(byte 9, 256), nonce: 1010102) - -suite "Waku envelope": - - proc hashAndPow(env: Envelope): (string, float64) = - # This is the current implementation of go-ethereum - let size = env.toShortRlp().len().uint32 - # This is our current implementation in `waku_protocol.nim` - # let size = env.len().uint32 - # This is the EIP-627 specification - # let size = env.toRlp().len().uint32 - let hash = env.calcPowHash() - ($hash, calcPow(size, env.ttl, hash)) - - test "PoW calculation leading zeroes tests": - # Test values from Parity, in message.rs - let testHashes = [ - # 256 leading zeroes - "0x0000000000000000000000000000000000000000000000000000000000000000", - # 255 leading zeroes - "0x0000000000000000000000000000000000000000000000000000000000000001", - # no leading zeroes - "0xff00000000000000000000000000000000000000000000000000000000000000" - ] - check: - calcPow(1, 1, 
Hash.fromHex(testHashes[0])) == - 115792089237316200000000000000000000000000000000000000000000000000000000000000.0 - calcPow(1, 1, Hash.fromHex(testHashes[1])) == - 57896044618658100000000000000000000000000000000000000000000000000000000000000.0 - calcPow(1, 1, Hash.fromHex(testHashes[2])) == 1.0 - - # Test values from go-ethereum wakuv6 in envelope_test - var env = Envelope(ttl: 1, data: @[byte 0xde, 0xad, 0xbe, 0xef]) - # PoW calculation with no leading zeroes - env.nonce = 100000 - check hashAndPoW(env) == ("A788E02A95BFC673709E97CA81E39CA903BAD5638D3388964C51EB64952172D6", - 0.07692307692307693) - # PoW calculation with 8 leading zeroes - env.nonce = 276 - check hashAndPoW(env) == ("00E2374C6353C243E4073E209A7F2ACB2506522AF318B3B78CF9A88310A2A11C", - 19.692307692307693) - - test "should validate and allow envelope according to config": - let ttl = 1'u32 - let topic = [byte 1, 2, 3, 4] - let config = WakuConfig(powRequirement: 0, bloom: topic.topicBloom(), - isLightNode: false, maxMsgSize: defaultMaxMsgSize) - - let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic, - data: repeat(byte 9, 256), nonce: 0) - check env.valid() - - let msg = initMessage(env) - check msg.allowed(config) - - test "should invalidate envelope due to ttl 0": - let ttl = 0'u32 - let topic = [byte 1, 2, 3, 4] - let config = WakuConfig(powRequirement: 0, bloom: topic.topicBloom(), - isLightNode: false, maxMsgSize: defaultMaxMsgSize) - - let env = Envelope(expiry:epochTime().uint32 + ttl, ttl: ttl, topic: topic, - data: repeat(byte 9, 256), nonce: 0) - check env.valid() == false - - test "should invalidate envelope due to expired": - let ttl = 1'u32 - let topic = [byte 1, 2, 3, 4] - let config = WakuConfig(powRequirement: 0, bloom: topic.topicBloom(), - isLightNode: false, maxMsgSize: defaultMaxMsgSize) - - let env = Envelope(expiry:epochTime().uint32, ttl: ttl, topic: topic, - data: repeat(byte 9, 256), nonce: 0) - check env.valid() == false - - test "should invalidate envelope due to in the future": - let ttl = 1'u32 - let topic = [byte 1, 2, 3, 4] - let config = WakuConfig(powRequirement: 0, bloom: topic.topicBloom(), - isLightNode: false, maxMsgSize: defaultMaxMsgSize) - - # there is currently a 2 second tolerance, hence the + 3 - let env = Envelope(expiry:epochTime().uint32 + ttl + 3, ttl: ttl, topic: topic, - data: repeat(byte 9, 256), nonce: 0) - check env.valid() == false - - test "should not allow envelope due to bloom filter": - let topic = [byte 1, 2, 3, 4] - let wrongTopic = [byte 9, 8, 7, 6] - let config = WakuConfig(powRequirement: 0, bloom: wrongTopic.topicBloom(), - isLightNode: false, maxMsgSize: defaultMaxMsgSize) - - let env = Envelope(expiry:100000 , ttl: 30, topic: topic, - data: repeat(byte 9, 256), nonce: 0) - - let msg = initMessage(env) - check msg.allowed(config) == false - - -suite "Waku queue": - test "should throw out lower proof-of-work item when full": - var queue = initQueue(1) - - let msg0 = initMessage(env0) - let msg1 = initMessage(env1) - - discard queue.add(msg0) - discard queue.add(msg1) - - check: - queue.items.len() == 1 - queue.items[0].env.nonce == - (if msg0.pow > msg1.pow: msg0.env.nonce else: msg1.env.nonce) - - test "should not throw out messages as long as there is capacity": - var queue = initQueue(2) - - check: - queue.add(initMessage(env0)) == true - queue.add(initMessage(env1)) == true - - queue.items.len() == 2 - - test "check field order against expected rlp order": - check rlp.encode(env0) == - rlp.encodeList(env0.expiry, env0.ttl, env0.topic, 
env0.data, env0.nonce) - -# To test filters we do not care if the msg is valid or allowed -proc prepFilterTestMsg(pubKey = none[PublicKey](), symKey = none[SymKey](), - src = none[PrivateKey](), topic: Topic, - padding = none[seq[byte]]()): Message = - let payload = Payload(dst: pubKey, symKey: symKey, src: src, - payload: @[byte 0, 1, 2], padding: padding) - let encoded = waku.encode(payload) - let env = Envelope(expiry: 1, ttl: 1, topic: topic, data: encoded.get(), - nonce: 0) - result = initMessage(env) - -suite "Waku filter": - test "should notify filter on message with symmetric encryption": - var symKey: SymKey - let topic = [byte 0, 0, 0, 0] - let msg = prepFilterTestMsg(symKey = some(symKey), topic = topic) - - var filters = initTable[string, Filter]() - let filter = newFilter(symKey = some(symKey), topics = @[topic]) - let filterId = filters.subscribeFilter(filter) - - notify(filters, msg) - - let messages = filters.getFilterMessages(filterId) - check: - messages.len == 1 - messages[0].decoded.src.isNone() - messages[0].dst.isNone() - - test "should notify filter on message with asymmetric encryption": - let privKey = keys.newPrivateKey() - let topic = [byte 0, 0, 0, 0] - let msg = prepFilterTestMsg(pubKey = some(privKey.getPublicKey()), - topic = topic) - - var filters = initTable[string, Filter]() - let filter = newFilter(privateKey = some(privKey), topics = @[topic]) - let filterId = filters.subscribeFilter(filter) - - notify(filters, msg) - - let messages = filters.getFilterMessages(filterId) - check: - messages.len == 1 - messages[0].decoded.src.isNone() - messages[0].dst.isSome() - - test "should notify filter on message with signature": - let privKey = keys.newPrivateKey() - let topic = [byte 0, 0, 0, 0] - let msg = prepFilterTestMsg(src = some(privKey), topic = topic) - - var filters = initTable[string, Filter]() - let filter = newFilter(src = some(privKey.getPublicKey()), - topics = @[topic]) - let filterId = filters.subscribeFilter(filter) - - notify(filters, msg) - - let messages = filters.getFilterMessages(filterId) - check: - messages.len == 1 - messages[0].decoded.src.isSome() - messages[0].dst.isNone() - - test "test notify of filter against PoW requirement": - let topic = [byte 0, 0, 0, 0] - let padding = some(repeat(byte 0, 251)) - # this message has a PoW of 0.02962962962962963, number should be updated - # in case PoW algorithm changes or contents of padding, payload, topic, etc. 
- # update: now with NON rlp encoded envelope size the PoW of this message is - # 0.014492753623188406 - let msg = prepFilterTestMsg(topic = topic, padding = padding) - - var filters = initTable[string, Filter]() - let - filterId1 = filters.subscribeFilter( - newFilter(topics = @[topic], powReq = 0.014492753623188406)) - filterId2 = filters.subscribeFilter( - newFilter(topics = @[topic], powReq = 0.014492753623188407)) - - notify(filters, msg) - - check: - filters.getFilterMessages(filterId1).len == 1 - filters.getFilterMessages(filterId2).len == 0 - - test "test notify of filter on message with certain topic": - let - topic1 = [byte 0xAB, 0x12, 0xCD, 0x34] - topic2 = [byte 0, 0, 0, 0] - - let msg = prepFilterTestMsg(topic = topic1) - - var filters = initTable[string, Filter]() - let - filterId1 = filters.subscribeFilter(newFilter(topics = @[topic1])) - filterId2 = filters.subscribeFilter(newFilter(topics = @[topic2])) - - notify(filters, msg) - - check: - filters.getFilterMessages(filterId1).len == 1 - filters.getFilterMessages(filterId2).len == 0 diff --git a/tests/p2p/test_waku_bridge.nim b/tests/p2p/test_waku_bridge.nim new file mode 100644 index 00000000..abf7e453 --- /dev/null +++ b/tests/p2p/test_waku_bridge.nim @@ -0,0 +1,93 @@ +# +# Ethereum P2P +# (c) Copyright 2018 +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +import + sequtils, unittest, tables, chronos, eth/p2p, eth/p2p/peer_pool, + ./p2p_test_helper + +import eth/p2p/rlpx_protocols/waku_protocol as waku +import eth/p2p/rlpx_protocols/whisper_protocol as whisper + +let safeTTL = 5'u32 +let waitInterval = waku.messageInterval + 150.milliseconds + +suite "Waku - Whisper bridge tests": + # Waku Whisper node has both capabilities, listens to Whisper and Waku and + # relays traffic between the two. 
+ var + nodeWakuWhisper = setupTestNode(Whisper, Waku) # This will be the bridge + nodeWhisper = setupTestNode(Whisper) + nodeWaku = setupTestNode(Waku) + + nodeWakuWhisper.startListening() + let bridgeNode = newNode(initENode(nodeWakuWhisper.keys.pubKey, + nodeWakuWhisper.address)) + waitFor nodeWhisper.peerPool.connectToNode(bridgeNode) + waitFor nodeWaku.peerPool.connectToNode(bridgeNode) + + asyncTest "WakuWhisper and Whisper peers connected": + check: + nodeWakuWhisper.peerPool.connectedNodes.len() == 2 + + asyncTest "Whisper - Waku communcation via bridge": + # topic whisper node subscribes to, waku node posts to + let topic1 = [byte 0x12, 0, 0, 0] + # topic waku node subscribes to, whisper node posts to + let topic2 = [byte 0x34, 0, 0, 0] + var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)] + var futures = [newFuture[int](), newFuture[int]()] + + proc handler1(msg: whisper.ReceivedMessage) = + check msg.decoded.payload == payloads[0] + futures[0].complete(1) + proc handler2(msg: waku.ReceivedMessage) = + check msg.decoded.payload == payloads[1] + futures[1].complete(1) + + var filter1 = whisper.subscribeFilter(nodeWhisper, + whisper.newFilter(topics = @[topic1]), handler1) + var filter2 = waku.subscribeFilter(nodeWaku, + waku.newFilter(topics = @[topic2]), handler2) + + check: + # Message should also end up in the Whisper node its queue via the bridge + waku.postMessage(nodeWaku, ttl = safeTTL + 1, topic = topic1, + payload = payloads[0]) == true + # Message should also end up in the Waku node its queue via the bridge + whisper.postMessage(nodeWhisper, ttl = safeTTL, topic = topic2, + payload = payloads[1]) == true + nodeWhisper.protocolState(Whisper).queue.items.len == 1 + nodeWaku.protocolState(Waku).queue.items.len == 1 + + # waitInterval*2 as messages have to pass the bridge also (2 hops) + await allFutures(futures).withTimeout(waitInterval*2) + + # Relay can receive Whisper & Waku messages + nodeWakuWhisper.protocolState(Whisper).queue.items.len == 2 + nodeWakuWhisper.protocolState(Waku).queue.items.len == 2 + + # Whisper node can receive Waku messages (via bridge) + nodeWhisper.protocolState(Whisper).queue.items.len == 2 + # Waku node can receive Whisper messages (via bridge) + nodeWaku.protocolState(Waku).queue.items.len == 2 + + whisper.unsubscribeFilter(nodeWhisper, filter1) == true + waku.unsubscribeFilter(nodeWaku, filter2) == true + + # XXX: This reads a bit weird, but eh + waku.resetMessageQueue(nodeWaku) + whisper.resetMessageQueue(nodeWhisper) + # shared queue so Waku and Whisper should be set to 0 + waku.resetMessageQueue(nodeWakuWhisper) + + check: + nodeWhisper.protocolState(Whisper).queue.items.len == 0 + nodeWaku.protocolState(Waku).queue.items.len == 0 + nodeWakuWhisper.protocolState(Whisper).queue.items.len == 0 + nodeWakuWhisper.protocolState(Waku).queue.items.len == 0 diff --git a/tests/p2p/test_waku_connect.nim b/tests/p2p/test_waku_connect.nim deleted file mode 100644 index 88ad58f7..00000000 --- a/tests/p2p/test_waku_connect.nim +++ /dev/null @@ -1,390 +0,0 @@ -# -# Ethereum P2P -# (c) Copyright 2018 -# Status Research & Development GmbH -# -# Licensed under either of -# Apache License, version 2.0, (LICENSE-APACHEv2) -# MIT license (LICENSE-MIT) - -import - sequtils, options, unittest, tables, chronos, eth/[keys, p2p], - eth/p2p/peer_pool, ./p2p_test_helper - -import eth/p2p/rlpx_protocols/waku_protocol as waku -import eth/p2p/rlpx_protocols/whisper_protocol as whisper - -# proc resetMessageQueues(nodes: varargs[EthereumNode]) = -# for node in nodes: 
-# resetMessageQueue(node) - -let safeTTL = 5'u32 -let waitInterval = waku.messageInterval + 150.milliseconds - -suite "Waku connections": - var node1 = setupTestNode(Waku) - var node2 = setupTestNode(Waku) - node2.startListening() - waitFor node1.peerPool.connectToNode(newNode(initENode(node2.keys.pubKey, - node2.address))) - - # Waku Whisper has both capabilities and listens to Whisper, then relays traffic - var nodeWakuWhisper = setupTestNode(Waku, Whisper) - # XXX: Assuming we added Whisper capability here - var nodeWhisper = setupTestNode(Whisper) - # TODO: Connect them - nodeWakuWhisper.startListening() - waitFor nodeWhisper.peerPool.connectToNode(newNode(initENode(nodeWakuWhisper.keys.pubKey, - nodeWakuWhisper.address))) - - # NOTE: Commented out Whisper equivalent tests - # To enable, fully qualify nodes - - # asyncTest "Two peers connected": - # check: - # node1.peerPool.connectedNodes.len() == 1 - - # asyncTest "Filters with encryption and signing": - # let encryptKeyPair = newKeyPair() - # let signKeyPair = newKeyPair() - # var symKey: SymKey - # let topic = [byte 0x12, 0, 0, 0] - # var filters: seq[string] = @[] - # var payloads = [repeat(byte 1, 10), repeat(byte 2, 10), - # repeat(byte 3, 10), repeat(byte 4, 10)] - # var futures = [newFuture[int](), newFuture[int](), - # newFuture[int](), newFuture[int]()] - - # proc handler1(msg: ReceivedMessage) = - # var count {.global.}: int - # check msg.decoded.payload == payloads[0] or msg.decoded.payload == payloads[1] - # count += 1 - # if count == 2: futures[0].complete(1) - # proc handler2(msg: ReceivedMessage) = - # check msg.decoded.payload == payloads[1] - # futures[1].complete(1) - # proc handler3(msg: ReceivedMessage) = - # var count {.global.}: int - # check msg.decoded.payload == payloads[2] or msg.decoded.payload == payloads[3] - # count += 1 - # if count == 2: futures[2].complete(1) - # proc handler4(msg: ReceivedMessage) = - # check msg.decoded.payload == payloads[3] - # futures[3].complete(1) - - # # Filters - # # filter for encrypted asym - # filters.add(node1.subscribeFilter(newFilter(privateKey = some(encryptKeyPair.seckey), - # topics = @[topic]), handler1)) - # # filter for encrypted asym + signed - # filters.add(node1.subscribeFilter(newFilter(some(signKeyPair.pubkey), - # privateKey = some(encryptKeyPair.seckey), - # topics = @[topic]), handler2)) - # # filter for encrypted sym - # filters.add(node1.subscribeFilter(newFilter(symKey = some(symKey), - # topics = @[topic]), handler3)) - # # filter for encrypted sym + signed - # filters.add(node1.subscribeFilter(newFilter(some(signKeyPair.pubkey), - # symKey = some(symKey), - # topics = @[topic]), handler4)) - # # Messages - # check: - # # encrypted asym - # node2.postMessage(some(encryptKeyPair.pubkey), ttl = safeTTL, - # topic = topic, payload = payloads[0]) == true - # # encrypted asym + signed - # node2.postMessage(some(encryptKeyPair.pubkey), - # src = some(signKeyPair.seckey), ttl = safeTTL, - # topic = topic, payload = payloads[1]) == true - # # encrypted sym - # node2.postMessage(symKey = some(symKey), ttl = safeTTL, topic = topic, - # payload = payloads[2]) == true - # # encrypted sym + signed - # node2.postMessage(symKey = some(symKey), - # src = some(signKeyPair.seckey), - # ttl = safeTTL, topic = topic, - # payload = payloads[3]) == true - - # node2.protocolState(Waku).queue.items.len == 4 - - # check: - # await allFutures(futures).withTimeout(waitInterval) - # node1.protocolState(Waku).queue.items.len == 4 - - # for filter in filters: - # check 
node1.unsubscribeFilter(filter) == true - - # resetMessageQueues(node1, node2) - - # asyncTest "Filters with topics": - # let topic1 = [byte 0x12, 0, 0, 0] - # let topic2 = [byte 0x34, 0, 0, 0] - # var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)] - # var futures = [newFuture[int](), newFuture[int]()] - # proc handler1(msg: ReceivedMessage) = - # check msg.decoded.payload == payloads[0] - # futures[0].complete(1) - # proc handler2(msg: ReceivedMessage) = - # check msg.decoded.payload == payloads[1] - # futures[1].complete(1) - - # var filter1 = node1.subscribeFilter(newFilter(topics = @[topic1]), handler1) - # var filter2 = node1.subscribeFilter(newFilter(topics = @[topic2]), handler2) - - # check: - # node2.postMessage(ttl = safeTTL + 1, topic = topic1, - # payload = payloads[0]) == true - # node2.postMessage(ttl = safeTTL, topic = topic2, - # payload = payloads[1]) == true - # node2.protocolState(Waku).queue.items.len == 2 - - # await allFutures(futures).withTimeout(waitInterval) - # node1.protocolState(Waku).queue.items.len == 2 - - # node1.unsubscribeFilter(filter1) == true - # node1.unsubscribeFilter(filter2) == true - - # resetMessageQueues(node1, node2) - - # asyncTest "Filters with PoW": - # let topic = [byte 0x12, 0, 0, 0] - # var payload = repeat(byte 0, 10) - # var futures = [newFuture[int](), newFuture[int]()] - # proc handler1(msg: ReceivedMessage) = - # check msg.decoded.payload == payload - # futures[0].complete(1) - # proc handler2(msg: ReceivedMessage) = - # check msg.decoded.payload == payload - # futures[1].complete(1) - - # var filter1 = node1.subscribeFilter(newFilter(topics = @[topic], powReq = 0), - # handler1) - # var filter2 = node1.subscribeFilter(newFilter(topics = @[topic], - # powReq = 1_000_000), handler2) - - # check: - # node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true - - # (await futures[0].withTimeout(waitInterval)) == true - # (await futures[1].withTimeout(waitInterval)) == false - # node1.protocolState(Waku).queue.items.len == 1 - - # node1.unsubscribeFilter(filter1) == true - # node1.unsubscribeFilter(filter2) == true - - # resetMessageQueues(node1, node2) - - # asyncTest "Filters with queues": - # let topic = [byte 0, 0, 0, 0] - # let payload = repeat(byte 0, 10) - - # var filter = node1.subscribeFilter(newFilter(topics = @[topic])) - # for i in countdown(10, 1): - # check node2.postMessage(ttl = safeTTL, topic = topic, - # payload = payload) == true - - # await sleepAsync(waitInterval) - # check: - # node1.getFilterMessages(filter).len() == 10 - # node1.getFilterMessages(filter).len() == 0 - # node1.unsubscribeFilter(filter) == true - - # resetMessageQueues(node1, node2) - - # asyncTest "Local filter notify": - # let topic = [byte 0, 0, 0, 0] - - # var filter = node1.subscribeFilter(newFilter(topics = @[topic])) - # check: - # node1.postMessage(ttl = safeTTL, topic = topic, - # payload = repeat(byte 4, 10)) == true - # node1.getFilterMessages(filter).len() == 1 - # node1.unsubscribeFilter(filter) == true - - # await sleepAsync(waitInterval) - # resetMessageQueues(node1, node2) - - # asyncTest "Bloomfilter blocking": - # let sendTopic1 = [byte 0x12, 0, 0, 0] - # let sendTopic2 = [byte 0x34, 0, 0, 0] - # let filterTopics = @[[byte 0x34, 0, 0, 0],[byte 0x56, 0, 0, 0]] - # let payload = repeat(byte 0, 10) - # var f: Future[int] = newFuture[int]() - # proc handler(msg: ReceivedMessage) = - # check msg.decoded.payload == payload - # f.complete(1) - # var filter = node1.subscribeFilter(newFilter(topics = filterTopics), 
handler) - # await node1.setBloomFilter(node1.filtersToBloom()) - - # check: - # node2.postMessage(ttl = safeTTL, topic = sendTopic1, - # payload = payload) == true - # node2.protocolState(Waku).queue.items.len == 1 - - # (await f.withTimeout(waitInterval)) == false - # node1.protocolState(Waku).queue.items.len == 0 - - # resetMessageQueues(node1, node2) - - # f = newFuture[int]() - - # check: - # node2.postMessage(ttl = safeTTL, topic = sendTopic2, - # payload = payload) == true - # node2.protocolState(Waku).queue.items.len == 1 - - # await f.withTimeout(waitInterval) - # f.read() == 1 - # node1.protocolState(Waku).queue.items.len == 1 - - # node1.unsubscribeFilter(filter) == true - - # await node1.setBloomFilter(fullBloom()) - - # resetMessageQueues(node1, node2) - - # asyncTest "PoW blocking": - # let topic = [byte 0, 0, 0, 0] - # let payload = repeat(byte 0, 10) - - # await node1.setPowRequirement(1_000_000) - # check: - # node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true - # node2.protocolState(Waku).queue.items.len == 1 - # await sleepAsync(waitInterval) - # check: - # node1.protocolState(Waku).queue.items.len == 0 - - # resetMessageQueues(node1, node2) - - # await node1.setPowRequirement(0.0) - # check: - # node2.postMessage(ttl = safeTTL, topic = topic, payload = payload) == true - # node2.protocolState(Waku).queue.items.len == 1 - # await sleepAsync(waitInterval) - # check: - # node1.protocolState(Waku).queue.items.len == 1 - - # resetMessageQueues(node1, node2) - - # asyncTest "Queue pruning": - # let topic = [byte 0, 0, 0, 0] - # let payload = repeat(byte 0, 10) - # # We need a minimum TTL of 2 as when set to 1 there is a small chance that - # # it is already expired after messageInterval due to rounding down of float - # # to uint32 in postMessage() - # let lowerTTL = 2'u32 # Lower TTL as we need to wait for messages to expire - # for i in countdown(10, 1): - # check node2.postMessage(ttl = lowerTTL, topic = topic, payload = payload) == true - # check node2.protocolState(Waku).queue.items.len == 10 - - # await sleepAsync(waitInterval) - # check node1.protocolState(Waku).queue.items.len == 10 - - # await sleepAsync(milliseconds((lowerTTL+1)*1000)) - # check node1.protocolState(Waku).queue.items.len == 0 - # check node2.protocolState(Waku).queue.items.len == 0 - - # resetMessageQueues(node1, node2) - - # asyncTest "P2P post": - # let topic = [byte 0, 0, 0, 0] - # var f: Future[int] = newFuture[int]() - # proc handler(msg: ReceivedMessage) = - # check msg.decoded.payload == repeat(byte 4, 10) - # f.complete(1) - - # var filter = node1.subscribeFilter(newFilter(topics = @[topic], - # allowP2P = true), handler) - # check: - # node1.setPeerTrusted(toNodeId(node2.keys.pubkey)) == true - # node2.postMessage(ttl = 10, topic = topic, - # payload = repeat(byte 4, 10), - # targetPeer = some(toNodeId(node1.keys.pubkey))) == true - - # await f.withTimeout(waitInterval) - # f.read() == 1 - # node1.protocolState(Waku).queue.items.len == 0 - # node2.protocolState(Waku).queue.items.len == 0 - - # node1.unsubscribeFilter(filter) == true - - # asyncTest "Light node posting": - # var ln1 = setupTestNode(Waku) - # ln1.setLightNode(true) - - # await ln1.peerPool.connectToNode(newNode(initENode(node2.keys.pubKey, - # node2.address))) - - # let topic = [byte 0, 0, 0, 0] - - # check: - # # normal post - # ln1.postMessage(ttl = safeTTL, topic = topic, - # payload = repeat(byte 0, 10)) == false - # ln1.protocolState(Waku).queue.items.len == 0 - # # P2P post - # ln1.postMessage(ttl 
= safeTTL, topic = topic, - # payload = repeat(byte 0, 10), - # targetPeer = some(toNodeId(node2.keys.pubkey))) == true - # ln1.protocolState(Waku).queue.items.len == 0 - - # asyncTest "Connect two light nodes": - # var ln1 = setupTestNode(Waku) - # var ln2 = setupTestNode(Waku) - - # ln1.setLightNode(true) - # ln2.setLightNode(true) - - # ln2.startListening() - # let peer = await ln1.rlpxConnect(newNode(initENode(ln2.keys.pubKey, - # ln2.address))) - # check peer.isNil == true - - asyncTest "WakuWhisper and Whisper peers connected": - check: - nodeWakuWhisper.peerPool.connectedNodes.len() == 1 - - asyncTest "WhisperWaku and Whisper filters with topics": - let topic1 = [byte 0x12, 0, 0, 0] - let topic2 = [byte 0x34, 0, 0, 0] - var payloads = [repeat(byte 0, 10), repeat(byte 1, 10)] - var futures = [newFuture[int](), newFuture[int]()] - - proc handler1(msg: whisper.ReceivedMessage) = - check msg.decoded.payload == payloads[0] - futures[0].complete(1) - proc handler2(msg: whisper.ReceivedMessage) = - check msg.decoded.payload == payloads[1] - futures[1].complete(1) - - var filter1 = nodeWakuWhisper.subscribeFilter(whisper.newFilter(topics = @[topic1]), handler1) - var filter2 = nodeWakuWhisper.subscribeFilter(whisper.newFilter(topics = @[topic2]), handler2) - - check: - whisper.postMessage(nodeWhisper, ttl = safeTTL + 1, topic = topic1, - payload = payloads[0]) == true - whisper.postMessage(nodeWhisper, ttl = safeTTL, topic = topic2, - payload = payloads[1]) == true - nodeWhisper.protocolState(Whisper).queue.items.len == 2 - - await allFutures(futures).withTimeout(waitInterval) - - # This shows WakuWhisper can receive Whisper messages - # TODO: This should also make its way to Waku state! Where? - nodeWakuWhisper.protocolState(Whisper).queue.items.len == 2 - - # XXX: How does this look with protocol state for waku and whisper? 
- whisper.unsubscribeFilter(nodeWakuWhisper, filter1) == true - whisper.unsubscribeFilter(nodeWakuWhisper, filter2) == true - - # XXX: This reads a bit weird, but eh - waku.resetMessageQueue(nodeWakuWhisper) - whisper.resetMessageQueue(nodeWakuWhisper) - whisper.resetMessageQueue(nodeWhisper) - - check: - nodeWhisper.protocolState(Whisper).queue.items.len == 0 - nodeWakuWhisper.protocolState(Whisper).queue.items.len == 0 - - # TODO: Add test for Waku node also listening on Whisper topic From 7cb063d96197b2090a5cc6109c8850962b49e0c5 Mon Sep 17 00:00:00 2001 From: kdeme Date: Wed, 20 Nov 2019 18:27:34 +0100 Subject: [PATCH 12/13] Fix waku bridge test --- tests/p2p/test_waku_bridge.nim | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/p2p/test_waku_bridge.nim b/tests/p2p/test_waku_bridge.nim index abf7e453..4f17c009 100644 --- a/tests/p2p/test_waku_bridge.nim +++ b/tests/p2p/test_waku_bridge.nim @@ -33,7 +33,8 @@ suite "Waku - Whisper bridge tests": asyncTest "WakuWhisper and Whisper peers connected": check: - nodeWakuWhisper.peerPool.connectedNodes.len() == 2 + nodeWhisper.peerPool.connectedNodes.len() == 1 + nodeWaku.peerPool.connectedNodes.len() == 1 asyncTest "Whisper - Waku communcation via bridge": # topic whisper node subscribes to, waku node posts to From 21f543d227813fd1441e73f22eb14527c0edf0de Mon Sep 17 00:00:00 2001 From: kdeme Date: Thu, 21 Nov 2019 11:03:43 +0100 Subject: [PATCH 13/13] cleaner solution for sharing the queue --- eth/p2p/rlpx_protocols/waku_protocol.nim | 10 +++++----- tests/p2p/test_waku_bridge.nim | 2 ++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/eth/p2p/rlpx_protocols/waku_protocol.nim b/eth/p2p/rlpx_protocols/waku_protocol.nim index ff048863..99c88c56 100644 --- a/eth/p2p/rlpx_protocols/waku_protocol.nim +++ b/eth/p2p/rlpx_protocols/waku_protocol.nim @@ -100,11 +100,8 @@ proc run(peer: Peer) {.gcsafe, async.} proc run(node: EthereumNode, network: WakuNetwork) {.gcsafe, async.} proc initProtocolState*(network: WakuNetwork, node: EthereumNode) {.gcsafe.} = - if node.protocolState(Whisper).isNil: - new(network.queue) - network.queue[] = initQueue(defaultQueueCapacity) - else: - network.queue = node.protocolState(Whisper).queue + new(network.queue) + network.queue[] = initQueue(defaultQueueCapacity) network.filters = initTable[string, Filter]() network.config.bloom = fullBloom() network.config.powRequirement = defaultMinPow @@ -466,3 +463,6 @@ proc resetMessageQueue*(node: EthereumNode) = ## ## NOTE: Not something that should be run in normal circumstances. node.protocolState(Waku).queue[] = initQueue(defaultQueueCapacity) + +proc shareMessageQueue*(node: EthereumNode) = + node.protocolState(Waku).queue = node.protocolState(Whisper).queue diff --git a/tests/p2p/test_waku_bridge.nim b/tests/p2p/test_waku_bridge.nim index 4f17c009..ed548ae2 100644 --- a/tests/p2p/test_waku_bridge.nim +++ b/tests/p2p/test_waku_bridge.nim @@ -28,6 +28,8 @@ suite "Waku - Whisper bridge tests": nodeWakuWhisper.startListening() let bridgeNode = newNode(initENode(nodeWakuWhisper.keys.pubKey, nodeWakuWhisper.address)) + nodeWakuWhisper.shareMessageQueue() + waitFor nodeWhisper.peerPool.connectToNode(bridgeNode) waitFor nodeWaku.peerPool.connectToNode(bridgeNode)
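
Taken together, patches 11-13 turn the bridge into a node that runs both RLPx protocols over one shared message queue: shareMessageQueue points the Waku protocol state at the Whisper queue, so an envelope accepted on one side is relayed out on the other at the next messageInterval. Below is a minimal sketch (not part of the patch series) of wiring such a bridge up outside the test suite, assuming the test helpers setupTestNode and initENode from tests/p2p/p2p_test_helper are available; treat it as illustrative standalone usage rather than an API guarantee.

    import sequtils, chronos, eth/p2p, eth/p2p/peer_pool, ./p2p_test_helper
    import eth/p2p/rlpx_protocols/waku_protocol as waku
    import eth/p2p/rlpx_protocols/whisper_protocol as whisper

    # Bridge node speaks both Whisper and Waku and shares one queue between them.
    var bridge = setupTestNode(Whisper, Waku)
    bridge.startListening()
    bridge.shareMessageQueue()  # Waku state now reuses the Whisper queue (patch 13)

    # A plain Waku node connected to the bridge.
    var wakuNode = setupTestNode(Waku)
    waitFor wakuNode.peerPool.connectToNode(
      newNode(initENode(bridge.keys.pubKey, bridge.address)))

    # A message posted on the Waku side lands in the shared queue of the bridge
    # and is relayed to any Whisper peers of the bridge on the next messageInterval.
    discard waku.postMessage(wakuNode, ttl = 5'u32,
                             topic = [byte 0x12, 0, 0, 0],
                             payload = repeat(byte 1, 10))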