diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 00000000..f8d9091f --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,39 @@ +name: ci +on: + push: + branches: + - master + pull_request: + branches: + - master + +jobs: + check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - run: npm install + - run: npx aegir lint + - uses: gozala/typescript-error-reporter-action@v1.0.8 + - run: npx aegir build + - run: npx aegir dep-check + - uses: ipfs/aegir/actions/bundle-size@master + name: size + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + test-node: + needs: check + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [windows-latest, ubuntu-latest, macos-latest] + node: [14, 16] + fail-fast: true + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v1 + with: + node-version: ${{ matrix.node }} + - run: npm install + - run: npx nyc --reporter=lcov aegir test -t node -- --bail + - uses: codecov/codecov-action@v1 diff --git a/.gitignore b/.gitignore index 141b7976..b831c4d9 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,4 @@ test/test-data/go-ipfs-repo/LOG.old # while testing npm5 package-lock.json yarn.lock +.nyc_output diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 8cd77290..00000000 --- a/.travis.yml +++ /dev/null @@ -1,42 +0,0 @@ -language: node_js -cache: npm - -branches: - only: - - master - - /^release\/.*$/ - -stages: - - check - - test - - cov - -node_js: - - 'lts/*' - - 'node' - -os: - - linux - - osx - -before_install: - # modules with pre-built binaries may not have deployed versions for bleeding-edge node so this lets us fall back to building from source - - npm install -g @mapbox/node-pre-gyp - -script: npx nyc -s npm run test:node -- --bail -after_success: npx nyc report --reporter=text-lcov > coverage.lcov && npx codecov - -jobs: - include: - - os: windows - filter_secrets: false - cache: false - - - stage: check - script: - - npx 
aegir build --bundlesize - - npx aegir dep-check - - npm run lint - -notifications: - email: false diff --git a/README.md b/README.md index 61de2ab4..87e39157 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ -# js-libp2p-kad-dht +# js-libp2p-kad-dht [![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) [![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/) [![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) [![Discourse posts](https://img.shields.io/discourse/https/discuss.libp2p.io/posts.svg)](https://discuss.libp2p.io) -[![Build Status](https://travis-ci.org/libp2p/js-libp2p-kad-dht.svg?style=flat-square)](https://travis-ci.org/libp2p/js-libp2p-kad-dht) +[![Build status](https://github.com/libp2p/js-libp2p-kad-dht/actions/workflows/test.yml/badge.svg?branch=master)](https://github.com/libp2p/js-libp2p-kad-dht/actions/workflows/test.yml) [![Coverage Status](https://coveralls.io/repos/github/libp2p/js-libp2p-kad-dht/badge.svg?branch=master)](https://coveralls.io/github/libp2p/js-libp2p-kad-dht?branch=master) [![Dependency Status](https://david-dm.org/libp2p/js-libp2p-kad-dht.svg?style=flat-square)](https://david-dm.org/libp2p/js-libp2p-kad-dht) [![Bundle Size](https://flat.badgen.net/bundlephobia/minzip/libp2p-kad-dht)](https://bundlephobia.com/result?p=libp2p-kad-dht) @@ -15,19 +15,23 @@ > JavaScript implementation of the Kademlia DHT for libp2p, based on [go-libp2p-kad-dht](https://github.com/libp2p/go-libp2p-kad-dht). -## Lead Maintainer +## Lead Maintainer [Vasco Santos](https://github.com/vasco-santos). 
-## Table of Contents +## Table of Contents - [Install](#install) - [npm](#npm) - [Use in Node.js](#use-in-nodejs) - [API](#api) + - [Custom secondary DHT in libp2p](#custom-secondary-dht-in-libp2p) + - [Peer Routing](#peer-routing) + - [Content Routing](#content-routing) + - [Peer Discovery](#peer-discovery) + - [Implementation Summary](#implementation-summary) - [Contribute](#contribute) - [License](#license) - ## Install ### npm @@ -39,7 +43,7 @@ ### Use in Node.js ```js -const KadDHT = require('libp2p-kad-dht') +import { create } from 'libp2p-kad-dht' ``` ## API @@ -51,20 +55,18 @@ The libp2p-kad-dht module offers 3 APIs: Peer Routing, Content Routing and Peer ### Custom secondary DHT in libp2p ```js +import { create } from 'libp2p-kad-dht' + /** * @param {Libp2p} libp2p */ function addDHT(libp2p) { - const customDHT = new KadDHT({ + const customDHT = create({ libp2p, - dialer: libp2p.dialer, - peerId: libp2p.peerId, - peerStore: libp2p.peerStore, - registrar: libp2p.registrar, protocolPrefix: '/custom' }) customDHT.start() - customDHT.on('peer', libp2p._onDiscoveryPeer) + return customDHT } ``` diff --git a/docs/IMPL_SUMMARY.MD b/docs/IMPL_SUMMARY.MD index 08a44cc7..23dbacc1 100644 --- a/docs/IMPL_SUMMARY.MD +++ b/docs/IMPL_SUMMARY.MD @@ -1,67 +1,26 @@ -# js-libp2p-kad-dht +# js-libp2p-kad-dht -js-libp2p-kad-dht is a JavaScript implementation of the [Kademlia DHT](http://www.scs.stanford.edu/%7Edm/home/papers/kpos.pdf) with some features of S/Kademlia. A "provider" node uses the DHT to advertise that it has a particular piece of content, and "querying" nodes will search the DHT for peers that have a particular piece of content. Content is modeled as a value that is identified by a key, where the key and value are Buffers. +js-libp2p-kad-dht is a JavaScript implementation of the [Kademlia DHT][] with some features of [S/Kademlia][]. 
A "provider" node uses the DHT to advertise that it has a particular piece of content, and "querying" nodes will search the DHT for peers that have a particular piece of content. Content is modelled as a value that is identified by a key, where the key and value are Uint8Arrays. -#### DHT Identifiers +## Table of contents -The DHT uses a sha2-256 hash for identifiers: -- For peers the DHT identifier is the hash of the [PeerId][PeerId] -- For content the DHT identifier is the hash of the key (eg a Block CID) +- [Spec](#spec) +- [Extensions](#extensions) + - [Disjoint paths](#disjoint-paths) -#### FIND_NODE +## Spec -`findPeer (PeerId):` [PeerInfo][PeerInfo] +js-libp2p-kad-dht follows the [libp2p/kad-dht spec](https://github.com/libp2p/specs/tree/master/kad-dht) and implements the algorithms described in the [IPFS DHT documentation](https://docs.ipfs.io/concepts/dht/). -The address space is so large (256 bits) that there are big gaps between DHT ids, and nodes frequently join and leave the DHT. +## Extensions -To find a particular node -- the `querying node` converts the [PeerId][PeerId] to a DHT id -- the `querying node` sends a request to the nearest peers to that DHT id that it knows about -- those peers respond with the nearest peers to the DHT id that they know about -- the `querying node` sorts the responses and recursively queries the closest peers to the DHT id, continuing until it finds the node or it has queried all the closest peers. +js-libp2p-kad-dht implements some additional functionality not described in the libp2p KAD-DHT spec. 
-#### PUT +### Disjoint paths -`put (Key, Value)` - -To store a value in the DHT, the `provider node` -- converts the key to a DHT id -- follows the "closest peers" algorithm as above to find the nearest peers to the DHT id -- sends the value to those nearest peers - -Note that DHT nodes will only store values that are accepted by its "validators", configurable functions that validate the key/value to ensure the node can control what kind of content it stores (eg IPNS records). - -#### GET - -`get (Key): [Value]` - -To retrieve a value from the DHT -- the `querying node` converts the key to a DHT id -- the `querying node` follows the "closest peers" algorithm to find the nearest peers to the DHT id -- at each iteration of the algorithm, if the peer has the value it responds with the value itself in addition to closer peers. - -Note that the value for a particular key is stored by many nodes, and these nodes receive `PUT` requests asynchronously, so it's possible that nodes may have distinct values for the same key. For example if node A `PUT`s the value `hello` to key `greeting` and node B concurrently `PUT`s the value `bonjour` to key `greeting`, some nodes close to the key `greeting` may receive `hello` first and others may receive `bonjour` first. - -Therefore a `GET` request to the DHT may collect distinct values (eg `hello` and `bonjour`) for a particular key from the nodes close to the key. The DHT has "selectors", configurable functions that choose the "best" value (for example IPNS records include a sequence number, and the "best" value is the record with the highest sequence number). 
- -#### PROVIDE - -`provide (Key)` - -To advertise that it has the content for a particular key -- the `provider node` converts the key to a DHT id -- the `provider node` follows the "closest peers" algorithm to find the nearest peers to the DHT id -- the `provider node` sends a "provide" message to each of the nearest peers -- each of the nearest peers saves the association between the "provider" peer and the key - -#### FIND_PROVIDERS - -`findProviders (Key):` [[PeerInfo][PeerInfo]] - -To find providers for a particular key -- the `querying node` converts the key to a DHT id -- the `querying node` follows the "closest peers" algorithm to find the nearest peers to the DHT id -- at each iteration of the algorithm, if the peer knows which nodes are providing the value it responds with the provider nodes in addition to closer peers. +js-libp2p-kad-dht uses disjoint paths when performing DHT queries. These are described in the [S/Kademlia][] paper. +[Kademlia DHT]: (http://www.scs.stanford.edu/%7Edm/home/papers/kpos.pdf) +[S/Kademlia]: (https://git.gnunet.org/bibliography.git/plain/docs/SKademlia2007.pdf) [PeerId]: https://github.com/libp2p/js-peer-id [PeerInfo]: https://github.com/libp2p/js-peer-info diff --git a/package.json b/package.json index 7300a42c..45efd36d 100644 --- a/package.json +++ b/package.json @@ -5,8 +5,8 @@ "leadMaintainer": "Vasco Santos ", "main": "src/index.js", "scripts": { - "lint": "aegir ts -p check && aegir lint", "prepare": "npm run build", + "lint": "aegir ts -p check && aegir lint", "build": "npm run build:proto && npm run build:proto-types && aegir build", "build:proto": "pbjs -t static-module -w commonjs -r libp2p-dht-message --force-number --no-verify --no-delimited --no-create --no-beautify --no-defaults --lint eslint-disable -o src/message/dht.js ./src/message/dht.proto", "build:proto-types": "pbts -o src/message/dht.d.ts src/message/dht.js", @@ -18,7 +18,8 @@ "release-major": "aegir release --type major --docs -t node", "coverage": 
"aegir coverage", "coverage-publish": "aegir-coverage publish", - "sim": "node test/simulation/index.js" + "sim": "node test/simulation/index.js", + "dep-check": "aegir dep-check" }, "files": [ "src", @@ -47,29 +48,37 @@ "homepage": "https://github.com/libp2p/js-libp2p-kad-dht", "types": "dist/src/index.d.ts", "dependencies": { + "any-signal": "^2.1.2", "datastore-core": "^6.0.7", "debug": "^4.3.1", "err-code": "^3.0.0", "hashlru": "^2.3.0", - "heap": "~0.2.6", "interface-datastore": "^6.0.2", + "it-all": "^1.0.5", + "it-drain": "^1.0.4", "it-first": "^1.0.4", "it-length": "^1.0.3", "it-length-prefixed": "^5.0.2", + "it-map": "^1.0.5", + "it-merge": "^1.0.3", + "it-parallel": "^2.0.1", "it-pipe": "^1.1.0", + "it-take": "^1.0.2", "k-bucket": "^5.1.0", "libp2p-crypto": "^0.19.5", "libp2p-interfaces": "^1.0.0", "libp2p-record": "^0.10.4", "multiaddr": "^10.0.0", "multiformats": "^9.4.5", + "native-abort-controller": "^1.0.4", + "p-defer": "^3.0.0", "p-map": "^4.0.0", "p-queue": "^6.6.2", - "p-timeout": "^4.1.0", "peer-id": "^0.15.0", + "private-ip": "^2.3.3", "protobufjs": "^6.10.2", "streaming-iterables": "^6.0.0", - "timeout-abort-controller": "^1.1.1", + "timeout-abort-controller": "^2.0.0", "uint8arrays": "^3.0.0", "varint": "^6.0.0" }, @@ -77,17 +86,15 @@ "@types/debug": "^4.1.7", "aegir": "^35.0.1", "async-iterator-all": "^1.0.0", - "crypto-browserify": "^3.12.0", "datastore-level": "^7.0.1", "delay": "^5.0.0", "execa": "^5.1.1", + "it-filter": "^1.0.3", + "it-last": "^1.0.6", "it-pair": "^1.0.0", - "libp2p": "^0.32.3", + "libp2p": "^0.33.0", "lodash.random": "^3.2.0", "lodash.range": "^3.2.0", - "p-defer": "^3.0.0", - "p-each-series": "^2.1.0", - "p-map-series": "^2.1.0", "p-retry": "^4.2.0", "sinon": "^11.1.1", "which": "^2.0.2" diff --git a/src/constants.js b/src/constants.js index 0d30643d..f1f004b3 100644 --- a/src/constants.js +++ b/src/constants.js @@ -33,3 +33,6 @@ exports.K = 20 // Alpha is the concurrency for asynchronous requests exports.ALPHA = 3 + 
+// How often we look for our closest DHT neighbours +exports.QUERY_SELF_INTERVAL = Number(5 * minute) diff --git a/src/content-fetching/index.js b/src/content-fetching/index.js index 75e84812..88544c43 100644 --- a/src/content-fetching/index.js +++ b/src/content-fetching/index.js @@ -1,30 +1,59 @@ 'use strict' const errcode = require('err-code') -const pTimeout = require('p-timeout') const { equals: uint8ArrayEquals } = require('uint8arrays/equals') const { toString: uint8ArrayToString } = require('uint8arrays/to-string') -const libp2pRecord = require('libp2p-record') -const c = require('../constants') -const Query = require('../query') +const Libp2pRecord = require('libp2p-record') +const { + ALPHA +} = require('../constants') const utils = require('../utils') -const Record = libp2pRecord.Record +const Record = Libp2pRecord.Record +const parallel = require('it-parallel') +const map = require('it-map') +const { + valueEvent, + queryErrorEvent +} = require('../query/events') +const { Message } = require('../message') +const { pipe } = require('it-pipe') /** * @typedef {import('peer-id')} PeerId - * @typedef {import('../query').DHTQueryResult} DHTQueryResult + * @typedef {import('../types').ValueEvent} ValueEvent */ -/** - * @param {import('../')} dht - */ -module.exports = (dht) => { +class ContentFetching { + /** + * @param {object} params + * @param {import('peer-id')} params.peerId + * @param {import('interface-datastore').Datastore} params.datastore + * @param {import('libp2p-interfaces/src/types').DhtValidators} params.validators + * @param {import('libp2p-interfaces/src/types').DhtSelectors} params.selectors + * @param {import('../peer-routing').PeerRouting} params.peerRouting + * @param {import('../query/manager').QueryManager} params.queryManager + * @param {import('../routing-table').RoutingTable} params.routingTable + * @param {import('../network').Network} params.network + * @param {boolean} params.lan + */ + constructor ({ peerId, datastore, validators, 
selectors, peerRouting, queryManager, routingTable, network, lan }) { + this._log = utils.logger(`libp2p:kad-dht:${lan ? 'lan' : 'wan'}:content-fetching`) + this._peerId = peerId + this._datastore = datastore + this._validators = validators + this._selectors = selectors + this._peerRouting = peerRouting + this._queryManager = queryManager + this._routingTable = routingTable + this._network = network + } + /** * @param {Uint8Array} key * @param {Uint8Array} rec */ - const putLocal = async (key, rec) => { // eslint-disable-line require-await - return dht.datastore.put(utils.bufferToKey(key), rec) + async putLocal (key, rec) { // eslint-disable-line require-await + return this._datastore.put(utils.bufferToKey(key), rec) } /** @@ -33,15 +62,18 @@ module.exports = (dht) => { * * @param {Uint8Array} key */ - const getLocal = async (key) => { - dht._log(`getLocal ${uint8ArrayToString(key, 'base32')}`) + async getLocal (key) { + this._log(`getLocal ${uint8ArrayToString(key, 'base32')}`) + + const dsKey = utils.bufferToKey(key) - const raw = await dht.datastore.get(utils.bufferToKey(key)) - dht._log(`found ${uint8ArrayToString(key, 'base32')} in local datastore`) + this._log(`fetching record for key ${dsKey}`) + const raw = await this._datastore.get(dsKey) + this._log(`found ${dsKey} in local datastore`) const rec = Record.deserialize(raw) - await dht._verifyRecordLocally(rec) + await Libp2pRecord.validator.verifyRecord(this._validators, rec) return rec } @@ -50,253 +82,212 @@ module.exports = (dht) => { * Send the best record found to any peers that have an out of date record. 
* * @param {Uint8Array} key - * @param {import('../query').DHTQueryValue[]} vals - values retrieved from the DHT + * @param {ValueEvent[]} vals - values retrieved from the DHT * @param {Uint8Array} best - the best record that was found + * @param {object} [options] + * @param {AbortSignal} [options.signal] */ - const sendCorrectionRecord = async (key, vals, best) => { + async * sendCorrectionRecord (key, vals, best, options = {}) { + this._log('sendCorrection for %b', key) const fixupRec = await utils.createPutRecord(key, best) - return Promise.all(vals.map(async (v) => { + for (const { value, from } of vals) { // no need to do anything - if (uint8ArrayEquals(v.val, best)) { - return + if (uint8ArrayEquals(value, best)) { + this._log('record was ok') + continue } // correct ourself - if (dht._isSelf(v.from)) { + if (this._peerId.equals(from)) { try { - await dht._putLocal(key, fixupRec) + const dsKey = utils.bufferToKey(key) + this._log(`Storing corrected record for key ${dsKey}`) + await this._datastore.put(dsKey, fixupRec) } catch (/** @type {any} */ err) { - dht._log.error('Failed error correcting self', err) + this._log.error('Failed error correcting self', err) } - return - } - // send correction - try { - await dht._putValueToPeer(key, fixupRec, v.from) - } catch (/** @type {any} */ err) { - dht._log.error('Failed error correcting entry', err) + continue } - })) - } - return { - /** - * Store the given key/value pair locally, in the datastore. - * - * @param {Uint8Array} key - * @param {Uint8Array} rec - encoded record - */ - async _putLocal (key, rec) { // eslint-disable-line require-await - return putLocal(key, rec) - }, - - /** - * Store the given key/value pair in the DHT. 
- * - * @param {Uint8Array} key - * @param {Uint8Array} value - * @param {object} [options] - put options - * @param {number} [options.minPeers] - minimum number of peers required to successfully put (default: closestPeers.length) - */ - async put (key, value, options = {}) { - dht._log('PutValue %b', key) - - // create record in the dht format - const record = await utils.createPutRecord(key, value) - - // store the record locally - await putLocal(key, record) - - // put record to the closest peers - let counterAll = 0 - let counterSuccess = 0 + // send correction + let sentCorrection = false + const request = new Message(Message.TYPES.PUT_VALUE, key, 0) + request.record = Record.deserialize(fixupRec) - await utils.mapParallel(dht.getClosestPeers(key, { shallow: true }), async (peer) => { - try { - counterAll += 1 - await dht._putValueToPeer(key, record, peer) - counterSuccess += 1 - } catch (/** @type {any} */ err) { - dht._log.error('Failed to put to peer (%b): %s', peer.id, err) + for await (const event of this._network.sendRequest(from, request, options)) { + if (event.name === 'PEER_RESPONSE' && event.record && uint8ArrayEquals(event.record.value, Record.deserialize(fixupRec).value)) { + sentCorrection = true } - }) - // verify if we were able to put to enough peers - const minPeers = options.minPeers || counterAll // Ensure we have a default `minPeers` - - if (minPeers > counterSuccess) { - const error = errcode(new Error(`Failed to put value to enough peers: ${counterSuccess}/${minPeers}`), 'ERR_NOT_ENOUGH_PUT_PEERS') - dht._log.error(error) - throw error + yield event } - }, - - /** - * Get the value to the given key. - * Times out after 1 minute by default. 
- * - * @param {Uint8Array} key - * @param {object} [options] - get options - * @param {number} [options.timeout] - optional timeout (default: 60000) - */ - async get (key, options = {}) { - options.timeout = options.timeout || c.minute - - dht._log('_get %b', key) - const vals = await dht.getMany(key, c.GET_MANY_RECORD_COUNT, options) - const recs = vals.map((v) => v.val) - let i = 0 - - try { - i = libp2pRecord.selection.bestRecord(dht.selectors, key, recs) - } catch (/** @type {any} */ err) { - // Assume the first record if no selector available - if (err.code !== 'ERR_NO_SELECTOR_FUNCTION_FOR_RECORD_KEY') { - throw err - } + if (!sentCorrection) { + yield queryErrorEvent({ from, error: errcode(new Error('value not put correctly'), 'ERR_PUT_VALUE_INVALID') }) } - const best = recs[i] - dht._log('GetValue %b %s', key, best) + this._log.error('Failed error correcting entry') + } + } - if (!best) { - throw errcode(new Error('best value was not found'), 'ERR_NOT_FOUND') - } + /** + * Store the given key/value pair in the DHT + * + * @param {Uint8Array} key + * @param {Uint8Array} value + * @param {object} [options] - put options + * @param {AbortSignal} [options.signal] + */ + async * put (key, value, options = {}) { + this._log('put key %b value %b', key, value) + + // create record in the dht format + const record = await utils.createPutRecord(key, value) + + // store the record locally + const dsKey = utils.bufferToKey(key) + this._log(`storing record for key ${dsKey}`) + await this._datastore.put(dsKey, record) + + // put record to the closest peers + yield * pipe( + this._peerRouting.getClosestPeers(key, { signal: options.signal }), + (source) => map(source, (event) => { + return async () => { + if (event.name !== 'FINAL_PEER') { + return [event] + } - await sendCorrectionRecord(key, vals, best) + const events = [] - return best - }, + const msg = new Message(Message.TYPES.PUT_VALUE, key, 0) + msg.record = Record.deserialize(record) - /** - * Get the `n` values 
to the given key without sorting. - * - * @param {Uint8Array} key - * @param {number} nvals - * @param {object} [options] - get options - * @param {number} [options.timeout] - optional timeout (default: 60000) - */ - async getMany (key, nvals, options = {}) { - options.timeout = options.timeout || c.minute + for await (const putEvent of this._network.sendRequest(event.peer.id, msg, options)) { + events.push(putEvent) - dht._log('getMany %b (%s)', key, nvals) + if (putEvent.name !== 'PEER_RESPONSE') { + continue + } - const vals = [] - let localRec + if (putEvent.record && uint8ArrayEquals(putEvent.record.value, Record.deserialize(record).value)) { + } else { + events.push(queryErrorEvent({ from: event.peer.id, error: errcode(new Error('value not put correctly'), 'ERR_PUT_VALUE_INVALID') })) + } + } - try { - localRec = await getLocal(key) - } catch (/** @type {any} */ err) { - if (nvals === 0) { - throw err + return events + } + }), + (source) => parallel(source, { + ordered: false, + concurrency: ALPHA + }), + async function * (source) { + for await (const events of source) { + yield * events } } + ) + } - if (localRec) { - vals.push({ - val: localRec.value, - from: dht.peerId - }) - } + /** + * Get the value to the given key + * + * @param {Uint8Array} key + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * get (key, options = {}) { + this._log('get %b', key) + + /** @type {ValueEvent[]} */ + const vals = [] - if (vals.length >= nvals) { - return vals + for await (const event of this.getMany(key, options)) { + if (event.name === 'VALUE') { + vals.push(event) } - const id = await utils.convertBuffer(key) - const rtp = dht.routingTable.closestPeers(id, dht.kBucketSize) + yield event + } - dht._log('peers in rt: %d', rtp.length) + if (!vals.length) { + return + } - if (rtp.length === 0) { - const errMsg = 'Failed to lookup key! No peers from routing table!' 
+ const records = vals.map((v) => v.value) + let i = 0 - dht._log.error(errMsg) - if (vals.length === 0) { - throw errcode(new Error(errMsg), 'ERR_NO_PEERS_IN_ROUTING_TABLE') - } - return vals + try { + i = Libp2pRecord.selection.bestRecord(this._selectors, key, records) + } catch (/** @type {any} */ err) { + // Assume the first record if no selector available + if (err.code !== 'ERR_NO_SELECTOR_FUNCTION_FOR_RECORD_KEY') { + throw err } + } - const valsLength = vals.length - - /** - * @param {number} pathIndex - * @param {number} numPaths - */ - function createQuery (pathIndex, numPaths) { - // This function body runs once per disjoint path - const pathSize = utils.pathSize(nvals - valsLength, numPaths) - let queryResults = 0 - - /** - * Here we return the query function to use on this particular disjoint path - * - * @param {PeerId} peer - */ - async function disjointPathQuery (peer) { - let rec, peers, lookupErr - try { - const results = await dht._getValueOrPeers(peer, key) - rec = results.record - peers = results.peers - } catch (/** @type {any} */ err) { - // If we have an invalid record we just want to continue and fetch a new one. - if (err.code !== 'ERR_INVALID_RECORD') { - throw err - } - lookupErr = err - } + const best = records[i] + this._log('GetValue %b %b', key, best) - /** @type {import('../query').QueryResult} */ - const res = { - closerPeers: peers - } + if (!best) { + throw errcode(new Error('best value was not found'), 'ERR_NOT_FOUND') + } - if (rec && rec.value) { - vals.push({ - val: rec.value, - from: peer - }) + yield * this.sendCorrectionRecord(key, vals, best, options) - queryResults++ - } else if (lookupErr) { - vals.push({ - err: lookupErr, - from: peer - }) + yield vals[i] + } - queryResults++ - } + /** + * Get the `n` values to the given key without sorting. 
+ * + * @param {Uint8Array} key + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * getMany (key, options = {}) { + this._log('getMany values for %t', key) - // enough is enough - if (queryResults >= pathSize) { - res.pathComplete = true - } + try { + const localRec = await this.getLocal(key) - return res - } + yield valueEvent({ + value: localRec.value, + from: this._peerId + }) + } catch (/** @type {any} */ err) { + this._log('error getting local value for %b', key, err) + } - return disjointPathQuery - } + const id = await utils.convertBuffer(key) + const rtp = this._routingTable.closestPeers(id) + + this._log('found %d peers in routing table', rtp.length) - // we have peers, lets send the actual query to them - const query = new Query(dht, key, createQuery) + const self = this - try { - await pTimeout(query.run(rtp), options.timeout) - } catch (/** @type {any} */ err) { - if (vals.length === 0) { - throw err + /** + * @type {import('../query/types').QueryFunc} + */ + const getValueQuery = async function * ({ peer, signal }) { + for await (const event of self._peerRouting.getValueOrPeers(peer, key, { signal })) { + yield event + + if (event.name === 'PEER_RESPONSE' && event.record) { + yield valueEvent({ from: peer, value: event.record.value }) } - } finally { - query.stop() } - - return vals } + + // we have peers, lets send the actual query to them + yield * this._queryManager.run(key, rtp, getValueQuery, options) } } + +module.exports.ContentFetching = ContentFetching diff --git a/src/content-routing/index.js b/src/content-routing/index.js index 41708399..efc4e607 100644 --- a/src/content-routing/index.js +++ b/src/content-routing/index.js @@ -1,13 +1,17 @@ 'use strict' -const errcode = require('err-code') -const pTimeout = require('p-timeout') - -const c = require('../constants') -const LimitedPeerList = require('../peer-list/limited-peer-list') -const Message = 
require('../message') -const Query = require('../query') -const utils = require('../utils') +const { Message } = require('../message') +const parallel = require('it-parallel') +const map = require('it-map') +const { convertBuffer, logger } = require('../utils') +const { ALPHA } = require('../constants') +const { pipe } = require('it-pipe') +const { + queryErrorEvent, + peerResponseEvent, + providerEvent +} = require('../query/events') +const { Message: { MessageType } } = require('../message/dht') /** * @typedef {import('multiformats/cid').CID} CID @@ -15,184 +19,178 @@ const utils = require('../utils') * @typedef {import('multiaddr').Multiaddr} Multiaddr */ -/** - * @param {import('../')} dht - */ -module.exports = (dht) => { +class ContentRouting { + /** + * @param {object} params + * @param {import('peer-id')} params.peerId + * @param {import('../network').Network} params.network + * @param {import('../peer-routing').PeerRouting} params.peerRouting + * @param {import('../query/manager').QueryManager} params.queryManager + * @param {import('../routing-table').RoutingTable} params.routingTable + * @param {import('../providers').Providers} params.providers + * @param {import('../types').PeerStore} params.peerStore + * @param {boolean} params.lan + */ + constructor ({ peerId, network, peerRouting, queryManager, routingTable, providers, peerStore, lan }) { + this._log = logger(`libp2p:kad-dht:${lan ? 'lan' : 'wan'}:content-routing`) + this._peerId = peerId + this._network = network + this._peerRouting = peerRouting + this._queryManager = queryManager + this._routingTable = routingTable + this._providers = providers + this._peerStore = peerStore + } + /** - * Check for providers from a single node. 
+ * Announce to the network that we can provide the value for a given key and + * are contactable on the given multiaddrs * - * @param {PeerId} peer * @param {CID} key - * - * @private + * @param {Multiaddr[]} multiaddrs + * @param {object} [options] + * @param {AbortSignal} [options.signal] */ - const findProvidersSingle = async (peer, key) => { // eslint-disable-line require-await - const msg = new Message(Message.TYPES.GET_PROVIDERS, key.bytes, 0) - return dht.network.sendRequest(peer, msg) - } + async * provide (key, multiaddrs, options = {}) { + this._log('provide %s', key) + + // Add peer as provider + await this._providers.addProvider(key, this._peerId) + + const msg = new Message(Message.TYPES.ADD_PROVIDER, key.bytes, 0) + msg.providerPeers = [{ + id: this._peerId, + multiaddrs + }] + + let sent = 0 - return { /** - * Announce to the network that we can provide the value for a given key - * - * @param {CID} key + * @param {import('../types').QueryEvent} event */ - async provide (key) { - dht._log(`provide: ${key}`) - - /** @type {Error[]} */ - const errors = [] - - // Add peer as provider - await dht.providers.addProvider(key, dht.peerId) - - const multiaddrs = dht.libp2p ? 
dht.libp2p.multiaddrs : [] - const msg = new Message(Message.TYPES.ADD_PROVIDER, key.bytes, 0) - msg.providerPeers = [{ - id: dht.peerId, - multiaddrs - }] - - /** - * @param {PeerId} peer - */ - async function mapPeer (peer) { - dht._log(`putProvider ${key} to ${peer.toB58String()}`) + const maybeNotifyPeer = (event) => { + return async () => { + if (event.name !== 'FINAL_PEER') { + return [event] + } + + const events = [] + + this._log('putProvider %s to %p', key, event.peer.id) + try { - await dht.network.sendMessage(peer, msg) + this._log('sending provider record for %s to %p', key, event.peer.id) + + for await (const sendEvent of this._network.sendMessage(event.peer.id, msg, options)) { + if (sendEvent.name === 'PEER_RESPONSE') { + this._log('sent provider record for %s to %p', key, event.peer.id) + sent++ + } + + events.push(sendEvent) + } } catch (/** @type {any} */ err) { - errors.push(err) + this._log.error('error sending provide record to peer %p', event.peer.id, err) + events.push(queryErrorEvent({ from: event.peer.id, error: err })) } - } - // Notify closest peers - await utils.mapParallel(dht.getClosestPeers(key.bytes), mapPeer) + return events + } + } - if (errors.length) { - // TODO: - // This should be infrequent. This means a peer we previously connected - // to failed to exchange the provide message. If getClosestPeers was an - // iterator, we could continue to pull until we announce to kBucketSize peers. 
- throw errcode(new Error(`Failed to provide to ${errors.length} of ${dht.kBucketSize} peers`), 'ERR_SOME_PROVIDES_FAILED', { errors }) + // Notify closest peers + yield * pipe( + this._peerRouting.getClosestPeers(key.multihash.bytes, options), + (source) => map(source, (event) => maybeNotifyPeer(event)), + (source) => parallel(source, { + ordered: false, + concurrency: ALPHA + }), + async function * (source) { + for await (const events of source) { + yield * events + } } - }, + ) + + this._log('sent provider records to %d peers', sent) + } + + /** + * Search the dht for up to `K` providers of the given CID. + * + * @param {CID} key + * @param {object} [options] - findProviders options + * @param {number} [options.maxNumProviders=5] - maximum number of providers to find + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * findProviders (key, options = { maxNumProviders: 5 }) { + const toFind = options.maxNumProviders || this._routingTable._kBucketSize + const target = key.multihash.bytes + const id = await convertBuffer(target) + const self = this + + this._log(`findProviders ${key}`) + + const provs = await this._providers.getProviders(key) + + // yield values if we have some, also slice because maybe we got lucky and already have too many? + if (provs.length) { + const providers = provs.slice(0, toFind).map(peerId => ({ + id: peerId, + multiaddrs: (this._peerStore.addressBook.get(peerId) || []).map(address => address.multiaddr) + })) + + yield peerResponseEvent({ from: this._peerId, messageType: MessageType.GET_PROVIDERS, providers }) + yield providerEvent({ from: this._peerId, providers: providers }) + } + + // All done + if (provs.length >= toFind) { + return + } /** - * Search the dht for up to `K` providers of the given CID. 
+ * The query function to use on this particular disjoint path * - * @param {CID} key - * @param {Object} [options] - findProviders options - * @param {number} [options.timeout=60000] - how long the query should maximally run, in milliseconds - * @param {number} [options.maxNumProviders=5] - maximum number of providers to find - * @returns {AsyncIterable<{ id: PeerId, multiaddrs: Multiaddr[] }>} + * @type {import('../query/types').QueryFunc} */ - async * findProviders (key, options = { timeout: 60000, maxNumProviders: 5 }) { - const providerTimeout = options.timeout || c.minute - const n = options.maxNumProviders || c.K - - dht._log(`findProviders ${key}`) - - const out = new LimitedPeerList(n) - const provs = await dht.providers.getProviders(key) - - provs - .forEach(id => { - /** @type {{ id: PeerId, addresses: { multiaddr: Multiaddr }[] }} */ - const peerData = dht.peerStore.get(id) - - if (peerData) { - out.push({ - id: peerData.id, - multiaddrs: peerData.addresses - .map((address) => address.multiaddr) - }) - } else { - out.push({ - id, - multiaddrs: [] - }) - } - }) + const findProvidersQuery = async function * ({ peer, signal }) { + const request = new Message(Message.TYPES.GET_PROVIDERS, target, 0) - // All done - if (out.length >= n) { - // yield values - for (const pData of out.toArray()) { - yield pData - } - return - } + yield * self._network.sendRequest(peer, request, { signal }) + } - // need more, query the network - /** @type {LimitedPeerList[]} */ - const paths = [] - - /** - * - * @param {number} pathIndex - * @param {number} numPaths - */ - function makePath (pathIndex, numPaths) { - // This function body runs once per disjoint path - const pathSize = utils.pathSize(n - out.length, numPaths) - const pathProviders = new LimitedPeerList(pathSize) - paths.push(pathProviders) - - /** - * The query function to use on this particular disjoint path - * - * @param {PeerId} peer - */ - async function queryDisjointPath (peer) { - const msg = await 
findProvidersSingle(peer, key) - const provs = msg.providerPeers - dht._log(`Found ${provs.length} provider entries for ${key}`) - - provs.forEach((prov) => { - pathProviders.push({ - ...prov - }) - }) - - // hooray we have all that we want - if (pathProviders.length >= pathSize) { - return { pathComplete: true } - } + const providers = new Set(provs.map(p => p.toB58String())) - // it looks like we want some more - return { closerPeers: msg.closerPeers } - } + for await (const event of this._queryManager.run(target, this._routingTable.closestPeers(id), findProvidersQuery, options)) { + yield event - return queryDisjointPath - } + if (event.name === 'PEER_RESPONSE') { + this._log(`Found ${event.providers.length} provider entries for ${key} and ${event.closer.length} closer peers`) + + const newProviders = [] - const query = new Query(dht, key.bytes, makePath) - const peers = dht.routingTable.closestPeers(key.bytes, dht.kBucketSize) - - try { - await pTimeout( - query.run(peers), - providerTimeout - ) - } catch (/** @type {any} */ err) { - if (err.name !== pTimeout.TimeoutError.name) { - throw err + for (const peer of event.providers) { + if (providers.has(peer.id.toB58String())) { + continue + } + + providers.add(peer.id.toB58String()) + newProviders.push(peer) } - } finally { - query.stop() - } - // combine peers from each path - paths.forEach((path) => { - path.toArray().forEach((peer) => { - out.push(peer) - }) - }) + if (newProviders.length) { + yield providerEvent({ from: event.from, providers: newProviders }) + } - for (const pData of out.toArray()) { - yield pData + if (providers.size === toFind) { + return + } } } } } + +module.exports.ContentRouting = ContentRouting diff --git a/src/dual-kad-dht.js b/src/dual-kad-dht.js new file mode 100644 index 00000000..f4fa38b2 --- /dev/null +++ b/src/dual-kad-dht.js @@ -0,0 +1,362 @@ +'use strict' + +const { EventEmitter } = require('events') +const PeerId = require('peer-id') +const { toString: uint8ArrayToString } = 
require('uint8arrays/to-string') +const utils = require('./utils') +const errCode = require('err-code') +const merge = require('it-merge') + +const log = utils.logger('libp2p:kad-dht') + +/** + * @typedef {import('libp2p')} Libp2p + * @typedef {import('libp2p/src/peer-store')} PeerStore + * @typedef {import('interface-datastore').Datastore} Datastore + * @typedef {import('libp2p/src/dialer')} Dialer + * @typedef {import('libp2p/src/registrar')} Registrar + * @typedef {import('multiformats/cid').CID} CID + * @typedef {import('multiaddr').Multiaddr} Multiaddr + * @typedef {import('./kad-dht').KadDHT} KadDHT + * @typedef {import('./types').DHT} DHT + * @typedef {import('./types').QueryEvent} QueryEvent + * @typedef {import('./types').SendingQueryEvent} SendingQueryEvent + * @typedef {import('./types').PeerResponseEvent} PeerResponseEvent + * @typedef {import('./types').FinalPeerEvent} FinalPeerEvent + * @typedef {import('./types').QueryErrorEvent} QueryErrorEvent + * @typedef {import('./types').ProviderEvent} ProviderEvent + * @typedef {import('./types').ValueEvent} ValueEvent + * @typedef {import('./types').AddingPeerEvent} AddingPeerEvent + * @typedef {import('./types').DialingPeerEvent} DialingPeerEvent + * + * @typedef {object} KadDHTOps + * @property {Libp2p} libp2p - the libp2p instance + * @property {string} [protocol = '/ipfs/kad/1.0.0'] - libp2p registrar handle protocol + * @property {number} kBucketSize - k-bucket size (default 20) + * @property {boolean} clientMode - If true, the DHT will not respond to queries. This should be true if your node will not be dialable. 
(default: false) + * @property {import('libp2p-interfaces/src/types').DhtValidators} validators - validators object with namespace as keys and function(key, record, callback) + * @property {object} selectors - selectors object with namespace as keys and function(key, records) + * @property {number} querySelfInterval - how often to search the network for peers close to ourselves + */ + +/** + * A DHT implementation modelled after Kademlia with S/Kademlia modifications. + * Original implementation in go: https://github.com/libp2p/go-libp2p-kad-dht. + */ +class DualKadDHT extends EventEmitter { + /** + * Create a new KadDHT. + * + * @param {KadDHT} wan + * @param {KadDHT} lan + * @param {Libp2p} libp2p + */ + constructor (wan, lan, libp2p) { + super() + + this._wan = wan + this._lan = lan + this._libp2p = libp2p + this._datastore = libp2p.datastore || this._wan._datastore + + // handle peers being discovered during processing of DHT messages + this._wan.on('peer', (peerData) => { + this.emit('peer', peerData) + }) + this._lan.on('peer', (peerData) => { + this.emit('peer', peerData) + }) + } + + /** + * Is this DHT running. + */ + isStarted () { + return this._wan.isStarted() && this._lan.isStarted() + } + + /** + * Whether we are in client or server mode + */ + enableServerMode () { + this._wan.enableServerMode() + } + + /** + * Whether we are in client or server mode + */ + enableClientMode () { + this._wan.enableClientMode() + } + + /** + * Start listening to incoming connections. + */ + async start () { + await Promise.all([ + this._lan.start(), + this._wan.start() + ]) + } + + /** + * Stop accepting incoming connections and sending outgoing + * messages. 
+ */ + async stop () { + await Promise.all([ + this._lan.stop(), + this._wan.stop() + ]) + } + + /** + * Store the given key/value pair in the DHT + * + * @param {Uint8Array} key + * @param {Uint8Array} value + * @param {object} [options] - put options + * @param {AbortSignal} [options.signal] + * @param {number} [options.minPeers] - minimum number of peers required to successfully put (default: closestPeers.length) + */ + async * put (key, value, options = {}) { // eslint-disable-line require-await + let counterAll = 0 + let counterErrors = 0 + + for await (const event of merge( + this._lan.put(key, value, options), + this._wan.put(key, value, options) + )) { + yield event + + if (event.name === 'SENDING_QUERY' && event.messageName === 'PUT_VALUE') { + counterAll++ + } + + if (event.name === 'QUERY_ERROR') { + counterErrors++ + } + } + + // verify if we were able to put to enough peers + const minPeers = options.minPeers || counterAll // Ensure we have a default `minPeers` + const counterSuccess = counterAll - counterErrors + + if (counterSuccess < minPeers) { + const error = errCode(new Error(`Failed to put value to enough peers: ${counterSuccess}/${minPeers}`), 'ERR_NOT_ENOUGH_PUT_PEERS') + log.error(error) + throw error + } + } + + /** + * Get the value that corresponds to the passed key + * + * @param {Uint8Array} key + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * get (key, options = {}) { // eslint-disable-line require-await + let queriedPeers = false + + for await (const event of merge( + this._lan.get(key, options), + this._wan.get(key, options) + )) { + yield event + + if (event.name === 'DIALING_PEER') { + queriedPeers = true + } + + if (event.name === 'VALUE') { + queriedPeers = true + } + + if (event.name === 'SENDING_QUERY') { + queriedPeers = true + } + } + + if (!queriedPeers) { + throw errCode(new Error('No peers found in routing table!'), 
'ERR_NO_PEERS_IN_ROUTING_TABLE') + } + } + + /** + * Remove the given key from the local datastore + * + * @param {Uint8Array} key + */ + async removeLocal (key) { + log(`removeLocal: ${uint8ArrayToString(key, 'base32')}`) + const dsKey = utils.bufferToKey(key) + + try { + await this._datastore.delete(dsKey) + } catch (/** @type {any} */ err) { + if (err.code === 'ERR_NOT_FOUND') { + return undefined + } + throw err + } + } + + // ----------- Content Routing + + /** + * Announce to the network that we can provide given key's value + * + * @param {CID} key + * @param {object} [options] + * @param {AbortSignal} [options.signal] + */ + async * provide (key, options = {}) { // eslint-disable-line require-await + let sent = 0 + let success = 0 + const errors = [] + + for await (const event of merge( + this._lan.provide(key, options), + this._wan.provide(key, options) + )) { + yield event + + if (event.name === 'SENDING_QUERY') { + sent++ + } + + if (event.name === 'QUERY_ERROR') { + errors.push(event.error) + } + + if (event.name === 'PEER_RESPONSE' && event.messageName === 'ADD_PROVIDER') { + log('sent provider record for %s to %p', key, event.from.id) + success++ + } + } + + if (success === 0) { + if (errors.length) { + // if all sends failed, throw an error to inform the caller + throw errCode(new Error(`Failed to provide to ${errors.length} of ${sent} peers`), 'ERR_PROVIDES_FAILED', { errors }) + } + + throw errCode(new Error('Failed to provide - no peers found'), 'ERR_PROVIDES_FAILED') + } + } + + /** + * Search the dht for up to `K` providers of the given CID. 
+ * + * @param {CID} key + * @param {object} [options] - findProviders options + * @param {number} [options.maxNumProviders=5] - maximum number of providers to find + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * findProviders (key, options = { maxNumProviders: 5 }) { + yield * merge( + this._lan.findProviders(key, options), + this._wan.findProviders(key, options) + ) + } + + // ----------- Peer Routing ----------- + + /** + * Search for a peer with the given ID + * + * @param {PeerId} id + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * findPeer (id, options = {}) { // eslint-disable-line require-await + let queriedPeers = false + + for await (const event of merge( + this._lan.findPeer(id, options), + this._wan.findPeer(id, options) + )) { + yield event + + if (event.name === 'SENDING_QUERY' || event.name === 'FINAL_PEER') { + queriedPeers = true + } + } + + if (!queriedPeers) { + throw errCode(new Error('Peer lookup failed'), 'ERR_LOOKUP_FAILED') + } + } + + /** + * Kademlia 'node lookup' operation. 
+ * + * @param {Uint8Array} key + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * getClosestPeers (key, options = {}) { + yield * merge( + this._lan.getClosestPeers(key, options), + this._wan.getClosestPeers(key, options) + ) + } + + /** + * Get the public key for the given peer id + * + * @param {PeerId} peer + * @param {object} [options] + * @param {AbortSignal} [options.signal] + */ + async getPublicKey (peer, options = {}) { + log('getPublicKey %p', peer) + + // local check + const peerData = this._libp2p.peerStore.get(peer) + + if (peerData && peerData.id.pubKey) { + log('getPublicKey: found local copy') + return peerData.id.pubKey + } + + // try the node directly + const pks = await Promise.all([ + this._lan.getPublicKey(peer, options), + this._wan.getPublicKey(peer, options) + ]) + + if (pks[0] && pks[1] && !pks[0].equals(pks[1])) { + throw errCode(new Error('Inconsistent public key loaded from wan and lan DHTs'), 'ERR_FAILED_TO_LOAD_KEY') + } + + const pk = pks[0] || pks[1] + + if (!pk) { + throw errCode(new Error('Failed to load public key'), 'ERR_FAILED_TO_LOAD_KEY') + } + + const peerId = new PeerId(peer.id, undefined, pk) + const addrs = ((peerData && peerData.addresses) || []).map((address) => address.multiaddr) + this._libp2p.peerStore.addressBook.add(peerId, addrs) + this._libp2p.peerStore.keyBook.set(peerId, pk) + + return pk + } + + async refreshRoutingTable () { + await Promise.all([ + this._lan.refreshRoutingTable(), + this._wan.refreshRoutingTable() + ]) + } +} + +module.exports = { + DualKadDHT +} diff --git a/src/errors.js b/src/errors.js deleted file mode 100644 index e69de29b..00000000 diff --git a/src/index.js b/src/index.js index d09dff20..003bbd63 100644 --- a/src/index.js +++ b/src/index.js @@ -1,579 +1,32 @@ 'use strict' -const { EventEmitter } = require('events') -const errcode = require('err-code') - -const libp2pRecord = require('libp2p-record') 
-const { MemoryDatastore } = require('datastore-core/memory') -const { equals: uint8ArrayEquals } = require('uint8arrays/equals') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') - -const RoutingTable = require('./routing-table') -const utils = require('./utils') -const c = require('./constants') -const Network = require('./network') -const contentFetching = require('./content-fetching') -const contentRouting = require('./content-routing') -const peerRouting = require('./peer-routing') -const Message = require('./message') -const Providers = require('./providers') -const QueryManager = require('./query-manager') - -const Record = libp2pRecord.Record - -/** - * @typedef {*} Libp2p - * @typedef {*} PeerStore - * @typedef {import('peer-id')} PeerId - * @typedef {import('interface-datastore').Datastore} Datastore - * @typedef {*} Dialer - * @typedef {*} Registrar - * @typedef {import('multiformats/cid').CID} CID - * @typedef {import('multiaddr').Multiaddr} Multiaddr - * @typedef {object} PeerData - * @property {PeerId} id - * @property {Multiaddr[]} multiaddrs - */ +const { KadDHT } = require('./kad-dht') +const { DualKadDHT } = require('./dual-kad-dht') /** - * A DHT implementation modeled after Kademlia with S/Kademlia modifications. - * Original implementation in go: https://github.com/libp2p/go-libp2p-kad-dht. + * @typedef {import('./types').DHT} DHT + * @typedef {import('./kad-dht').KadDHTOps} KadDHTOps */ -class KadDHT extends EventEmitter { - /** - * Create a new KadDHT. 
- * - * @param {Object} props - * @param {Libp2p} props.libp2p - the libp2p instance - * @param {Dialer} props.dialer - libp2p dialer instance - * @param {PeerId} props.peerId - peer's peerId - * @param {PeerStore} props.peerStore - libp2p peerStore - * @param {Registrar} props.registrar - libp2p registrar instance - * @param {string} [props.protocolPrefix = '/ipfs'] - libp2p registrar handle protocol - * @param {boolean} [props.forceProtocolLegacy = false] - WARNING: this is not recommended and should only be used for legacy purposes - * @param {number} props.kBucketSize - k-bucket size (default 20) - * @param {boolean} props.clientMode - If true, the DHT will not respond to queries. This should be true if your node will not be dialable. (default: false) - * @param {number} props.concurrency - alpha concurrency of queries (default 3) - * @param {Datastore} props.datastore - datastore (default MemoryDatastore) - * @param {object} props.validators - validators object with namespace as keys and function(key, record, callback) - * @param {object} props.selectors - selectors object with namespace as keys and function(key, records) - * @param {function(import('libp2p-record').Record, PeerId): void} [props.onPut] - Called when an entry is added to or changed in the datastore - * @param {function(import('libp2p-record').Record): void} [props.onRemove] - Called when an entry is removed from the datastore - */ - constructor ({ - libp2p, - dialer, - peerId, - peerStore, - registrar, - protocolPrefix = '/ipfs', - forceProtocolLegacy = false, - datastore = new MemoryDatastore(), - kBucketSize = c.K, - clientMode = false, - concurrency = c.ALPHA, - validators = {}, - selectors = {}, - onPut = () => {}, - onRemove = () => {} - }) { - super() - - if (!dialer) { - throw new Error('libp2p-kad-dht requires an instance of Dialer') - } - - /** - * Local reference to the libp2p instance. May be undefined. 
- * - * @type {Libp2p} - */ - this.libp2p = libp2p - - /** - * Local reference to the libp2p dialer instance - * - * @type {Dialer} - */ - this.dialer = dialer - - /** - * Local peer-id - * - * @type {PeerId} - */ - this.peerId = peerId - - /** - * Local PeerStore - * - * @type {PeerStore} - */ - this.peerStore = peerStore - - /** - * Local peer info - * - * @type {Registrar} - */ - this.registrar = registrar - - /** - * Registrar protocol - * - * @type {string} - */ - this.protocol = protocolPrefix + (forceProtocolLegacy ? '' : c.PROTOCOL_DHT) - - /** - * k-bucket size - * - * @type {number} - */ - this.kBucketSize = kBucketSize - - this._clientMode = clientMode - - /** - * ALPHA concurrency at which each query path with run, defaults to 3 - * - * @type {number} - */ - this.concurrency = concurrency - - /** - * Number of disjoint query paths to use - * This is set to `kBucketSize`/2 per the S/Kademlia paper - * - * @type {number} - */ - this.disjointPaths = Math.ceil(this.kBucketSize / 2) - - /** - * The routing table. - * - * @type {RoutingTable} - */ - this.routingTable = new RoutingTable(this, { kBucketSize: this.kBucketSize }) - - /** - * Reference to the datastore, uses an in-memory store if none given. 
- * - * @type {Datastore} - */ - this.datastore = datastore - - /** - * Provider management - * - * @type {Providers} - */ - this.providers = new Providers(this.datastore, this.peerId) - - this.validators = { - pk: libp2pRecord.validator.validators.pk, - ...validators - } - - this.selectors = { - pk: libp2pRecord.selection.selectors.pk, - ...selectors - } - - this.network = new Network(this) - - this._log = utils.logger(this.peerId) - - /** - * Keeps track of running queries - * - * @type {QueryManager} - */ - this._queryManager = new QueryManager() - - this._running = false - - // DHT components - this.contentFetching = contentFetching(this) - this.contentRouting = contentRouting(this) - this.peerRouting = peerRouting(this) - - // datastore events - this.onPut = onPut - this.onRemove = onRemove - } - - /** - * Is this DHT running. - */ - get isStarted () { - return this._running - } - - /** - * Start listening to incoming connections. - */ - start () { - this._running = true - - return Promise.all([ - this.providers.start(), - this._queryManager.start(), - this.network.start(), - this.routingTable.start() - ]) - } - - /** - * Stop accepting incoming connections and sending outgoing - * messages. - */ - stop () { - this._running = false - - return Promise.all([ - this.providers.stop(), - this._queryManager.stop(), - this.network.stop(), - this.routingTable.stop() - ]) - } - - /** - * Store the given key/value pair in the DHT. - * - * @param {Uint8Array} key - * @param {Uint8Array} value - * @param {Object} [options] - put options - * @param {number} [options.minPeers] - minimum number of peers required to successfully put (default: closestPeers.length) - * @returns {Promise} - */ - async put (key, value, options = {}) { // eslint-disable-line require-await - return this.contentFetching.put(key, value, options) - } - - /** - * Get the value to the given key. - * Times out after 1 minute by default. 
- * - * @param {Uint8Array} key - * @param {Object} [options] - get options - * @param {number} [options.timeout] - optional timeout (default: 60000) - * @returns {Promise} - */ - async get (key, options = {}) { // eslint-disable-line require-await - return this.contentFetching.get(key, options) - } - - /** - * Get the `n` values to the given key without sorting. - * - * @param {Uint8Array} key - * @param {number} nvals - * @param {Object} [options] - get options - * @param {number} [options.timeout] - optional timeout (default: 60000) - */ - async getMany (key, nvals, options = {}) { // eslint-disable-line require-await - return this.contentFetching.getMany(key, nvals, options) - } - - /** - * Remove the given key from the local datastore. - * - * @param {Uint8Array} key - */ - async removeLocal (key) { - this._log(`removeLocal: ${uint8ArrayToString(key, 'base32')}`) - const dsKey = utils.bufferToKey(key) - - try { - await this.datastore.delete(dsKey) - } catch (/** @type {any} */ err) { - if (err.code === 'ERR_NOT_FOUND') { - return undefined - } - throw err - } - } - - /** - * @param {Uint8Array} key - * @param {Uint8Array} value - */ - async _putLocal (key, value) { - this._log(`_putLocal: ${uint8ArrayToString(key, 'base32')}`) - const dsKey = utils.bufferToKey(key) - - await this.datastore.put(dsKey, value) - } - - // ----------- Content Routing - - /** - * Announce to the network that we can provide given key's value. - * - * @param {CID} key - * @returns {Promise} - */ - async provide (key) { // eslint-disable-line require-await - return this.contentRouting.provide(key) - } - - /** - * Search the dht for up to `K` providers of the given CID. 
- * - * @param {CID} key - * @param {Object} [options] - findProviders options - * @param {number} [options.timeout=60000] - how long the query should maximally run, in milliseconds (default: 60000) - * @param {number} [options.maxNumProviders=5] - maximum number of providers to find - * @returns {AsyncIterable<{ id: PeerId, multiaddrs: Multiaddr[] }>} - */ - async * findProviders (key, options = { timeout: 6000, maxNumProviders: 5 }) { - for await (const peerData of this.contentRouting.findProviders(key, options)) { - yield peerData - } - } - // ----------- Peer Routing ----------- - - /** - * Search for a peer with the given ID. - * - * @param {PeerId} id - * @param {Object} [options] - findPeer options - * @param {number} [options.timeout=60000] - how long the query should maximally run, in milliseconds (default: 60000) - * @returns {Promise<{ id: PeerId, multiaddrs: Multiaddr[] }>} - */ - async findPeer (id, options = { timeout: 60000 }) { // eslint-disable-line require-await - return this.peerRouting.findPeer(id, options) - } - - /** - * Kademlia 'node lookup' operation. - * - * @param {Uint8Array} key - * @param {Object} [options] - * @param {boolean} [options.shallow = false] - shallow query - */ - async * getClosestPeers (key, options = { shallow: false }) { - yield * this.peerRouting.getClosestPeers(key, options) - } - - /** - * Get the public key for the given peer id. - * - * @param {PeerId} peer - */ - getPublicKey (peer) { - return this.peerRouting.getPublicKey(peer) - } - - // ----------- Discovery ----------- - - /** - * @param {PeerId} peerId - * @param {Multiaddr[]} multiaddrs - */ - _peerDiscovered (peerId, multiaddrs) { - this.emit('peer', { - id: peerId, - multiaddrs - }) - } - - // ----------- Internals ----------- - - /** - * Returns the routing tables closest peers, for the key of - * the message. 
- * - * @param {Message} msg - */ - async _nearestPeersToQuery (msg) { - const key = await utils.convertBuffer(msg.key) - const ids = this.routingTable.closestPeers(key, this.kBucketSize) - - return ids.map((p) => { - /** @type {{ id: PeerId, addresses: { multiaddr: Multiaddr }[] }} */ - const peer = this.peerStore.get(p) - - return { - id: p, - multiaddrs: peer ? peer.addresses.map((address) => address.multiaddr) : [] - } - }) - } - - /** - * Get the nearest peers to the given query, but iff closer - * than self. - * - * @param {Message} msg - * @param {PeerId} peerId - */ - async _betterPeersToQuery (msg, peerId) { - this._log('betterPeersToQuery') - const closer = await this._nearestPeersToQuery(msg) - - return closer.filter((closer) => { - if (this._isSelf(closer.id)) { - // Should bail, not sure - this._log.error('trying to return self as closer') - return false - } - - return !closer.id.isEqual(peerId) - }) - } - - /** - * Try to fetch a given record by from the local datastore. - * Returns the record iff it is still valid, meaning - * - it was either authored by this node, or - * - it was received less than `MAX_RECORD_AGE` ago. 
- * - * @param {Uint8Array} key - */ - - async _checkLocalDatastore (key) { - this._log(`checkLocalDatastore: ${uint8ArrayToString(key)} %b`, key) - const dsKey = utils.bufferToKey(key) - - // Fetch value from ds - let rawRecord - try { - rawRecord = await this.datastore.get(dsKey) - } catch (/** @type {any} */ err) { - if (err.code === 'ERR_NOT_FOUND') { - return undefined - } - throw err - } - - // Create record from the returned bytes - const record = Record.deserialize(rawRecord) - - if (!record) { - throw errcode(new Error('Invalid record'), 'ERR_INVALID_RECORD') - } - - // Check validity: compare time received with max record age - if (record.timeReceived == null || - utils.now() - record.timeReceived.getTime() > c.MAX_RECORD_AGE) { - // If record is bad delete it and return - await this.datastore.delete(dsKey) - this.onRemove(record) - return undefined - } - - // Record is valid - return record - } - - /** - * Add the peer to the routing table and update it in the peerStore. - * - * @param {PeerId} peerId - */ - async _add (peerId) { - await this.routingTable.add(peerId) - } - - /** - * Verify a record without searching the DHT. - * - * @param {import('libp2p-record').Record} record - */ - async _verifyRecordLocally (record) { - this._log('verifyRecordLocally') - - await libp2pRecord.validator.verifyRecord(this.validators, record) - } - - /** - * Is the given peer id our PeerId? - * - * @param {PeerId} other - */ - _isSelf (other) { - return other && uint8ArrayEquals(this.peerId.id, other.id) - } - - /** - * Store the given key/value pair at the peer `target`. 
- * - * @param {Uint8Array} key - * @param {Uint8Array} rec - encoded record - * @param {PeerId} target - */ - async _putValueToPeer (key, rec, target) { - const msg = new Message(Message.TYPES.PUT_VALUE, key, 0) - msg.record = Record.deserialize(rec) - - const resp = await this.network.sendRequest(target, msg) - - if (resp.record && !uint8ArrayEquals(resp.record.value, Record.deserialize(rec).value)) { - throw errcode(new Error('value not put correctly'), 'ERR_PUT_VALUE_INVALID') - } - } - - /** - * Query a particular peer for the value for the given key. - * It will either return the value or a list of closer peers. - * - * Note: The peerStore is updated with new addresses found for the given peer. - * - * @param {PeerId} peer - * @param {Uint8Array} key - */ - async _getValueOrPeers (peer, key) { - const msg = await this._getValueSingle(peer, key) - - const peers = msg.closerPeers - const record = msg.record - - if (record) { - // We have a record - try { - await this._verifyRecordOnline(record) - } catch (/** @type {any} */ err) { - const errMsg = 'invalid record received, discarded' - this._log(errMsg) - throw errcode(new Error(errMsg), 'ERR_INVALID_RECORD') - } - - return { record, peers } - } - - if (peers.length > 0) { - return { peers } - } - - throw errcode(new Error('Not found'), 'ERR_NOT_FOUND') - } - - /** - * Get a value via rpc call for the given parameters. - * - * @param {PeerId} peer - * @param {Uint8Array} key - */ - async _getValueSingle (peer, key) { // eslint-disable-line require-await - const msg = new Message(Message.TYPES.GET_VALUE, key, 0) - return this.network.sendRequest(peer, msg) - } - - /** - * Verify a record, fetching missing public keys from the network. - * Calls back with an error if the record is invalid. 
- * - * @param {import('libp2p-record').Record} record - * @returns {Promise} - */ - async _verifyRecordOnline (record) { - await libp2pRecord.validator.verifyRecord(this.validators, record) +module.exports = { + /** + * @param {KadDHTOps} opts + * @returns {DHT} + */ + create: (opts) => { + return new DualKadDHT( + new KadDHT({ + ...opts, + protocol: '/ipfs/kad/1.0.0', + lan: false + }), + new KadDHT({ + ...opts, + protocol: '/ipfs/lan/kad/1.0.0', + clientMode: false, + lan: true + }), + opts.libp2p + ) } } - -module.exports = KadDHT -module.exports.multicodec = '/ipfs' + c.PROTOCOL_DHT diff --git a/src/kad-dht.js b/src/kad-dht.js new file mode 100644 index 00000000..b9558052 --- /dev/null +++ b/src/kad-dht.js @@ -0,0 +1,466 @@ +'use strict' + +const { EventEmitter } = require('events') +const crypto = require('libp2p-crypto') +const libp2pRecord = require('libp2p-record') +const { MemoryDatastore } = require('datastore-core/memory') +const { RoutingTable } = require('./routing-table') +const { RoutingTableRefresh } = require('./routing-table/refresh') +const utils = require('./utils') +const { + K, + QUERY_SELF_INTERVAL +} = require('./constants') +const { Network } = require('./network') +const { ContentFetching } = require('./content-fetching') +const { ContentRouting } = require('./content-routing') +const { PeerRouting } = require('./peer-routing') +const { Providers } = require('./providers') +const { QueryManager } = require('./query/manager') +const { RPC } = require('./rpc') +const { TopologyListener } = require('./topology-listener') +const { QuerySelf } = require('./query-self') +const { + removePrivateAddresses, + removePublicAddresses +} = require('./utils') + +/** + * @typedef {import('libp2p')} Libp2p + * @typedef {import('libp2p/src/peer-store')} PeerStore + * @typedef {import('interface-datastore').Datastore} Datastore + * @typedef {import('libp2p/src/dialer')} Dialer + * @typedef {import('libp2p/src/registrar')} Registrar + * @typedef 
{import('multiformats/cid').CID} CID + * @typedef {import('multiaddr').Multiaddr} Multiaddr + * @typedef {import('peer-id')} PeerId + * @typedef {import('./types').DHT} DHT + * @typedef {import('./types').PeerData} PeerData + * @typedef {import('./types').QueryEvent} QueryEvent + * @typedef {import('./types').SendingQueryEvent} SendingQueryEvent + * @typedef {import('./types').PeerResponseEvent} PeerResponseEvent + * @typedef {import('./types').FinalPeerEvent} FinalPeerEvent + * @typedef {import('./types').QueryErrorEvent} QueryErrorEvent + * @typedef {import('./types').ProviderEvent} ProviderEvent + * @typedef {import('./types').ValueEvent} ValueEvent + * @typedef {import('./types').AddingPeerEvent} AddingPeerEvent + * @typedef {import('./types').DialingPeerEvent} DialingPeerEvent + * + * @typedef {object} KadDHTOps + * @property {Libp2p} libp2p - the libp2p instance + * @property {string} [protocol = '/ipfs/kad/1.0.0'] - libp2p registrar handle protocol + * @property {number} kBucketSize - k-bucket size (default 20) + * @property {boolean} clientMode - If true, the DHT will not respond to queries. This should be true if your node will not be dialable. (default: false) + * @property {import('libp2p-interfaces/src/types').DhtValidators} validators - validators object with namespace as keys and function(key, record, callback) + * @property {object} selectors - selectors object with namespace as keys and function(key, records) + * @property {number} querySelfInterval - how often to search the network for peers close to ourselves + * @property {boolean} lan + * @property {PeerData[]} bootstrapPeers + */ + +/** + * A DHT implementation modelled after Kademlia with S/Kademlia modifications. + * Original implementation in go: https://github.com/libp2p/go-libp2p-kad-dht. + */ +class KadDHT extends EventEmitter { + /** + * Create a new KadDHT. 
+ * + * @param {KadDHTOps} opts + */ + constructor ({ + libp2p, + kBucketSize = K, + clientMode = true, + validators = {}, + selectors = {}, + querySelfInterval = QUERY_SELF_INTERVAL, + lan = true, + protocol = '/ipfs/lan/kad/1.0.0', + bootstrapPeers = [] + }) { + super() + + this._running = false + this._log = utils.logger(`libp2p:kad-dht:${lan ? 'lan' : 'wan'}`) + + /** + * Local reference to the libp2p instance + * + * @type {Libp2p} + */ + this._libp2p = libp2p + + /** + * Registrar protocol + * + * @type {string} + */ + this._protocol = protocol + + /** + * k-bucket size + * + * @type {number} + */ + this._kBucketSize = kBucketSize + + /** + * Whether we are in client or server mode + */ + this._clientMode = clientMode + + /** + * Will be added to the routing table on startup + */ + this._bootstrapPeers = bootstrapPeers + + /** + * The routing table. + * + * @type {RoutingTable} + */ + this._routingTable = new RoutingTable({ + peerId: libp2p.peerId, + dialer: libp2p, + kBucketSize, + lan + }) + + /** + * Reference to the datastore, uses an in-memory store if none given. 
+ * + * @type {Datastore} + */ + this._datastore = libp2p.datastore || new MemoryDatastore() + + /** + * Provider management + * + * @type {Providers} + */ + this._providers = new Providers(this._datastore) + + /** + * @type {boolean} + */ + this._lan = lan + + this._validators = { + pk: libp2pRecord.validator.validators.pk, + ...validators + } + + this._selectors = { + pk: libp2pRecord.selection.selectors.pk, + ...selectors + } + + this._network = new Network({ + dialer: libp2p, + protocol: this._protocol, + lan + }) + /** + * Keeps track of running queries + * + * @type {QueryManager} + */ + this._queryManager = new QueryManager({ + peerId: libp2p.peerId, + // Number of disjoint query paths to use - This is set to `kBucketSize/2` per the S/Kademlia paper + disjointPaths: Math.ceil(kBucketSize / 2), + lan + }) + + // DHT components + this._peerRouting = new PeerRouting({ + peerId: libp2p.peerId, + routingTable: this._routingTable, + peerStore: libp2p.peerStore, + network: this._network, + validators: this._validators, + queryManager: this._queryManager, + lan + }) + this._contentFetching = new ContentFetching({ + peerId: libp2p.peerId, + datastore: this._datastore, + validators: this._validators, + selectors: this._selectors, + peerRouting: this._peerRouting, + queryManager: this._queryManager, + routingTable: this._routingTable, + network: this._network, + lan + }) + this._contentRouting = new ContentRouting({ + peerId: libp2p.peerId, + network: this._network, + peerRouting: this._peerRouting, + queryManager: this._queryManager, + routingTable: this._routingTable, + providers: this._providers, + peerStore: libp2p.peerStore, + lan + }) + this._routingTableRefresh = new RoutingTableRefresh({ + peerRouting: this._peerRouting, + routingTable: this._routingTable, + lan + }) + this._rpc = new RPC({ + routingTable: this._routingTable, + peerId: libp2p.peerId, + providers: this._providers, + peerStore: libp2p.peerStore, + addressable: libp2p, + peerRouting: 
this._peerRouting, + datastore: this._datastore, + validators: this._validators, + lan + }) + this._topologyListener = new TopologyListener({ + registrar: libp2p.registrar, + protocol: this._protocol + }) + this._querySelf = new QuerySelf({ + peerId: libp2p.peerId, + peerRouting: this._peerRouting, + interval: querySelfInterval, + lan + }) + + // handle peers being discovered during processing of DHT messages + this._network.on('peer', (peerData) => { + this._routingTable.add(peerData.id).catch(err => { + this._log.error(`Could not add ${peerData.id} to routing table`, err) + }) + + this.emit('peer', peerData) + }) + + // handle peers being discovered via other peer discovery mechanisms + this._topologyListener.on('peer', async (peerId) => { + this._routingTable.add(peerId).catch(err => { + this._log.error(`Could not add ${peerId} to routing table`, err) + }) + }) + } + + /** + * @param {PeerData} peerData + */ + async onPeerConnect (peerData) { + if (this._lan) { + peerData = removePublicAddresses(peerData) + } else { + peerData = removePrivateAddresses(peerData) + } + + if (!peerData.multiaddrs.length) { + return + } + + try { + const has = await this._routingTable.find(peerData.id) + + if (!has) { + await this._routingTable.add(peerData.id) + } + } catch (err) { + this._log.error('Could not add %p to routing table', peerData.id, err) + } + } + + /** + * Is this DHT running. + */ + isStarted () { + return this._running + } + + /** + * Whether we are in client or server mode + */ + enableServerMode () { + this._log('enabling server mode') + this._clientMode = false + this._libp2p.handle(this._protocol, this._rpc.onIncomingStream.bind(this._rpc)) + } + + /** + * Whether we are in client or server mode + */ + enableClientMode () { + this._log('enabling client mode') + this._clientMode = true + this._libp2p.unhandle(this._protocol) + } + + /** + * Start listening to incoming connections. 
+ */ + async start () { + this._running = true + + // Only respond to queries when not in client mode + if (this._clientMode) { + this.enableClientMode() + } else { + this.enableServerMode() + } + + await Promise.all([ + this._providers.start(), + this._queryManager.start(), + this._network.start(), + this._routingTable.start(), + this._routingTableRefresh.start(), + this._topologyListener.start(), + this._querySelf.start() + ]) + + await Promise.all( + this._bootstrapPeers.map(peerData => this._routingTable.add(peerData.id)) + ) + + await this.refreshRoutingTable() + } + + /** + * Stop accepting incoming connections and sending outgoing + * messages. + */ + async stop () { + this._running = false + + await Promise.all([ + this._providers.stop(), + this._queryManager.stop(), + this._network.stop(), + this._routingTable.stop(), + this._routingTableRefresh.stop(), + this._topologyListener.stop(), + this._querySelf.stop() + ]) + } + + /** + * Store the given key/value pair in the DHT + * + * @param {Uint8Array} key + * @param {Uint8Array} value + * @param {object} [options] - put options + * @param {AbortSignal} [options.signal] + * @param {number} [options.minPeers] - minimum number of peers required to successfully put (default: closestPeers.length) + */ + async * put (key, value, options = {}) { // eslint-disable-line require-await + yield * this._contentFetching.put(key, value, options) + } + + /** + * Get the value that corresponds to the passed key + * + * @param {Uint8Array} key + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * get (key, options = {}) { // eslint-disable-line require-await + yield * this._contentFetching.get(key, options) + } + + // ----------- Content Routing + + /** + * Announce to the network that we can provide given key's value + * + * @param {CID} key + * @param {object} [options] + * @param {AbortSignal} [options.signal] + */ + async * provide (key, 
options = {}) { // eslint-disable-line require-await + yield * this._contentRouting.provide(key, this._libp2p.multiaddrs, options) + } + + /** + * Search the dht for up to `K` providers of the given CID. + * + * @param {CID} key + * @param {object} [options] - findProviders options + * @param {number} [options.maxNumProviders=5] - maximum number of providers to find + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * findProviders (key, options = { maxNumProviders: 5 }) { + yield * this._contentRouting.findProviders(key, options) + } + + // ----------- Peer Routing ----------- + + /** + * Search for a peer with the given ID + * + * @param {PeerId} id + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * findPeer (id, options = {}) { // eslint-disable-line require-await + yield * this._peerRouting.findPeer(id, options) + } + + /** + * Kademlia 'node lookup' operation. 
+ * + * @param {Uint8Array} key + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * getClosestPeers (key, options = {}) { + yield * this._peerRouting.getClosestPeers(key, options) + } + + /** + * Get the public key for the given peer id + * + * @param {PeerId} peer + * @param {object} [options] + * @param {AbortSignal} [options.signal] + */ + async getPublicKey (peer, options = {}) { + this._log('getPublicKey %p', peer) + + // try the node directly + let pk + + for await (const event of this._peerRouting.getPublicKeyFromNode(peer, options)) { + if (event.name === 'VALUE') { + pk = crypto.keys.unmarshalPublicKey(event.value) + } + } + + if (!pk) { + // try dht directly + const pkKey = utils.keyForPublicKey(peer) + + for await (const event of this.get(pkKey, options)) { + if (event.name === 'VALUE') { + pk = crypto.keys.unmarshalPublicKey(event.value) + } + } + } + + return pk + } + + async refreshRoutingTable () { + await this._routingTableRefresh.refreshTable(true) + } +} + +module.exports = { + KadDHT +} diff --git a/src/message/index.js b/src/message/index.js index 183477d2..b7122a1d 100644 --- a/src/message/index.js +++ b/src/message/index.js @@ -7,6 +7,7 @@ const Proto = require('./dht') const MESSAGE_TYPE = Proto.Message.MessageType const CONNECTION_TYPE = Proto.Message.ConnectionType +const MESSAGE_TYPE_LOOKUP = Object.keys(MESSAGE_TYPE) /** * @typedef {0|1|2|3|4} ConnectionType @@ -16,7 +17,7 @@ const CONNECTION_TYPE = Proto.Message.ConnectionType * @property {Uint8Array[]} addrs * @property {ConnectionType} connection * - * @typedef {import('../index').PeerData} PeerData + * @typedef {import('../types').PeerData} PeerData */ /** @@ -136,4 +137,6 @@ function fromPbPeer (peer) { } } -module.exports = Message +module.exports.Message = Message +module.exports.MESSAGE_TYPE = MESSAGE_TYPE +module.exports.MESSAGE_TYPE_LOOKUP = MESSAGE_TYPE_LOOKUP diff --git a/src/network.js 
b/src/network.js index 7f7e87be..f96793e7 100644 --- a/src/network.js +++ b/src/network.js @@ -1,41 +1,46 @@ 'use strict' const errcode = require('err-code') - const { pipe } = require('it-pipe') const lp = require('it-length-prefixed') -const pTimeout = require('p-timeout') -const { consume } = require('streaming-iterables') +const drain = require('it-drain') const first = require('it-first') - -const MulticodecTopology = require('libp2p-interfaces/src/topology/multicodec-topology') - -const rpc = require('./rpc') -const c = require('./constants') -const Message = require('./message') +const { Message, MESSAGE_TYPE_LOOKUP } = require('./message') const utils = require('./utils') +const { EventEmitter } = require('events') +const { + dialingPeerEvent, + sendingQueryEvent, + peerResponseEvent, + queryErrorEvent +} = require('./query/events') /** * @typedef {import('peer-id')} PeerId * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream + * @typedef {import('./types').QueryEvent} QueryEvent + * @typedef {import('./types').PeerData} PeerData */ /** * Handle network operations for the dht */ -class Network { +class Network extends EventEmitter { /** * Create a new network * - * @param {import('./index')} dht + * @param {object} params + * @param {import('./types').Dialer} params.dialer + * @param {string} params.protocol + * @param {boolean} params.lan */ - constructor (dht) { - this.dht = dht - this.readMessageTimeout = c.READ_MESSAGE_TIMEOUT - this._log = utils.logger(this.dht.peerId, 'net') - this._rpc = rpc(this.dht) - this._onPeerConnected = this._onPeerConnected.bind(this) + constructor ({ dialer, protocol, lan }) { + super() + + this._log = utils.logger(`libp2p:kad-dht:${lan ? 
'lan' : 'wan'}:network`) this._running = false + this._dialer = dialer + this._protocol = protocol } /** @@ -46,42 +51,14 @@ class Network { return } - if (!this.dht.isStarted) { - throw errcode(new Error('Can not start network'), 'ERR_CANNOT_START_NETWORK') - } - this._running = true - - // Only respond to queries when not in client mode - if (this.dht._clientMode === false) { - // Incoming streams - this.dht.registrar.handle(this.dht.protocol, this._rpc) - } - - // register protocol with topology - const topology = new MulticodecTopology({ - multicodecs: [this.dht.protocol], - handlers: { - onConnect: this._onPeerConnected, - onDisconnect: () => {} - } - }) - this._registrarId = this.dht.registrar.register(topology) } /** * Stop all network activity */ stop () { - if (!this.dht.isStarted && !this.isStarted) { - return - } this._running = false - - // unregister protocol and handlers - if (this._registrarId) { - this.dht.registrar.unregister(this._registrarId) - } } /** @@ -94,49 +71,35 @@ class Network { } /** - * Are all network components there? - * - * @type {boolean} - */ - get isConnected () { - // TODO add a way to check if switch has started or not - return this.dht.isStarted && this.isStarted - } - - /** - * Registrar notifies a connection successfully with dht protocol. + * Send a request and record RTT for latency measurements * - * @param {PeerId} peerId - remote peer id - */ - async _onPeerConnected (peerId) { - await this.dht._add(peerId) - this._log('added to the routing table: %s', peerId.toB58String()) - } - - /** - * Send a request and record RTT for latency measurements. - * - * @async * @param {PeerId} to - The peer that should receive a message - * @param {Message} msg - The message to send. 
+ * @param {Message} msg - The message to send + * @param {object} [options] + * @param {AbortSignal} [options.signal] */ - async sendRequest (to, msg) { - // TODO: record latency - if (!this.isConnected) { - throw errcode(new Error('Network is offline'), 'ERR_NETWORK_OFFLINE') - } + async * sendRequest (to, msg, options = {}) { + this._log('sending %s to %p', MESSAGE_TYPE_LOOKUP[msg.type], to) - const id = to.toB58String() - this._log('sending to: %s', id) + try { + yield dialingPeerEvent({ peer: to }) - let conn = this.dht.registrar.connectionManager.get(to) - if (!conn) { - conn = await this.dht.dialer.connectToPeer(to) - } + const { stream } = await this._dialer.dialProtocol(to, this._protocol, options) - const { stream } = await conn.newStream(this.dht.protocol) + yield sendingQueryEvent({ to, type: msg.type }) - return this._writeReadMessage(stream, msg.serialize()) + const response = await this._writeReadMessage(stream, msg.serialize(), options) + + yield peerResponseEvent({ + from: to, + messageType: response.type, + closer: response.closerPeers, + providers: response.providerPeers, + record: response.record + }) + } catch (/** @type {any} */ err) { + yield queryErrorEvent({ from: to, error: err }) + } } /** @@ -144,82 +107,88 @@ class Network { * * @param {PeerId} to * @param {Message} msg + * @param {object} [options] + * @param {AbortSignal} [options.signal] */ - async sendMessage (to, msg) { - if (!this.isConnected) { - throw errcode(new Error('Network is offline'), 'ERR_NETWORK_OFFLINE') - } + async * sendMessage (to, msg, options = {}) { + this._log('sending %s to %p', MESSAGE_TYPE_LOOKUP[msg.type], to) - const id = to.toB58String() - this._log('sending to: %s', id) + yield dialingPeerEvent({ peer: to }) - let conn = this.dht.registrar.connectionManager.get(to) - if (!conn) { - conn = await this.dht.dialer.connectToPeer(to) - } - const { stream } = await conn.newStream(this.dht.protocol) + const { stream } = await this._dialer.dialProtocol(to, 
this._protocol, options) - return this._writeMessage(stream, msg.serialize()) + yield sendingQueryEvent({ to, type: msg.type }) + + try { + await this._writeMessage(stream, msg.serialize(), options) + + yield peerResponseEvent({ from: to, messageType: msg.type }) + } catch (/** @type {any} */ err) { + yield queryErrorEvent({ from: to, error: err }) + } } /** - * Write a message and read its response. - * If no response is received after the specified timeout - * this will error out. + * Write a message to the given stream * * @param {MuxedStream} stream - the stream to use * @param {Uint8Array} msg - the message to send + * @param {object} [options] + * @param {AbortSignal} [options.signal] */ - async _writeReadMessage (stream, msg) { // eslint-disable-line require-await - return pTimeout( - writeReadMessage(stream, msg), - this.readMessageTimeout + async _writeMessage (stream, msg, options = {}) { + await pipe( + [msg], + lp.encode(), + stream, + drain ) } /** - * Write a message to the given stream. + * Write a message and read its response. + * If no response is received after the specified timeout + * this will error out. 
* * @param {MuxedStream} stream - the stream to use * @param {Uint8Array} msg - the message to send + * @param {object} [options] + * @param {AbortSignal} [options.signal] */ - _writeMessage (stream, msg) { - return pipe( + async _writeReadMessage (stream, msg, options = {}) { + const res = await pipe( [msg], lp.encode(), stream, - consume + lp.decode(), + /** + * @param {AsyncIterable} source + */ + async source => { + const buf = await first(source) + + if (buf) { + return buf.slice() + } + } ) - } -} -/** - * @param {MuxedStream} stream - * @param {Uint8Array} msg - */ -async function writeReadMessage (stream, msg) { - const res = await pipe( - [msg], - lp.encode(), - stream, - lp.decode(), - /** - * @param {AsyncIterable} source - */ - async source => { - const buf = await first(source) - - if (buf) { - return buf.slice() - } + if (res.length === 0) { + throw errcode(new Error('No message received'), 'ERR_NO_MESSAGE_RECEIVED') } - ) - if (res.length === 0) { - throw errcode(new Error('No message received'), 'ERR_NO_MESSAGE_RECEIVED') - } + const message = Message.deserialize(res) + + // tell any listeners about new peers we've seen + message.closerPeers.forEach(peerData => { + this.emit('peer', peerData) + }) + message.providerPeers.forEach(peerData => { + this.emit('peer', peerData) + }) - return Message.deserialize(res) + return message + } } -module.exports = Network +module.exports.Network = Network diff --git a/src/peer-list/index.js b/src/peer-list/index.js index d5b102f9..840aa5ef 100644 --- a/src/peer-list/index.js +++ b/src/peer-list/index.js @@ -2,7 +2,6 @@ /** * @typedef {import('peer-id')} PeerId - * @typedef {import('../').PeerData} PeerData */ /** @@ -10,18 +9,18 @@ */ class PeerList { constructor () { - /** @type {PeerData[]} */ + /** @type {PeerId[]} */ this.list = [] } /** * Add a new peer. 
Returns `true` if it was a new one * - * @param {PeerData} peerData + * @param {PeerId} peerId */ - push (peerData) { - if (!this.has(peerData.id)) { - this.list.push(peerData) + push (peerId) { + if (!this.has(peerId)) { + this.list.push(peerId) return true } @@ -35,7 +34,7 @@ class PeerList { * @param {PeerId} peerId */ has (peerId) { - const match = this.list.find((i) => i.id.equals(peerId)) + const match = this.list.find((i) => i.equals(peerId)) return Boolean(match) } diff --git a/src/peer-list/limited-peer-list.js b/src/peer-list/limited-peer-list.js deleted file mode 100644 index a88255b4..00000000 --- a/src/peer-list/limited-peer-list.js +++ /dev/null @@ -1,37 +0,0 @@ -'use strict' - -const PeerList = require('.') - -/** - * @typedef {import('../').PeerData} PeerData - */ - -/** - * Like PeerList but with a length restriction. - */ -class LimitedPeerList extends PeerList { - /** - * Create a new limited peer list. - * - * @param {number} limit - */ - constructor (limit) { - super() - this.limit = limit - } - - /** - * Add a PeerData if it fits in the list - * - * @param {PeerData} peerData - */ - push (peerData) { - if (this.length < this.limit) { - return super.push(peerData) - } - - return false - } -} - -module.exports = LimitedPeerList diff --git a/src/peer-list/peer-distance-list.js b/src/peer-list/peer-distance-list.js index 2c9d943d..4009cb71 100644 --- a/src/peer-list/peer-distance-list.js +++ b/src/peer-list/peer-distance-list.js @@ -2,13 +2,11 @@ const utils = require('../utils') const pMap = require('p-map') -const { equals: uint8ArrayEquals } = require('uint8arrays/equals') const { compare: uint8ArrayCompare } = require('uint8arrays/compare') const { xor: uint8ArrayXor } = require('uint8arrays/xor') /** * @typedef {import('peer-id')} PeerId - * @typedef {import('../').PeerData} PeerData */ /** @@ -49,7 +47,7 @@ class PeerDistanceList { * @param {PeerId} peerId */ async add (peerId) { - if (this.peerDistances.find(pd => 
uint8ArrayEquals(pd.peerId.id, peerId.id))) { + if (this.peerDistances.find(pd => pd.peerId.equals(peerId))) { return } diff --git a/src/peer-list/peer-queue.js b/src/peer-list/peer-queue.js deleted file mode 100644 index c62d8f4a..00000000 --- a/src/peer-list/peer-queue.js +++ /dev/null @@ -1,89 +0,0 @@ -'use strict' - -// @ts-ignore -const Heap = require('heap') -const { xor: uint8ArrayXor } = require('uint8arrays/xor') -const debug = require('debug') - -const utils = require('../utils') - -const log = debug('libp2p:dht:peer-queue') - -/** - * @typedef {import('peer-id')} PeerId - */ - -/** - * PeerQueue is a heap that sorts its entries (PeerIds) by their - * xor distance to the inital provided key. - */ -class PeerQueue { - /** - * Create from a given peer id. - * - * @param {PeerId} id - * @returns {Promise} - */ - static async fromPeerId (id) { - const key = await utils.convertPeerId(id) - - return new PeerQueue(key) - } - - /** - * Create from a given Uint8Array. - * - * @param {Uint8Array} keyBuffer - * @returns {Promise} - */ - static async fromKey (keyBuffer) { - const key = await utils.convertBuffer(keyBuffer) - - return new PeerQueue(key) - } - - /** - * Create a new PeerQueue. - * - * @param {Uint8Array} from - The sha2-256 encoded peer id - */ - constructor (from) { - log('create: %b', from) - this.from = from - this.heap = new Heap(utils.xorCompare) - } - - /** - * Add a new PeerId to the queue. - * - * @param {PeerId} id - */ - async enqueue (id) { - log('enqueue %s', id.toB58String()) - const key = await utils.convertPeerId(id) - - const el = { - id: id, - distance: uint8ArrayXor(this.from, key) - } - - this.heap.push(el) - } - - /** - * Returns the closest peer to the `from` peer. 
- * - * @returns {PeerId} - */ - dequeue () { - const el = this.heap.pop() - log('dequeue %s', el.id.toB58String()) - return el.id - } - - get length () { - return this.heap.size() - } -} - -module.exports = PeerQueue diff --git a/src/peer-routing/index.js b/src/peer-routing/index.js index 53ac35ba..032ec0d9 100644 --- a/src/peer-routing/index.js +++ b/src/peer-routing/index.js @@ -1,40 +1,67 @@ 'use strict' const errcode = require('err-code') -const pTimeout = require('p-timeout') - +const { validator } = require('libp2p-record') const PeerId = require('peer-id') -const crypto = require('libp2p-crypto') const { toString: uint8ArrayToString } = require('uint8arrays/to-string') - -const c = require('../constants') -const Message = require('../message') -const Query = require('../query') - +const { Message } = require('../message') const utils = require('../utils') +const { + queryErrorEvent, + finalPeerEvent, + valueEvent +} = require('../query/events') +const PeerDistanceList = require('../peer-list/peer-distance-list') +const { Record } = require('libp2p-record') /** * @typedef {import('multiaddr').Multiaddr} Multiaddr + * @typedef {import('../types').PeerData} PeerData */ -/** - * @param {import('../index')} dht - */ -module.exports = (dht) => { +class PeerRouting { + /** + * @param {object} params + * @param {import('peer-id')} params.peerId + * @param {import('../routing-table').RoutingTable} params.routingTable + * @param {import('../types').PeerStore} params.peerStore + * @param {import('../network').Network} params.network + * @param {import('libp2p-interfaces/src/types').DhtValidators} params.validators + * @param {import('../query/manager').QueryManager} params.queryManager + * @param {boolean} params.lan + */ + constructor ({ peerId, routingTable, peerStore, network, validators, queryManager, lan }) { + this._peerId = peerId + this._routingTable = routingTable + this._peerStore = peerStore + this._network = network + this._validators = validators + 
this._queryManager = queryManager + this._log = utils.logger(`libp2p:kad-dht:${lan ? 'lan' : 'wan'}:peer-routing`) + } + /** * Look if we are connected to a peer with the given id. * Returns its id and addresses, if found, otherwise `undefined`. * * @param {PeerId} peer */ - const findPeerLocal = async (peer) => { - dht._log(`findPeerLocal ${peer.toB58String()}`) - const p = await dht.routingTable.find(peer) + async findPeerLocal (peer) { + let peerData + const p = await this._routingTable.find(peer) + + if (p) { + this._log('findPeerLocal found %p in routing table', peer) + peerData = this._peerStore.get(p) + } - /** @type {{ id: PeerId, addresses: { multiaddr: Multiaddr }[] }} */ - const peerData = p && dht.peerStore.get(p) + if (!peerData) { + peerData = this._peerStore.get(peer) + } if (peerData) { + this._log('findPeerLocal found %p in peer store', peer) + return { id: peerData.id, multiaddrs: peerData.addresses.map((address) => address.multiaddr) @@ -47,254 +74,245 @@ module.exports = (dht) => { * * @param {PeerId} peer * @param {Uint8Array} key - * @returns {Promise} - * @private + * @param {object} [options] + * @param {AbortSignal} [options.signal] */ - const getValueSingle = async (peer, key) => { // eslint-disable-line require-await + async * _getValueSingle (peer, key, options = {}) { // eslint-disable-line require-await const msg = new Message(Message.TYPES.GET_VALUE, key, 0) - return dht.network.sendRequest(peer, msg) - } - - /** - * Find close peers for a given peer - * - * @param {Uint8Array} key - * @param {PeerId} peer - * @returns {Promise>} - * @private - */ - - const closerPeersSingle = async (key, peer) => { - dht._log(`closerPeersSingle ${uint8ArrayToString(key, 'base32')} from ${peer.toB58String()}`) - const msg = await dht.peerRouting._findPeerSingle(peer, new PeerId(key)) - - return msg.closerPeers - .filter((peerData) => !dht._isSelf(peerData.id)) - .map((peerData) => { - dht.peerStore.addressBook.add(peerData.id, peerData.multiaddrs) - - 
return peerData - }) + yield * this._network.sendRequest(peer, msg, options) } /** * Get the public key directly from a node. * * @param {PeerId} peer + * @param {object} [options] + * @param {AbortSignal} [options.signal] */ - const getPublicKeyFromNode = async (peer) => { + async * getPublicKeyFromNode (peer, options) { const pkKey = utils.keyForPublicKey(peer) - const msg = await getValueSingle(peer, pkKey) - if (!msg.record || !msg.record.value) { - throw errcode(new Error(`Node not responding with its public key: ${peer.toB58String()}`), 'ERR_INVALID_RECORD') - } + for await (const event of this._getValueSingle(peer, pkKey, options)) { + yield event - const recPeer = await PeerId.createFromPubKey(msg.record.value) + if (event.name === 'PEER_RESPONSE' && event.record) { + const recPeer = await PeerId.createFromPubKey(event.record.value) - // compare hashes of the pub key - if (!recPeer.equals(peer)) { - throw errcode(new Error('public key does not match id'), 'ERR_PUBLIC_KEY_DOES_NOT_MATCH_ID') + // compare hashes of the pub key + if (!recPeer.equals(peer)) { + throw errcode(new Error('public key does not match id'), 'ERR_PUBLIC_KEY_DOES_NOT_MATCH_ID') + } + + yield valueEvent({ from: peer, value: recPeer.pubKey.bytes }) + } } - return recPeer.pubKey + throw errcode(new Error(`Node not responding with its public key: ${peer.toB58String()}`), 'ERR_INVALID_RECORD') } - return { /** - * Ask peer `peer` if they know where the peer with id `target` is. + * Search for a peer with the given ID. 
* - * @param {PeerId} peer - * @param {PeerId} target - * @returns {Promise} - * @private + * @param {PeerId} id + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] */ - async _findPeerSingle (peer, target) { // eslint-disable-line require-await - dht._log('findPeerSingle %s', peer.toB58String()) - const msg = new Message(Message.TYPES.FIND_NODE, target.id, 0) + async * findPeer (id, options = {}) { + this._log('findPeer %p', id) + + // Try to find locally + const pi = await this.findPeerLocal(id) + + // already got it + if (pi != null) { + this._log('found local') + yield finalPeerEvent({ + from: this._peerId, + peer: pi + }) + return + } - return dht.network.sendRequest(peer, msg) - }, + const key = await utils.convertPeerId(id) + const peers = this._routingTable.closestPeers(key) - /** - * Search for a peer with the given ID. - * - * @param {PeerId} id - * @param {Object} [options] - findPeer options - * @param {number} [options.timeout=60000] - how long the query should maximally run, in milliseconds - * @returns {Promise<{ id: PeerId, multiaddrs: Multiaddr[] }>} - */ - async findPeer (id, options = { timeout: 60000 }) { - options.timeout = options.timeout || c.minute - dht._log('findPeer %s', id.toB58String()) + // sanity check + const match = peers.find((p) => p.equals(id)) - // Try to find locally - const pi = await findPeerLocal(id) + if (match) { + const peer = this._peerStore.get(id) - // already got it - if (pi != null) { - dht._log('found local') - return pi + if (peer) { + this._log('found in peerStore') + yield finalPeerEvent({ + from: this._peerId, + peer: { + id: peer.id, + multiaddrs: peer.addresses.map((address) => address.multiaddr) + } + }) + return } + } - const key = await utils.convertPeerId(id) - const peers = dht.routingTable.closestPeers(key, dht.kBucketSize) + const self = this - if (peers.length === 0) { - throw errcode(new Error('Peer lookup failed'), 
'ERR_LOOKUP_FAILED') - } + /** + * @type {import('../query/types').QueryFunc} + */ + const findPeerQuery = async function * ({ peer, signal }) { + const request = new Message(Message.TYPES.FIND_NODE, id.toBytes(), 0) - // sanity check - const match = peers.find((p) => p.isEqual(id)) - if (match) { - /** @type {{ id: PeerId, addresses: { multiaddr: Multiaddr }[] }} */ - const peer = dht.peerStore.get(id) + for await (const event of self._network.sendRequest(peer, request, { signal })) { + yield event - if (peer) { - dht._log('found in peerStore') - return { - id: peer.id, - multiaddrs: peer.addresses.map((address) => address.multiaddr) - } - } - } + if (event.name === 'PEER_RESPONSE') { + const match = event.closer.find((p) => p.id.equals(id)) - // query the network - const query = new Query(dht, id.id, () => { - /** - * There is no distinction between the disjoint paths, so there are no per-path - * variables in dht scope. Just return the actual query function. - * - * @param {PeerId} peer - */ - const queryFn = async (peer) => { - const msg = await this._findPeerSingle(peer, id) - const match = msg.closerPeers.find((p) => p.id.isEqual(id)) - - // found it + // found the peer if (match) { - return { - peer: match, - queryComplete: true - } - } - - return { - closerPeers: msg.closerPeers + yield finalPeerEvent({ from: event.from, peer: match }) } } - - return queryFn - }) - - let result - try { - result = await pTimeout(query.run(peers), options.timeout) - } finally { - query.stop() } + } - let success = false - result.paths.forEach((result) => { - if (result.success && result.peer) { - success = true - dht.peerStore.addressBook.add(result.peer.id, result.peer.multiaddrs) - } - }) - dht._log('findPeer %s: %s', id.toB58String(), success) + let foundPeer = false - if (!success) { - throw errcode(new Error('No peer found'), 'ERR_NOT_FOUND') + for await (const event of this._queryManager.run(id.id, peers, findPeerQuery, options)) { + if (event.name === 'FINAL_PEER') { + 
foundPeer = true } - /** @type {{ id: PeerId, addresses: { multiaddr: Multiaddr }[] }} */ - const peerData = dht.peerStore.get(id) + yield event + } - if (!peerData) { - throw errcode(new Error('No peer found in peer store'), 'ERR_NOT_FOUND') - } + if (!foundPeer) { + yield queryErrorEvent({ from: this._peerId, error: errcode(new Error('Not found'), 'ERR_NOT_FOUND') }) + } + } - return { - id: peerData.id, - multiaddrs: peerData.addresses.map((address) => address.multiaddr) - } - }, + /** + * Kademlia 'node lookup' operation + * + * @param {Uint8Array} key - the key to look up, could be a the bytes from a multihash or a peer ID + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + */ + async * getClosestPeers (key, options = {}) { + this._log('getClosestPeers to %b', key) + const id = await utils.convertBuffer(key) + const tablePeers = this._routingTable.closestPeers(id) + const self = this + + const peers = new PeerDistanceList(id, this._routingTable._kBucketSize) + tablePeers.forEach(peer => peers.add(peer)) /** - * Kademlia 'node lookup' operation. - * - * @param {Uint8Array} key - * @param {Object} [options] - * @param {boolean} [options.shallow=false] - shallow query - * @returns {AsyncIterable} + * @type {import('../query/types').QueryFunc} */ - async * getClosestPeers (key, options = { shallow: false }) { - dht._log('getClosestPeers to %b', key) - - const id = await utils.convertBuffer(key) - const tablePeers = dht.routingTable.closestPeers(id, dht.kBucketSize) - - const q = new Query(dht, key, () => { - // There is no distinction between the disjoint paths, - // so there are no per-path variables in dht scope. - // Just return the actual query function. - return async (peer) => { - const closer = await closerPeersSingle(key, peer) - - return { - closerPeers: closer, - pathComplete: options.shallow ? 
true : undefined - } - } - }) + const getCloserPeersQuery = async function * ({ peer, signal }) { + self._log('closerPeersSingle %s from %p', uint8ArrayToString(key, 'base32'), peer) + const request = new Message(Message.TYPES.FIND_NODE, key, 0) - const res = await q.run(tablePeers) - if (!res || !res.finalSet) { - return [] - } + yield * self._network.sendRequest(peer, request, { signal }) + } - const sorted = await utils.sortClosestPeers(Array.from(res.finalSet), id) + for await (const event of this._queryManager.run(key, tablePeers, getCloserPeersQuery, options)) { + yield event - for (const pId of sorted.slice(0, dht.kBucketSize)) { - yield pId + if (event.name === 'PEER_RESPONSE') { + event.closer.forEach(peerData => { + peers.add(peerData.id) + }) } - }, + } - /** - * Get the public key for the given peer id. - * - * @param {PeerId} peer - */ - async getPublicKey (peer) { - dht._log('getPublicKey %s', peer.toB58String()) + this._log('found %d peers close to %b', peers.length, key) - // local check - /** @type {{ id: PeerId, addresses: { multiaddr: Multiaddr }[] }} */ - const peerData = dht.peerStore.get(peer) + yield * peers.peers.map(peer => finalPeerEvent({ + from: this._peerId, + peer: { + id: peer, + multiaddrs: (this._peerStore.addressBook.get(peer) || []).map(addr => addr.multiaddr) + } + })) + } - if (peerData && peerData.id.pubKey) { - dht._log('getPublicKey: found local copy') - return peerData.id.pubKey + /** + * Query a particular peer for the value for the given key. + * It will either return the value or a list of closer peers. + * + * Note: The peerStore is updated with new addresses found for the given peer. 
+ * + * @param {PeerId} peer + * @param {Uint8Array} key + * @param {object} [options] + * @param {AbortSignal} [options.signal] + */ + async * getValueOrPeers (peer, key, options = {}) { + for await (const event of this._getValueSingle(peer, key, options)) { + if (event.name === 'PEER_RESPONSE') { + if (event.record) { + // We have a record + try { + await this._verifyRecordOnline(event.record) + } catch (/** @type {any} */ err) { + const errMsg = 'invalid record received, discarded' + this._log(errMsg) + + yield queryErrorEvent({ from: event.from, error: errcode(new Error(errMsg), 'ERR_INVALID_RECORD') }) + continue + } + } } - // try the node directly - let pk + yield event + } + } - try { - pk = await getPublicKeyFromNode(peer) - } catch (/** @type {any} */ err) { - // try dht directly - const pkKey = utils.keyForPublicKey(peer) - const value = await dht.get(pkKey) - pk = crypto.keys.unmarshalPublicKey(value) - } + /** + * Verify a record, fetching missing public keys from the network. + * Calls back with an error if the record is invalid. + * + * @param {import('../types').DHTRecord} record + * @returns {Promise} + */ + async _verifyRecordOnline ({ key, value, timeReceived }) { + await validator.verifyRecord(this._validators, new Record(key, value, timeReceived)) + } - const peerId = new PeerId(peer.id, undefined, pk) - const addrs = ((peerData && peerData.addresses) || []).map((address) => address.multiaddr) - dht.peerStore.addressBook.add(peerId, addrs) - dht.peerStore.keyBook.set(peerId, pk) + /** + * Get the nearest peers to the given query, but if closer + * than self + * + * @param {Uint8Array} key + * @param {PeerId} closerThan + */ + async getCloserPeersOffline (key, closerThan) { + const id = await utils.convertBuffer(key) + const ids = this._routingTable.closestPeers(id) + const output = ids + .map((p) => { + const peer = this._peerStore.get(p) + + return { + id: p, + multiaddrs: peer ? 
peer.addresses.map((address) => address.multiaddr) : [] + } + }) + .filter((closer) => !closer.id.equals(closerThan)) - return pk + if (output.length) { + this._log('getCloserPeersOffline found %d peer(s) closer to %b than %p', output.length, key, closerThan) + } else { + this._log('getCloserPeersOffline could not find peer closer to %b than %p', key, closerThan) } + + return output } } + +module.exports.PeerRouting = PeerRouting diff --git a/src/providers.js b/src/providers.js index dcdaffc3..de37d848 100644 --- a/src/providers.js +++ b/src/providers.js @@ -6,8 +6,16 @@ const varint = require('varint') const PeerId = require('peer-id') const { Key } = require('interface-datastore/key') const { default: Queue } = require('p-queue') -const c = require('./constants') +const { + PROVIDERS_CLEANUP_INTERVAL, + PROVIDERS_VALIDITY, + PROVIDERS_LRU_CACHE_SIZE, + PROVIDERS_KEY_PREFIX +} = require('./constants') const utils = require('./utils') +const { toString: uint8ArrayToString } = require('uint8arrays/to-string') + +const log = utils.logger('libp2p:kad-dht:providers') /** * @typedef {import('multiformats/cid').CID} CID @@ -29,34 +37,31 @@ const utils = require('./utils') class Providers { /** * @param {Datastore} datastore - * @param {PeerId} [self] * @param {number} [cacheSize=256] */ - constructor (datastore, self, cacheSize) { + constructor (datastore, cacheSize) { this.datastore = datastore - this._log = utils.logger(self, 'providers') - /** * How often invalid records are cleaned. (in seconds) * * @type {number} */ - this.cleanupInterval = c.PROVIDERS_CLEANUP_INTERVAL + this.cleanupInterval = PROVIDERS_CLEANUP_INTERVAL /** * How long is a provider valid for. 
(in seconds) * * @type {number} */ - this.provideValidity = c.PROVIDERS_VALIDITY + this.provideValidity = PROVIDERS_VALIDITY /** * LRU cache size * * @type {number} */ - this.lruCacheSize = cacheSize || c.PROVIDERS_LRU_CACHE_SIZE + this.lruCacheSize = cacheSize || PROVIDERS_LRU_CACHE_SIZE // @ts-ignore hashlru types are wrong this.providers = cache(this.lruCacheSize) @@ -100,7 +105,6 @@ class Providers { */ _cleanup () { return this.syncQueue.add(async () => { - this._log('start cleanup') const start = Date.now() let count = 0 @@ -109,7 +113,8 @@ class Providers { const batch = this.datastore.batch() // Get all provider entries from the datastore - const query = this.datastore.query({ prefix: c.PROVIDERS_KEY_PREFIX }) + const query = this.datastore.query({ prefix: PROVIDERS_KEY_PREFIX }) + for await (const entry of query) { try { // Add a delete to the batch for each expired entry @@ -118,8 +123,9 @@ class Providers { const now = Date.now() const delta = now - time const expired = delta > this.provideValidity - this._log('comparing: %d - %d = %d > %d %s', - now, time, delta, this.provideValidity, expired ? '(expired)' : '') + + log('comparing: %d - %d = %d > %d %s', now, time, delta, this.provideValidity, expired ? 
'(expired)' : '') + if (expired) { deleteCount++ batch.delete(entry.key) @@ -129,24 +135,28 @@ class Providers { } count++ } catch (/** @type {any} */ err) { - this._log.error(err.message) + log.error(err.message) } } - this._log('deleting %d / %d entries', deleteCount, count) // Commit the deletes to the datastore if (deleted.size) { + log('deleting %d / %d entries', deleteCount, count) await batch.commit() + } else { + log('nothing to delete') } // Clear expired entries from the cache for (const [cid, peers] of deleted) { const key = makeProviderKey(cid) const provs = this.providers.get(key) + if (provs) { for (const peerId of peers) { provs.delete(peerId) } + if (provs.size === 0) { this.providers.remove(key) } else { @@ -155,7 +165,7 @@ class Providers { } } - this._log('Cleanup successful (%dms)', Date.now() - start) + log('Cleanup successful (%dms)', Date.now() - start) }) } @@ -170,10 +180,12 @@ class Providers { async _getProvidersMap (cid) { const cacheKey = makeProviderKey(cid) let provs = this.providers.get(cacheKey) + if (!provs) { provs = await loadProviders(this.datastore, cid) this.providers.set(cacheKey, provs) } + return provs } @@ -186,15 +198,16 @@ class Providers { */ async addProvider (cid, provider) { // eslint-disable-line require-await return this.syncQueue.add(async () => { - this._log('addProvider %s', cid.toString()) + log('addProvider %s', cid.toString()) const provs = await this._getProvidersMap(cid) - this._log('loaded %s provs', provs.size) + log('loaded %s provs', provs.size) const now = new Date() - provs.set(utils.encodeBase32(provider.id), now) + provs.set(provider.toString(), now) const dsKey = makeProviderKey(cid) this.providers.set(dsKey, provs) + return writeProviderEntry(this.datastore, cid, provider, now) }) } @@ -207,10 +220,11 @@ class Providers { */ async getProviders (cid) { // eslint-disable-line require-await return this.syncQueue.add(async () => { - this._log('getProviders %s', cid.toString()) + log('getProviders %s', 
cid.toString()) const provs = await this._getProvidersMap(cid) - return [...provs.keys()].map((base32PeerId) => { - return new PeerId(utils.decodeBase32(base32PeerId)) + + return [...provs.keys()].map(peerIdStr => { + return PeerId.parse(peerIdStr) }) }) } @@ -225,8 +239,9 @@ class Providers { * @private */ function makeProviderKey (cid) { - cid = typeof cid === 'string' ? cid : utils.encodeBase32(cid.bytes) - return c.PROVIDERS_KEY_PREFIX + cid + cid = typeof cid === 'string' ? cid : uint8ArrayToString(cid.multihash.bytes, 'base32') + + return PROVIDERS_KEY_PREFIX + cid } /** @@ -241,11 +256,12 @@ async function writeProviderEntry (store, cid, peer, time) { // eslint-disable-l const dsKey = [ makeProviderKey(cid), '/', - utils.encodeBase32(peer.id) + peer.toString() ].join('') const key = new Key(dsKey) const buffer = Uint8Array.from(varint.encode(time.getTime())) + return store.put(key, buffer) } @@ -256,6 +272,7 @@ async function writeProviderEntry (store, cid, peer, time) { // eslint-disable-l */ function parseProviderKey (key) { const parts = key.toString().split('/') + if (parts.length !== 4) { throw new Error('incorrectly formatted provider entry key in datastore: ' + key) } @@ -278,10 +295,12 @@ function parseProviderKey (key) { async function loadProviders (store, cid) { const providers = new Map() const query = store.query({ prefix: makeProviderKey(cid) }) + for await (const entry of query) { const { peerId } = parseProviderKey(entry.key) providers.set(peerId, readTime(entry.value)) } + return providers } @@ -292,4 +311,4 @@ function readTime (buf) { return varint.decode(buf) } -module.exports = Providers +module.exports.Providers = Providers diff --git a/src/query-manager.js b/src/query-manager.js deleted file mode 100644 index 33ce5a59..00000000 --- a/src/query-manager.js +++ /dev/null @@ -1,56 +0,0 @@ -'use strict' - -/** - * @typedef {import('./query')} Query - */ - -/** - * Keeps track of all running queries. 
- */ -class QueryManager { - /** - * Creates a new QueryManager. - */ - constructor () { - this.queries = new Set() - this.running = false - } - - /** - * Called when a query is started. - * - * @param {Query} query - */ - queryStarted (query) { - this.queries.add(query) - } - - /** - * Called when a query completes. - * - * @param {Query} query - */ - queryCompleted (query) { - this.queries.delete(query) - } - - /** - * Starts the query manager. - */ - start () { - this.running = true - } - - /** - * Stops all queries. - */ - stop () { - this.running = false - for (const query of this.queries) { - query.stop() - } - this.queries.clear() - } -} - -module.exports = QueryManager diff --git a/src/query-self.js b/src/query-self.js new file mode 100644 index 00000000..3295898c --- /dev/null +++ b/src/query-self.js @@ -0,0 +1,78 @@ +'use strict' + +const { EventEmitter } = require('events') +const take = require('it-take') +const length = require('it-length') +const { QUERY_SELF_INTERVAL, K } = require('./constants') +const utils = require('./utils') + +/** + * Receives notifications of new peers joining the network that support the DHT protocol + */ +class QuerySelf extends EventEmitter { + /** + * Create a new network + * + * @param {object} params + * @param {import('peer-id')} params.peerId + * @param {import('./peer-routing').PeerRouting} params.peerRouting + * @param {number} [params.count] - how many peers to find + * @param {number} [params.interval] - how often to find them + * @param {boolean} params.lan + */ + constructor ({ peerId, peerRouting, lan, count = K, interval = QUERY_SELF_INTERVAL }) { + super() + + this._log = utils.logger(`libp2p:kad-dht:${lan ? 
'lan' : 'wan'}:query-self`) + this._running = false + this._peerId = peerId + this._peerRouting = peerRouting + this._count = count + this._interval = interval + } + + /** + * Start the network + */ + start () { + if (this._running) { + return + } + + this._running = true + this._querySelf() + } + + /** + * Stop all network activity + */ + stop () { + this._running = false + + if (this._timeoutId) { + clearTimeout(this._timeoutId) + } + + if (this._controller) { + this._controller.abort() + } + } + + async _querySelf () { + try { + this._controller = new AbortController() + + const found = await length(await take(this._peerRouting.getClosestPeers(this._peerId.toBytes(), { + signal: this._controller.signal + }), this._count)) + + this._log('query ran successfully - found %d peers', found) + } catch (err) { + this._log('query error', err) + } finally { + this._timeoutId = setTimeout(this._querySelf.bind(this), this._interval) + } + } +} + +module.exports.QuerySelf = QuerySelf diff --git a/src/query/disjoint-path.js b/src/query/disjoint-path.js new file mode 100644 index 00000000..a7a29088 --- /dev/null +++ b/src/query/disjoint-path.js @@ -0,0 +1,228 @@ +'use strict' + +const { default: Queue } = require('p-queue') +const { xor } = require('uint8arrays/xor') +const { toString } = require('uint8arrays/to-string') +const defer = require('p-defer') +const errCode = require('err-code') +const { convertPeerId, convertBuffer } = require('../utils') +const { TimeoutController } = require('timeout-abort-controller') +const { anySignal } = require('any-signal') +const { queryErrorEvent } = require('./events') + +const MAX_XOR = BigInt('0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF') + +/** + * @typedef {import('peer-id')} PeerId + * @typedef {import('../types').QueryEvent} QueryEvent + * @typedef {import('./types').QueryFunc} QueryFunc + */ + +/** + * Walks a path through the DHT, calling the passed query function for + * every peer encountered that we 
have not seen before. + * + * @param {object} context + * @param {Uint8Array} context.key - what are we trying to find + * @param {PeerId} context.startingPeer - where we start our query + * @param {PeerId} context.ourPeerId - who we are + * @param {Set} context.peersSeen - list of base58btc peer IDs all paths have traversed + * @param {AbortSignal} context.signal - when to stop querying + * @param {QueryFunc} context.query - the query function to run with each peer + * @param {number} context.alpha - how many concurrent node/value lookups to run + * @param {number} context.pathIndex - how many concurrent node/value lookups to run + * @param {number} context.numPaths - how many concurrent node/value lookups to run + * @param {import('events').EventEmitter} context.cleanUp - will emit a 'cleanup' event if the caller exits the for..await of early + * @param {number} [context.queryFuncTimeout] - a timeout for queryFunc in ms + * @param {ReturnType} context.log + */ +module.exports.disjointPathQuery = async function * disjointPathQuery ({ key, startingPeer, ourPeerId, peersSeen, signal, query, alpha, pathIndex, numPaths, cleanUp, queryFuncTimeout, log }) { + // Only ALPHA node/value lookups are allowed at any given time for each process + // https://github.com/libp2p/specs/tree/master/kad-dht#alpha-concurrency-parameter-%CE%B1 + const queue = new Queue({ + concurrency: alpha + }) + + // perform lookups on kadId, not the actual value + const kadId = await convertBuffer(key) + + /** + * Adds the passed peer to the query queue if it's not us and no + * other path has passed through this peer + * + * @param {PeerId} peer + * @param {Uint8Array} peerKadId + */ + function queryPeer (peer, peerKadId) { + if (!peer) { + return + } + + peersSeen.add(peer.toB58String()) + + const peerXor = BigInt('0x' + toString(xor(peerKadId, kadId), 'base16')) + + queue.add(async () => { + let timeout + const signals = [signal] + + if (queryFuncTimeout != null) { + timeout = new 
TimeoutController(queryFuncTimeout) + signals.push(timeout.signal) + } + + const compoundSignal = anySignal(signals) + + try { + for await (const event of query({ + key, + peer, + signal: compoundSignal, + pathIndex, + numPaths + })) { + if (compoundSignal.aborted) { + return + } + + // if there are closer peers and the query has not completed, continue the query + if (event.name === 'PEER_RESPONSE') { + for (const closerPeer of event.closer) { + if (peersSeen.has(closerPeer.id.toB58String())) { // eslint-disable-line max-depth + log('already seen %p in query', closerPeer.id) + continue + } + + if (ourPeerId.equals(closerPeer.id)) { // eslint-disable-line max-depth + log('not querying ourselves') + continue + } + + const closerPeerKadId = await convertPeerId(closerPeer.id) + const closerPeerXor = BigInt('0x' + toString(xor(closerPeerKadId, kadId), 'base16')) + + // only continue query if closer peer is actually closer + if (closerPeerXor > peerXor) { // eslint-disable-line max-depth + log('skipping %p as they are not closer to %b than %p', closerPeer.id, key, peer) + // TODO: uncomment this + // continue + } + + log('querying closer peer %p', closerPeer.id) + queryPeer(closerPeer.id, closerPeerKadId) + } + } + + // @ts-ignore simulate p-queue@7.x.x event + queue.emit('completed', event) + } + + timeout && timeout.clear() + } catch (/** @type {any} */ err) { + if (signal.aborted) { + // @ts-ignore simulate p-queue@7.x.x event + queue.emit('error', err) + } else { + // @ts-ignore simulate p-queue@7.x.x event + queue.emit('completed', queryErrorEvent({ + from: peer, + error: err + })) + } + } finally { + timeout && timeout.clear() + } + }, { + // use xor value as the queue priority - closer peers should execute first + // subtract it from MAX_XOR because higher priority values execute sooner + + // @ts-expect-error this is supposed to be a Number but it's ok to use BigInts + // as long as all priorities are BigInts since we won't mix BigInts and Number + // values in 
arithmetic operations + priority: MAX_XOR - peerXor + }) + } + + // begin the query with the starting peer + queryPeer(startingPeer, await convertPeerId(startingPeer)) + + // yield results as they come in + yield * toGenerator(queue, signal, cleanUp, log) +} + +/** + * @param {Queue} queue + * @param {AbortSignal} signal + * @param {import('events').EventEmitter} cleanUp + * @param {ReturnType} log + */ +async function * toGenerator (queue, signal, cleanUp, log) { + let deferred = defer() + let running = true + /** @type {QueryEvent[]} */ + const results = [] + + const cleanup = () => { + if (!running) { + return + } + + log('clean up queue, results %d, queue size %d, pending tasks %d', results.length, queue.size, queue.pending) + + running = false + queue.clear() + results.splice(0, results.length) + } + + // @ts-expect-error 'completed' event is in p-queue@7.x.x + queue.on('completed', result => { + results.push(result) + deferred.resolve() + }) + // @ts-expect-error 'error' event is in p-queue@7.x.x + queue.on('error', err => { + log('queue error', err) + cleanup() + deferred.reject(err) + }) + queue.on('idle', () => { + log('queue idle') + running = false + deferred.resolve() + }) + + // clear the queue and throw if the query is aborted + signal.addEventListener('abort', () => { + log('abort queue') + const wasRunning = running + cleanup() + + if (wasRunning) { + deferred.reject(errCode(new Error('Query aborted'), 'ERR_QUERY_ABORTED')) + } + }) + + // the user broke out of the loop early, ensure we resolve the deferred result + // promise and clear the queue of any remaining jobs + cleanUp.on('cleanup', () => { + cleanup() + deferred.resolve() + }) + + while (running) { // eslint-disable-line no-unmodified-loop-condition + await deferred.promise + deferred = defer() + + // yield all available results + while (results.length) { + const result = results.shift() + + if (result) { + yield result + } + } + } + + // yield any remaining results + yield * results +} 
diff --git a/src/query/events.js b/src/query/events.js new file mode 100644 index 00000000..a8d66031 --- /dev/null +++ b/src/query/events.js @@ -0,0 +1,152 @@ +'use strict' + +const { MESSAGE_TYPE_LOOKUP } = require('../message') + +/** @type {import('../types').MessageName[]} */ +const MESSAGE_NAMES = [ + 'PUT_VALUE', + 'GET_VALUE', + 'ADD_PROVIDER', + 'GET_PROVIDERS', + 'FIND_NODE', + 'PING' +] + +/** + * @typedef {import('peer-id')} PeerId + * @typedef {import('../types').QueryEvent} QueryEvent + * @typedef {import('../types').PeerData} PeerData + * @typedef {import('../message').Message} Message + * @typedef {import('../message/dht').Message.MessageType} MessageType + * @typedef {import('../types').DHTRecord} Record + */ + +/** + * @param {object} fields + * @param {PeerId} fields.to + * @param {number} fields.type + * @returns {import('../types').SendingQueryEvent} + */ +function sendingQueryEvent (fields) { + return { + ...fields, + name: 'SENDING_QUERY', + type: 0, + // @ts-expect-error MESSAGE_TYPE_LOOKUP is string[] + messageName: MESSAGE_TYPE_LOOKUP[fields.type], + messageType: fields.type + } +} + +/** + * @param {object} fields + * @param {PeerId} fields.from + * @param {MessageType} fields.messageType + * @param {PeerData[]} [fields.closer] + * @param {PeerData[]} [fields.providers] + * @param {Record} [fields.record] + * @returns {import('../types').PeerResponseEvent} + */ +function peerResponseEvent (fields) { + return { + ...fields, + name: 'PEER_RESPONSE', + type: 1, + messageName: MESSAGE_NAMES[fields.messageType], + closer: fields.closer ? fields.closer : [], + providers: fields.providers ? 
fields.providers : [] + } +} + +/** + * @param {object} fields + * @param {PeerId} fields.from + * @param {PeerData} fields.peer + * @returns {import('../types').FinalPeerEvent} + */ +function finalPeerEvent (fields) { + return { + ...fields, + name: 'FINAL_PEER', + type: 2 + } +} + +/** + * @param {object} fields + * @param {PeerId} fields.from + * @param {Error} fields.error + * @returns {import('../types').QueryErrorEvent} + */ +function queryErrorEvent (fields) { + return { + ...fields, + name: 'QUERY_ERROR', + type: 3 + } +} + +/** + * @param {object} fields + * @param {PeerId} fields.from + * @param {PeerData[]} fields.providers + * @returns {import('../types').ProviderEvent} + */ +function providerEvent (fields) { + return { + ...fields, + name: 'PROVIDER', + type: 4 + } +} + +/** + * @param {object} fields + * @param {PeerId} fields.from + * @param {Uint8Array} fields.value + * @returns {import('../types').ValueEvent} + */ +function valueEvent (fields) { + return { + ...fields, + name: 'VALUE', + type: 5 + } +} + +/** + * @param {object} fields + * @param {PeerId} fields.peer + * @returns {import('../types').AddingPeerEvent} + */ +function addingPeerEvent (fields) { + return { + ...fields, + name: 'ADDING_PEER', + type: 6 + } +} + +/** + * @param {object} fields + * @param {PeerId} fields.peer + * @returns {import('../types').DialingPeerEvent} + */ +function dialingPeerEvent (fields) { + return { + ...fields, + name: 'DIALING_PEER', + type: 7 + } +} + +module.exports = { + sendingQueryEvent, + peerResponseEvent, + finalPeerEvent, + queryErrorEvent, + providerEvent, + valueEvent, + addingPeerEvent, + dialingPeerEvent +} diff --git a/src/query/index.js b/src/query/index.js deleted file mode 100644 index 2ac7a202..00000000 --- a/src/query/index.js +++ /dev/null @@ -1,131 +0,0 @@ -'use strict' - -const { base58btc } = require('multiformats/bases/base58') - -const utils = require('../utils') -const Run = require('./run') - -/** - * @typedef {import('peer-id')} 
PeerId - * @typedef {{from: PeerId, val: Uint8Array}} DHTQueryValue - * @typedef {{from: PeerId, err: Error}} DHTQueryError - * @typedef {DHTQueryValue | DHTQueryError} DHTQueryResult - * @typedef {import('../').PeerData} PeerData - * - * @typedef {{ pathComplete?: boolean, queryComplete?: boolean, closerPeers?: PeerData[], peer?: PeerData, success?: boolean }} QueryResult - */ - -/** - * User-supplied function to set up an individual disjoint path. Per-path - * query state should be held in this function's closure. - * - * Accepts the numeric index from zero to numPaths - 1 and returns a function - * to call on each peer in the query. - * - * @typedef {(pathIndex: number, numPaths: number) => QueryFunc } MakeQueryFunc - */ - -/** - * Query function - * - * @typedef {(peer: PeerId) => Promise } QueryFunc - */ - -/** - * Divide peers up into disjoint paths (subqueries). Any peer can only be used once over all paths. - * Within each path, query peers from closest to farthest away. - */ -class Query { - /** - * Create a new query. The makePath function is called once per disjoint path, so that per-path - * variables can be created in that scope. makePath then returns the actual query function (queryFunc) to - * use when on that path. - * - * @param {import('../index')} dht - DHT instance - * @param {Uint8Array} key - * @param {MakeQueryFunc} makePath - Called to set up each disjoint path. Must return the query function. - */ - constructor (dht, key, makePath) { - this.dht = dht - this.key = key - this.makePath = makePath - this._log = utils.logger(this.dht.peerId, 'query:' + base58btc.baseEncode(key)) - - this.running = false - - this._onStart = this._onStart.bind(this) - this._onComplete = this._onComplete.bind(this) - } - - /** - * Run this query, start with the given list of peers first. 
- * - * @param {PeerId[]} peers - */ - async run (peers) { // eslint-disable-line require-await - if (!this.dht._queryManager.running) { - this._log.error('Attempt to run query after shutdown') - return { finalSet: new Set(), paths: [] } - } - - if (peers.length === 0) { - this._log.error('Running query with no peers') - return { finalSet: new Set(), paths: [] } - } - - this._run = new Run(this) - - this._log(`query running with K=${this.dht.kBucketSize}, A=${this.dht.concurrency}, D=${Math.min(this.dht.disjointPaths, peers.length)}`) - this._run.once('start', this._onStart) - this._run.once('complete', this._onComplete) - - return this._run.execute(peers) - } - - /** - * Called when the run starts. - */ - _onStart () { - this.running = true - this._startTime = Date.now() - this._log('query:start') - - // Register this query so we can stop it if the DHT stops - this.dht._queryManager.queryStarted(this) - } - - /** - * Called when the run completes (even if there's an error). - */ - _onComplete () { - // Ensure worker queues for all paths are stopped at the end of the query - this.stop() - } - - /** - * Stop the query. 
- */ - stop () { - this._log(`query:done in ${Date.now() - (this._startTime || 0)}ms`) - - if (this._run) { - this._log(`${this._run.errors.length} of ${this._run.peersSeen.size} peers errored (${this._run.errors.length / this._run.peersSeen.size * 100}% fail rate)`) - } - - if (!this.running) { - return - } - - this.running = false - - if (this._run) { - this._run.removeListener('start', this._onStart) - this._run.removeListener('complete', this._onComplete) - this._run.stop() - } - - this.dht._queryManager.queryCompleted(this) - } -} - -module.exports = Query diff --git a/src/query/manager.js b/src/query/manager.js new file mode 100644 index 00000000..a4d32b69 --- /dev/null +++ b/src/query/manager.js @@ -0,0 +1,163 @@ +'use strict' + +const { AbortController } = require('native-abort-controller') +const { anySignal } = require('any-signal') +const { + ALPHA, K +} = require('../constants') +const { toString: uint8ArrayToString } = require('uint8arrays/to-string') +const { logger } = require('../utils') +const { disjointPathQuery } = require('./disjoint-path') +const merge = require('it-merge') +const { + EventEmitter, + // @ts-expect-error only available in node 15+ + setMaxListeners +} = require('events') + +/** + * @typedef {import('peer-id')} PeerId + */ + +/** + * Keeps track of all running queries + */ +class QueryManager { + /** + * Creates a new QueryManager + * + * @param {object} params + * @param {PeerId} params.peerId + * @param {boolean} params.lan + * @param {number} [params.disjointPaths] + * @param {number} [params.alpha] + */ + constructor ({ peerId, lan, disjointPaths = K, alpha = ALPHA }) { + this._peerId = peerId + this._disjointPaths = disjointPaths || K + this._controllers = new Set() + this._running = false + this._alpha = alpha || ALPHA + this._lan = lan + } + + /** + * Starts the query manager + */ + start () { + this._running = true + } + + /** + * Stops all queries + */ + stop () { + this._running = false + + for (const controller of 
this._controllers) { + controller.abort() + } + + this._controllers.clear() + } + + /** + * @template T + * + * @param {Uint8Array} key + * @param {PeerId[]} peers + * @param {import('./types').QueryFunc} queryFunc + * @param {object} [options] + * @param {AbortSignal} [options.signal] + * @param {number} [options.queryFuncTimeout] + * + * @returns {AsyncIterable} + */ + async * run (key, peers, queryFunc, options = {}) { + if (!this._running) { + throw new Error('QueryManager not started') + } + + // allow us to stop queries on shut down + const abortController = new AbortController() + this._controllers.add(abortController) + const signals = [abortController.signal] + options.signal && signals.push(options.signal) + const signal = anySignal(signals) + + // this signal will get listened to for every invocation of queryFunc + // so make sure we don't make a lot of noise in the logs + try { + setMaxListeners && setMaxListeners(0, signal) + } catch {} // fails on node < 15 + + const log = logger(`libp2p:kad-dht:${this._lan ? 
'lan' : 'wan'}:query:` + uint8ArrayToString(key, 'base58btc')) + + // query a subset of peers up to `kBucketSize / 2` in length + const peersToQuery = peers.slice(0, Math.min(this._disjointPaths, peers.length)) + const startTime = Date.now() + const cleanUp = new EventEmitter() + + try { + log('query:start') + + if (peers.length === 0) { + log.error('Running query with no peers') + return + } + + // The paths must be disjoint, meaning that no two paths in the query may + // traverse the same peer + const peersSeen = new Set() + + // Create disjoint paths + const paths = peersToQuery.map((peer, index) => { + return disjointPathQuery({ + key, + startingPeer: peer, + ourPeerId: this._peerId, + peersSeen, + signal, + query: queryFunc, + pathIndex: index, + numPaths: peersToQuery.length, + alpha: this._alpha, + cleanUp, + queryFuncTimeout: options.queryFuncTimeout, + log + }) + }) + + /** @type {Error[]} */ + const errors = [] + + // Execute the query along each disjoint path and yield their results as they become available + for await (const event of merge(...paths)) { + if (!event) { + continue + } + + yield event + + if (event.name === 'QUERY_ERROR' && event.error) { + log('error', event.error) + errors.push(event.error) + } + } + + log(`${errors.length} of ${peersSeen.size} peers errored (${errors.length / peersSeen.size * 100}% fail rate)`) + } catch (/** @type {any} */ err) { + if (!this._running && err.code === 'ERR_QUERY_ABORTED') { + // ignore query aborted errors that were thrown during query manager shutdown + } else { + throw err + } + } finally { + this._controllers.delete(abortController) + cleanUp.emit('cleanup') + log(`query:done in ${Date.now() - (startTime || 0)}ms`) + } + } +} + +module.exports.QueryManager = QueryManager diff --git a/src/query/path.js b/src/query/path.js deleted file mode 100644 index 98dd20ec..00000000 --- a/src/query/path.js +++ /dev/null @@ -1,86 +0,0 @@ -'use strict' - -const PeerQueue = require('../peer-list/peer-queue') -const 
utils = require('../utils') - -// TODO: Temporary until parallel dial in Switch have a proper -// timeout. Requires async/await refactor of transports and -// dial abort logic. This gives us 30s to complete the `queryFunc`. -// This should help reduce the high end call times of queries -const QUERY_FUNC_TIMEOUT = 30e3 - -/** - * @typedef {import('peer-id')} PeerId - */ - -/** - * Manages a single Path through the DHT. - */ -class Path { - /** - * Creates a Path. - * - * @param {import('./run')} run - * @param {import('./index').QueryFunc} queryFunc - */ - constructor (run, queryFunc) { - this.run = run - this.queryFunc = utils.withTimeout(queryFunc, QUERY_FUNC_TIMEOUT) - if (!this.queryFunc) throw new Error('Path requires a `queryFn` to be specified') - if (typeof this.queryFunc !== 'function') throw new Error('Path expected `queryFn` to be a function. Got ' + typeof this.queryFunc) - - /** @type {PeerId[]} */ - this.initialPeers = [] - - /** @type {PeerQueue | null} */ - this.peersToQuery = null - - /** @type {import('./index').QueryResult | null} */ - this.res = null - } - - /** - * Add a peer to the set of peers that are used to intialize the path. - * - * @param {PeerId} peer - */ - addInitialPeer (peer) { - this.initialPeers.push(peer) - } - - /** - * Execute the path - */ - async execute () { - // Create a queue of peers ordered by distance from the key - const queue = await PeerQueue.fromKey(this.run.query.key) - // Add initial peers to the queue - this.peersToQuery = queue - await Promise.all(this.initialPeers.map(peer => this.addPeerToQuery(peer))) - await this.run.workerQueue(this) - } - - /** - * Add a peer to the peers to be queried. 
- * - * @param {PeerId} peer - */ - async addPeerToQuery (peer) { - // Don't add self - if (this.run.query.dht._isSelf(peer)) { - return - } - - // The paths must be disjoint, meaning that no two paths in the Query may - // traverse the same peer - if (this.run.peersSeen.has(peer.toB58String())) { - return - } - - if (this.peersToQuery) { - await this.peersToQuery.enqueue(peer) - } - } -} - -module.exports = Path diff --git a/src/query/run.js b/src/query/run.js deleted file mode 100644 index c9492eaf..00000000 --- a/src/query/run.js +++ /dev/null @@ -1,213 +0,0 @@ -'use strict' - -const PeerDistanceList = require('../peer-list/peer-distance-list') -const EventEmitter = require('events') - -const Path = require('./path') -const WorkerQueue = require('./worker-queue') -const utils = require('../utils') - -/** - * @typedef {import('peer-id')} PeerId - */ - -/** - * Manages a single run of the query. - */ -class Run extends EventEmitter { - /** - * Creates a Run. - * - * @param {import('./index')} query - */ - constructor (query) { - super() - - this.query = query - - this.running = false - - /** @type {WorkerQueue[]} */ - this.workers = [] - - // The peers that have been queried (including error responses) - this.peersSeen = new Set() - - // The errors received when querying peers - /** @type {Error[]} */ - this.errors = [] - - // The closest K peers that have been queried successfully - // (this member is initialized when the worker queues start) - /** @type {PeerDistanceList | null} */ - this.peersQueried = null - } - - /** - * Stop all the workers - */ - stop () { - if (!this.running) { - return - } - - this.running = false - for (const worker of this.workers) { - worker.stop() - } - } - - /** - * Execute the run with the given initial set of peers. 
- * - * @param {PeerId[]} peers - */ - async execute (peers) { - /** @type {import('./path')[]} */ - const paths = [] // array of states per disjoint path - - // Create disjoint paths - const numPaths = Math.min(this.query.dht.disjointPaths, peers.length) - for (let i = 0; i < numPaths; i++) { - paths.push(new Path(this, this.query.makePath(i, numPaths))) - } - - // Assign peers to paths round-robin style - peers.forEach((peer, i) => { - paths[i % numPaths].addInitialPeer(peer) - }) - - // Execute the query along each disjoint path - await this.executePaths(paths) - - const res = { - // The closest K peers we were able to query successfully - finalSet: new Set(this.peersQueried && this.peersQueried.peers), - - /** @type {import('./index').QueryResult[]} */ - paths: [] - } - - // Collect the results from each completed path - for (const path of paths) { - if (path.res && (path.res.pathComplete || path.res.queryComplete)) { - path.res.success = true - res.paths.push(path.res) - } - } - - return res - } - - /** - * Execute all paths through the DHT. - * - * @param {Array} paths - * @returns {Promise} - */ - async executePaths (paths) { - this.running = true - - this.emit('start') - try { - await Promise.all(paths.map(path => path.execute())) - } finally { - // Ensure all workers are stopped - this.stop() - // Completed the Run - this.emit('complete') - } - - // If all queries errored out, something is seriously wrong, so callback - // with an error - if (this.errors.length === this.peersSeen.size) { - throw this.errors[0] - } - } - - /** - * Initialize the list of queried peers, then start a worker queue for the - * given path. - * - * @param {Path} path - * @returns {Promise} - */ - async workerQueue (path) { - await this.init() - await this.startWorker(path) - } - - /** - * Create and start a worker queue for a particular path. 
- * - * @param {Path} path - * @returns {Promise} - */ - async startWorker (path) { - const worker = new WorkerQueue(this.query.dht, this, path, this.query._log) - this.workers.push(worker) - await worker.execute() - } - - /** - * Initialize the list of closest peers we've queried - this is shared by all - * paths in the run. - * - * @returns {Promise} - */ - async init () { - if (this.peersQueried) { - return - } - - // We only want to initialize the PeerDistanceList once for the run - if (this.peersQueriedPromise) { - await this.peersQueriedPromise - return - } - - // This promise is temporarily stored so that others may await its completion - this.peersQueriedPromise = (async () => { - const dhtKey = await utils.convertBuffer(this.query.key) - this.peersQueried = new PeerDistanceList(dhtKey, this.query.dht.kBucketSize) - })() - - // After PeerDistanceList is initialized, clean up - await this.peersQueriedPromise - delete this.peersQueriedPromise - } - - /** - * If we've queried K peers, and the remaining peers in the given `worker`'s queue - * are all further from the key than the peers we've already queried, then we should - * stop querying on that `worker`. - * - * @param {WorkerQueue} worker - * @returns {Promise} - */ - async continueQuerying (worker) { - // If we haven't queried K peers yet, keep going - if (this.peersQueried && this.peersQueried.length < this.peersQueried.capacity) { - return true - } - - // Get all the peers that are currently being queried. - // Note that this function gets called right after a peer has been popped - // off the head of the closest peers queue so it will include that peer. 
- const running = Array.from(worker.queuedPeerIds) - - // Check if any of the peers that are currently being queried are closer - // to the key than the peers we've already queried - const someCloser = this.peersQueried && await this.peersQueried.anyCloser(running) - - // Some are closer, the worker should keep going - if (someCloser) { - return true - } - - // None are closer, the worker can stop - return false - } -} - -module.exports = Run diff --git a/src/query/types.ts b/src/query/types.ts new file mode 100644 index 00000000..4a90b234 --- /dev/null +++ b/src/query/types.ts @@ -0,0 +1,22 @@ +import type PeerId from 'peer-id' +import type { QueryEvent } from '../types' + +export interface QueryContext { + // the key we are looking up + key: Uint8Array + // the current peer being queried + peer: PeerId + // if this signal emits an 'abort' event, any long-lived processes or requests started as part of this query should be terminated + signal: AbortSignal + // which disjoint path we are following + pathIndex: number + // the total number of disjoint paths being executed + numPaths: number +} + +/** + * Query function + */ +export interface QueryFunc { + (context: QueryContext): AsyncIterable +} diff --git a/src/query/worker-queue.js b/src/query/worker-queue.js deleted file mode 100644 index 877a8506..00000000 --- a/src/query/worker-queue.js +++ /dev/null @@ -1,294 +0,0 @@ -'use strict' - -const { default: Queue } = require('p-queue') - -/** - * @typedef {import('peer-id')} PeerId - */ - -class WorkerQueue { - /** - * Creates a new WorkerQueue. 
- * - * @param {import('../index')} dht - * @param {import('./run')} run - * @param {import('./path')} path - * @param {Function & {error: Function}} log - */ - constructor (dht, run, path, log) { - this.dht = dht - this.run = run - this.path = path - this.log = log - - this.concurrency = this.dht.concurrency - this.queue = this.setupQueue() - // a container for resolve/reject functions that will be populated - // when execute() is called - - /** @type {{ resolve: (result?: any) => void, reject: (err: Error) => void} | null} */ - this.execution = null - - /** @type {Set} */ - this.queuedPeerIds = new Set() - } - - /** - * Create the underlying async queue. - * - * @returns {Queue} - */ - setupQueue () { - const q = new Queue({ - concurrency: this.concurrency - }) - - // When all peers in the queue have been processed, stop the worker - q.on('idle', () => { - if (this.path.peersToQuery && !this.path.peersToQuery.length) { - this.log('queue:drain') - this.stop() - } - }) - - // When a space opens up in the queue, add some more peers - q.on('next', () => { - if (!this.running) { - return - } - - if (q.pending < this.concurrency) { - this.fill() - } - }) - - return q - } - - /** - * Stop the worker, optionally providing an error to pass to the worker's - * callback. - * - * @param {Error} [err] - */ - stop (err) { - if (!this.running) { - return - } - - this.running = false - this.queue.clear() - this.log('worker:stop, %d workers still running', this.run.workers.filter(w => w.running).length) - - if (this.execution) { - if (err) { - this.execution.reject(err) - } else { - this.execution.resolve() - } - } - } - - /** - * Use the queue from async to keep `concurrency` amount items running - * per path. 
- * - * @returns {Promise} - */ - async execute () { - this.running = true - // store the promise resolution functions to be resolved at end of queue - this.execution = null - const execPromise = new Promise((resolve, reject) => { - this.execution = { - resolve, reject - } - }) - // start queue - this.fill() - // await completion - await execPromise - } - - /** - * Add peers to the worker queue until there are enough to satisfy the - * worker queue concurrency. - * Note that we don't want to take any more than those required to satisfy - * concurrency from the peers-to-query queue, because we always want to - * query the closest peers to the key first, and new peers are continuously - * being added to the peers-to-query queue. - */ - fill () { - if (!this.path.peersToQuery) { - return - } - - // Note: - // - queue.pending: number of items that are currently running - // - queue.size: the number of items that are waiting to be run - while (this.queue.pending + this.queue.size < this.concurrency && this.path.peersToQuery.length > 0) { - const peer = this.path.peersToQuery.dequeue() - - // store the peer id so we can potentially abort early - this.queuedPeerIds.add(peer) - - this.queue.add( - () => { - return this.processNext(peer) - .catch(err => { - this.log.error('queue', err) - this.stop(err) - }) - .finally(() => { - this.queuedPeerIds.delete(peer) - }) - } - ) - } - } - - /** - * Process the next peer in the queue - * - * @param {PeerId} peer - */ - async processNext (peer) { - if (!this.running) { - return - } - - // The paths must be disjoint, meaning that no two paths in the Query may - // traverse the same peer - if (this.run.peersSeen.has(peer.toB58String())) { - return - } - - // Check if we've queried enough peers already - let continueQuerying, continueQueryingError - try { - continueQuerying = await this.run.continueQuerying(this) - } catch (/** @type {any} */ err) { - continueQueryingError = err - } - - // Abort and ignore any error if we're no longer 
running - if (!this.running) { - return - } - - if (continueQueryingError) { - throw continueQueryingError - } - - // No peer we're querying is closer, stop the queue - // This will cause queries that may potentially result in - // closer nodes to be ended, but it reduces overall query time - if (!continueQuerying) { - this.stop() - return - } - - // Check if another path has queried this peer in the mean time - if (this.run.peersSeen.has(peer.toB58String())) { - return - } - this.run.peersSeen.add(peer.toB58String()) - - // Execute the query on the next peer - this.log('queue:work') - let state, execError - try { - state = await this.execQuery(peer) - } catch (/** @type {any} */ err) { - execError = err - } - - // Abort and ignore any error if we're no longer running - if (!this.running) { - return - } - - this.log('queue:work:done', execError, state) - - if (execError) { - throw execError - } - - // If query is complete, stop all workers. - // Note: run.stop() calls stop() on all the workers, which kills the - // queue and resolves execution - if (state && state.queryComplete) { - this.log('query:complete') - this.run.stop() - return - } - - // If path is complete, just stop this worker. - // Note: this.stop() kills the queue and resolves execution - if (state && state.pathComplete) { - this.stop() - } - } - - /** - * Execute a query on the next peer. 
- * - * @param {PeerId} peer - */ - async execQuery (peer) { - let res, queryError - try { - res = await this.path.queryFunc(peer) - } catch (/** @type {any} */ err) { - queryError = err - } - - // Abort and ignore any error if we're no longer running - if (!this.running) { - return - } - - if (queryError) { - this.run.errors.push(queryError) - return - } - - // Add the peer to the closest peers we have successfully queried - this.run.peersQueried && await this.run.peersQueried.add(peer) - - if (!res) { - return - } - - // If the query indicates that this path or the whole query is complete - // set the path result and bail out - if (res.pathComplete || res.queryComplete) { - this.path.res = res - - return { - pathComplete: res.pathComplete, - queryComplete: res.queryComplete - } - } - - // If there are closer peers to query, add them to the queue - if (res.closerPeers && res.closerPeers.length > 0) { - /** - * @param {import('../').PeerData} closer - */ - const queryCloser = async (closer) => { - // don't add ourselves - if (this.dht._isSelf(closer.id)) { - return - } - - this.dht._peerDiscovered(closer.id, closer.multiaddrs) - await this.path.addPeerToQuery(closer.id) - } - - await Promise.all(res.closerPeers.map(queryCloser)) - } - } -} - -module.exports = WorkerQueue diff --git a/src/routing-table/index.js b/src/routing-table/index.js index 2429114b..d3833c84 100644 --- a/src/routing-table/index.js +++ b/src/routing-table/index.js @@ -1,52 +1,18 @@ 'use strict' -// @ts-ignore +// @ts-expect-error no types const KBuck = require('k-bucket') -const { xor: uint8ArrayXor } = require('uint8arrays/xor') -const GENERATED_PREFIXES = require('./generated-prefix-list.json') -const { sha256 } = require('multiformats/hashes/sha2') -const crypto = require('libp2p-crypto') -const PeerId = require('peer-id') const utils = require('../utils') -const debug = require('debug') -const log = Object.assign(debug('libp2p:dht:routing-table'), { - error: 
debug('libp2p:dht:routing-table:error') -}) -// @ts-ignore -const length = require('it-length') const { default: Queue } = require('p-queue') const { PROTOCOL_DHT } = require('../constants') -// @ts-expect-error no types -const TimeoutController = require('timeout-abort-controller') - -/** - * @typedef {object} KBucketPeer - * @property {Uint8Array} id - * @property {PeerId} peer - * - * @typedef {object} KBucket - * @property {Uint8Array} id - * @property {KBucketPeer[]} contacts - * @property {boolean} dontSplit - * @property {KBucket} left - * @property {KBucket} right - * - * @typedef {object} KBucketTree - * @property {KBucket} root - * @property {Uint8Array} localNodeId - * @property {(event: string, callback: Function) => void} on - * @property {(key: Uint8Array, count: number) => KBucketPeer[]} closest - * @property {(key: Uint8Array) => KBucketPeer} closestPeer - * @property {(key: Uint8Array) => void} remove - * @property {(peer: KBucketPeer) => void} add - * @property {() => number} count - * @property {() => Iterable} toIterable - */ +const { TimeoutController } = require('timeout-abort-controller') /** - * Cannot generate random KadIds longer than this + 1 + * @typedef {import('./types').KBucketPeer} KBucketPeer + * @typedef {import('./types').KBucket} KBucket + * @typedef {import('./types').KBucketTree} KBucketTree + * @typedef {import('peer-id')} PeerId */ -const MAX_COMMON_PREFIX_LENGTH = 15 /** * A wrapper around `k-bucket`, to provide easy store and @@ -54,17 +20,18 @@ const MAX_COMMON_PREFIX_LENGTH = 15 */ class RoutingTable { /** - * @param {import('../')} dht - * @param {object} [options] - * @param {number} [options.kBucketSize=20] - * @param {number} [options.refreshInterval=30000] - * @param {number} [options.pingTimeout=10000] - */ - constructor (dht, { kBucketSize, refreshInterval, pingTimeout } = {}) { - this.peerId = dht.peerId - this.dht = dht + * @param {object} params + * @param {import('peer-id')} params.peerId + * @param 
{import('../types').Dialer} params.dialer + * @param {boolean} params.lan + * @param {number} [params.kBucketSize=20] + * @param {number} [params.pingTimeout=10000] + */ + constructor ({ peerId, dialer, kBucketSize, pingTimeout, lan }) { + this._log = utils.logger(`libp2p:kad-dht:${lan ? 'lan' : 'wan'}:routing-table`) + this._peerId = peerId + this._dialer = dialer this._kBucketSize = kBucketSize || 20 - this._refreshInterval = refreshInterval || 30000 this._pingTimeout = pingTimeout || 10000 /** @type {KBucketTree} */ @@ -76,233 +43,19 @@ class RoutingTable { /** @type {Date[]} */ this.commonPrefixLengthRefreshedAt = [] - this._refreshTable = this._refreshTable.bind(this) this._onPing = this._onPing.bind(this) this._pingQueue = new Queue({ concurrency: 1 }) } async start () { - this.kb.localNodeId = await utils.convertPeerId(this.peerId) + this.kb.localNodeId = await utils.convertPeerId(this._peerId) this.kb.on('ping', this._onPing) - - await this._refreshTable(true) } async stop () { - if (this._refreshTimeoutId) { - clearTimeout(this._refreshTimeoutId) - } - this._pingQueue.clear() } - /** - * To speed lookups, we seed the table with random PeerIds. 
This means - * when we are asked to locate a peer on the network, we can find a KadId - * that is close to the requested peer ID and query that, then network - * peers will tell us who they know who is close to the fake ID - * - * @param {boolean} [force=false] - */ - async _refreshTable (force) { - log('refreshing routing table') - - const prefixLength = this._maxCommonPrefix() - const refreshCpls = this._getTrackedCommonPrefixLengthsForRefresh(prefixLength) - - log(`max common prefix length ${prefixLength}`) - log(`tracked CPLs [ ${refreshCpls.map(date => `${date.getFullYear()}-${(date.getMonth() + 1).toString().padStart(2, '0')}-${date.getDate().toString().padStart(2, '0')} ${date.getHours().toString().padStart(2, '0')}:${date.getMinutes().toString().padStart(2, '0')}:${date.getSeconds().toString().padStart(2, '0')}`).join(', ')} ]`) - - /** - * If we see a gap at a common prefix length in the Routing table, we ONLY refresh up until - * the maximum cpl we have in the Routing Table OR (2 * (Cpl+ 1) with the gap), whichever - * is smaller. - * - * This is to prevent refreshes for Cpls that have no peers in the network but happen to be - * before a very high max Cpl for which we do have peers in the network. - * - * The number of 2 * (Cpl + 1) can be proved and a proof would have been written here if - * the programmer had paid more attention in the Math classes at university. - * - * So, please be patient and a doc explaining it will be published soon. 
- * - * https://github.com/libp2p/go-libp2p-kad-dht/commit/2851c88acb0a3f86bcfe3cfd0f4604a03db801d8#diff-ad45f4ba97ffbc4083c2eb87a4420c1157057b233f048030d67c6b551855ccf6R219 - */ - await Promise.all( - refreshCpls.map(async (lastRefresh, index) => { - try { - await this._refreshCommonPrefixLength(index, lastRefresh, force === true) - - if (this._numPeersForCpl(prefixLength) === 0) { - const lastCpl = Math.min(2 * (index + 1), refreshCpls.length - 1) - - for (let n = index + 1; n < lastCpl + 1; n++) { - try { - await this._refreshCommonPrefixLength(n, lastRefresh, force === true) - } catch (/** @type {any} */ err) { - log.error(err) - } - } - } - } catch (/** @type {any} */ err) { - log.error(err) - } - }) - ) - - this._refreshTimeoutId = setTimeout(this._refreshTable, this._refreshInterval) - // @ts-ignore - this._refreshTimeoutId.unref() - } - - /** - * @param {number} cpl - * @param {Date} lastRefresh - * @param {boolean} force - */ - async _refreshCommonPrefixLength (cpl, lastRefresh, force) { - if (!force && lastRefresh.getTime() > (Date.now() - this._refreshInterval)) { - log(`not running refresh for cpl ${cpl} as time since last refresh not above interval`) - return - } - - // gen a key for the query to refresh the cpl - const peerId = await this._generateRandomPeerId(cpl) - - log(`starting refreshing cpl ${cpl} with key ${peerId.toB58String()} (routing table size was ${this.kb.count()})`) - - const peers = await length(this.dht.getClosestPeers(peerId.toBytes(), {})) - - log(`found ${peers} peers that were close to imaginary peer ${peerId.toB58String()}`) - - log(`finished refreshing cpl ${cpl} with key ${peerId.toB58String()} (routing table size was ${this.kb.count()})`) - } - - /** - * @param {number} maxCommonPrefix - */ - _getTrackedCommonPrefixLengthsForRefresh (maxCommonPrefix) { - if (maxCommonPrefix > MAX_COMMON_PREFIX_LENGTH) { - maxCommonPrefix = MAX_COMMON_PREFIX_LENGTH - } - - const dates = [] - - for (let i = 0; i <= maxCommonPrefix; i++) { - // 
defaults to the zero value if we haven't refreshed it yet. - dates[i] = this.commonPrefixLengthRefreshedAt[i] || new Date() - } - - return dates - } - - /** - * - * @param {number} targetCommonPrefixLength - */ - async _generateRandomPeerId (targetCommonPrefixLength) { - const randomBytes = crypto.randomBytes(2) - const randomUint16 = (randomBytes[1] << 8) + randomBytes[0] - - const key = await this._makePeerId(this.kb.localNodeId, randomUint16, targetCommonPrefixLength) - - return PeerId.createFromBytes(key) - } - - /** - * @param {Uint8Array} localKadId - * @param {number} randomPrefix - * @param {number} targetCommonPrefixLength - */ - async _makePeerId (localKadId, randomPrefix, targetCommonPrefixLength) { - if (targetCommonPrefixLength > MAX_COMMON_PREFIX_LENGTH) { - throw new Error(`Cannot generate peer ID for common prefix length greater than ${MAX_COMMON_PREFIX_LENGTH}`) - } - - const view = new DataView(localKadId.buffer, localKadId.byteOffset, localKadId.byteLength) - const localPrefix = view.getUint16(0, false) - - // For host with ID `L`, an ID `K` belongs to a bucket with ID `B` ONLY IF CommonPrefixLen(L,K) is EXACTLY B. - // Hence, to achieve a targetPrefix `T`, we must toggle the (T+1)th bit in L & then copy (T+1) bits from L - // to our randomly generated prefix. - const toggledLocalPrefix = localPrefix ^ (0x8000 >> targetCommonPrefixLength) - - // Combine the toggled local prefix and the random bits at the correct offset - // such that ONLY the first `targetCommonPrefixLength` bits match the local ID. - const mask = 65535 << (16 - (targetCommonPrefixLength + 1)) - const targetPrefix = (toggledLocalPrefix & mask) | (randomPrefix & ~mask) - - // Convert to a known peer ID. 
- const keyPrefix = GENERATED_PREFIXES[targetPrefix] - - const keyBuffer = new ArrayBuffer(34) - const keyView = new DataView(keyBuffer, 0, keyBuffer.byteLength) - keyView.setUint8(0, sha256.code) - keyView.setUint8(1, 32) - keyView.setUint32(2, keyPrefix, false) - - return new Uint8Array(keyView.buffer, keyView.byteOffset, keyView.byteLength) - } - - /** - * returns the maximum common prefix length between any peer in the table - * and the current peer - */ - _maxCommonPrefix () { - if (!this.kb.localNodeId) { - return 0 - } - - // xor our KadId with every KadId in the k-bucket tree, - // return the longest id prefix that is the same - let prefixLength = 0 - - for (const length of this._prefixLengths()) { - if (length > prefixLength) { - prefixLength = length - } - } - - return prefixLength - } - - /** - * Returns the number of peers in the table with a given prefix length - * - * @param {number} prefixLength - */ - _numPeersForCpl (prefixLength) { - let count = 0 - - for (const length of this._prefixLengths()) { - if (length === prefixLength) { - count++ - } - } - - return count - } - - /** - * Yields the common prefix length of every peer in the table - */ - * _prefixLengths () { - for (const { id } of this.kb.toIterable()) { - const distance = uint8ArrayXor(this.kb.localNodeId, id) - let leadingZeros = 0 - - for (const byte of distance) { - if (byte === 0) { - leadingZeros++ - } else { - break - } - } - - yield leadingZeros - } - } - /** * Called on the `ping` event from `k-bucket` when a bucket is full * and cannot split. 
@@ -330,15 +83,15 @@ class RoutingTable { try { timeoutController = new TimeoutController(this._pingTimeout) - log(`Pinging old contact ${oldContact.peer.toB58String()}`) - const conn = await this.dht.libp2p.dialProtocol(oldContact.peer, PROTOCOL_DHT, { + this._log(`Pinging old contact ${oldContact.peer}`) + const { stream } = await this._dialer.dialProtocol(oldContact.peer, PROTOCOL_DHT, { signal: timeoutController.signal }) - await conn.close() + await stream.close() responded++ } catch (err) { - log.error('Could not ping peer', err) - log(`Evicting old contact after ping failed ${oldContact.peer.toB58String()}`) + this._log.error('Could not ping peer %p', oldContact.peer, err) + this._log(`Evicting old contact after ping failed ${oldContact.peer}`) this.kb.remove(oldContact.id) } finally { if (timeoutController) { @@ -349,11 +102,11 @@ class RoutingTable { ) if (responded < oldContacts.length) { - log(`Adding new contact ${newContact.peer.toB58String()}`) + this._log(`Adding new contact ${newContact.peer}`) this.kb.add(newContact) } } catch (err) { - log.error('Could not process k-bucket ping event', err) + this._log.error('Could not process k-bucket ping event', err) } }) } @@ -398,9 +151,9 @@ class RoutingTable { * Retrieve the `count`-closest peers to the given key. 
* * @param {Uint8Array} key - * @param {number} count + * @param {number} [count] - defaults to kBucketSize */ - closestPeers (key, count) { + closestPeers (key, count = this._kBucketSize) { const closest = this.kb.closest(key, count) return closest.map(p => p.peer) @@ -429,4 +182,4 @@ class RoutingTable { } } -module.exports = RoutingTable +module.exports.RoutingTable = RoutingTable diff --git a/src/routing-table/refresh.js b/src/routing-table/refresh.js new file mode 100644 index 00000000..4ccb717f --- /dev/null +++ b/src/routing-table/refresh.js @@ -0,0 +1,275 @@ +'use strict' + +const { xor: uint8ArrayXor } = require('uint8arrays/xor') +const GENERATED_PREFIXES = require('./generated-prefix-list.json') +const { sha256 } = require('multiformats/hashes/sha2') +const crypto = require('libp2p-crypto') +const PeerId = require('peer-id') +const utils = require('../utils') +const length = require('it-length') +const { TimeoutController } = require('timeout-abort-controller') + +/** + * @typedef {import('./types').KBucketPeer} KBucketPeer + * @typedef {import('./types').KBucket} KBucket + * @typedef {import('./types').KBucketTree} KBucketTree + */ + +/** + * Cannot generate random KadIds longer than this + 1 + */ +const MAX_COMMON_PREFIX_LENGTH = 15 + +/** + * A wrapper around `k-bucket`, to provide easy store and + * retrieval for peers. + */ +class RoutingTableRefresh { + /** + * @param {object} params + * @param {import('../peer-routing').PeerRouting} params.peerRouting + * @param {import('./').RoutingTable} params.routingTable + * @param {boolean} params.lan + * @param {number} [params.refreshInterval=30000] + */ + constructor ({ peerRouting, routingTable, refreshInterval, lan }) { + this._log = utils.logger(`libp2p:kad-dht:${lan ? 
'lan' : 'wan'}:routing-table:refresh`) + this._peerRouting = peerRouting + this._routingTable = routingTable + this._refreshInterval = refreshInterval || 30000 + + /** @type {Date[]} */ + this.commonPrefixLengthRefreshedAt = [] + + this.refreshTable = this.refreshTable.bind(this) + } + + async start () { + this._log(`refreshing routing table every ${this._refreshInterval}ms`) + await this.refreshTable(true) + } + + async stop () { + if (this._refreshTimeoutId) { + clearTimeout(this._refreshTimeoutId) + } + } + + /** + * To speed lookups, we seed the table with random PeerIds. This means + * when we are asked to locate a peer on the network, we can find a KadId + * that is close to the requested peer ID and query that, then network + * peers will tell us who they know who is close to the fake ID + * + * @param {boolean} [force=false] + */ + async refreshTable (force) { + this._log('refreshing routing table') + + const prefixLength = this._maxCommonPrefix() + const refreshCpls = this._getTrackedCommonPrefixLengthsForRefresh(prefixLength) + + this._log(`max common prefix length ${prefixLength}`) + this._log(`tracked CPLs [ ${refreshCpls.map(date => date.toISOString()).join(', ')} ]`) + + /** + * If we see a gap at a common prefix length in the Routing table, we ONLY refresh up until + * the maximum cpl we have in the Routing Table OR (2 * (Cpl+ 1) with the gap), whichever + * is smaller. + * + * This is to prevent refreshes for Cpls that have no peers in the network but happen to be + * before a very high max Cpl for which we do have peers in the network. + * + * The number of 2 * (Cpl + 1) can be proved and a proof would have been written here if + * the programmer had paid more attention in the Math classes at university. + * + * So, please be patient and a doc explaining it will be published soon. 
+ * + * https://github.com/libp2p/go-libp2p-kad-dht/commit/2851c88acb0a3f86bcfe3cfd0f4604a03db801d8#diff-ad45f4ba97ffbc4083c2eb87a4420c1157057b233f048030d67c6b551855ccf6R219 + */ + await Promise.all( + refreshCpls.map(async (lastRefresh, index) => { + try { + await this._refreshCommonPrefixLength(index, lastRefresh, force === true) + + if (this._numPeersForCpl(prefixLength) === 0) { + const lastCpl = Math.min(2 * (index + 1), refreshCpls.length - 1) + + for (let n = index + 1; n < lastCpl + 1; n++) { + try { + await this._refreshCommonPrefixLength(n, lastRefresh, force === true) + } catch (/** @type {any} */ err) { + this._log.error(err) + } + } + } + } catch (/** @type {any} */ err) { + this._log.error(err) + } + }) + ) + + this._refreshTimeoutId = setTimeout(this.refreshTable, this._refreshInterval) + + // @ts-ignore + if (this._refreshTimeoutId.unref) { + // @ts-ignore + this._refreshTimeoutId.unref() + } + } + + /** + * @param {number} cpl + * @param {Date} lastRefresh + * @param {boolean} force + */ + async _refreshCommonPrefixLength (cpl, lastRefresh, force) { + if (!force && lastRefresh.getTime() > (Date.now() - this._refreshInterval)) { + this._log('not running refresh for cpl %s as time since last refresh not above interval', cpl) + return + } + + // gen a key for the query to refresh the cpl + const peerId = await this._generateRandomPeerId(cpl) + + this._log('starting refreshing cpl %s with key %p (routing table size was %s)', cpl, peerId, this._routingTable.kb.count()) + + const controller = new TimeoutController(60000) + + try { + const peers = await length(this._peerRouting.getClosestPeers(peerId.toBytes(), { signal: controller.signal })) + + this._log(`found ${peers} peers that were close to imaginary peer %p`, peerId) + this._log('finished refreshing cpl %s with key %p (routing table size is now %s)', cpl, peerId, this._routingTable.kb.count()) + } finally { + controller.clear() + } + } + + /** + * @param {number} maxCommonPrefix + */ + 
_getTrackedCommonPrefixLengthsForRefresh (maxCommonPrefix) { + if (maxCommonPrefix > MAX_COMMON_PREFIX_LENGTH) { + maxCommonPrefix = MAX_COMMON_PREFIX_LENGTH + } + + const dates = [] + + for (let i = 0; i <= maxCommonPrefix; i++) { + // defaults to the zero value if we haven't refreshed it yet. + dates[i] = this.commonPrefixLengthRefreshedAt[i] || new Date() + } + + return dates + } + + /** + * + * @param {number} targetCommonPrefixLength + */ + async _generateRandomPeerId (targetCommonPrefixLength) { + const randomBytes = crypto.randomBytes(2) + const randomUint16 = (randomBytes[1] << 8) + randomBytes[0] + + const key = await this._makePeerId(this._routingTable.kb.localNodeId, randomUint16, targetCommonPrefixLength) + + return PeerId.createFromBytes(key) + } + + /** + * @param {Uint8Array} localKadId + * @param {number} randomPrefix + * @param {number} targetCommonPrefixLength + */ + async _makePeerId (localKadId, randomPrefix, targetCommonPrefixLength) { + if (targetCommonPrefixLength > MAX_COMMON_PREFIX_LENGTH) { + throw new Error(`Cannot generate peer ID for common prefix length greater than ${MAX_COMMON_PREFIX_LENGTH}`) + } + + const view = new DataView(localKadId.buffer, localKadId.byteOffset, localKadId.byteLength) + const localPrefix = view.getUint16(0, false) + + // For host with ID `L`, an ID `K` belongs to a bucket with ID `B` ONLY IF CommonPrefixLen(L,K) is EXACTLY B. + // Hence, to achieve a targetPrefix `T`, we must toggle the (T+1)th bit in L & then copy (T+1) bits from L + // to our randomly generated prefix. + const toggledLocalPrefix = localPrefix ^ (0x8000 >> targetCommonPrefixLength) + + // Combine the toggled local prefix and the random bits at the correct offset + // such that ONLY the first `targetCommonPrefixLength` bits match the local ID. + const mask = 65535 << (16 - (targetCommonPrefixLength + 1)) + const targetPrefix = (toggledLocalPrefix & mask) | (randomPrefix & ~mask) + + // Convert to a known peer ID. 
+ const keyPrefix = GENERATED_PREFIXES[targetPrefix] + + const keyBuffer = new ArrayBuffer(34) + const keyView = new DataView(keyBuffer, 0, keyBuffer.byteLength) + keyView.setUint8(0, sha256.code) + keyView.setUint8(1, 32) + keyView.setUint32(2, keyPrefix, false) + + return new Uint8Array(keyView.buffer, keyView.byteOffset, keyView.byteLength) + } + + /** + * returns the maximum common prefix length between any peer in the table + * and the current peer + */ + _maxCommonPrefix () { + if (!this._routingTable.kb.localNodeId) { + return 0 + } + + // xor our KadId with every KadId in the k-bucket tree, + // return the longest id prefix that is the same + let prefixLength = 0 + + for (const length of this._prefixLengths()) { + if (length > prefixLength) { + prefixLength = length + } + } + + return prefixLength + } + + /** + * Returns the number of peers in the table with a given prefix length + * + * @param {number} prefixLength + */ + _numPeersForCpl (prefixLength) { + let count = 0 + + for (const length of this._prefixLengths()) { + if (length === prefixLength) { + count++ + } + } + + return count + } + + /** + * Yields the common prefix length of every peer in the table + */ + * _prefixLengths () { + for (const { id } of this._routingTable.kb.toIterable()) { + const distance = uint8ArrayXor(this._routingTable.kb.localNodeId, id) + let leadingZeros = 0 + + for (const byte of distance) { + if (byte === 0) { + leadingZeros++ + } else { + break + } + } + + yield leadingZeros + } + } +} + +module.exports.RoutingTableRefresh = RoutingTableRefresh diff --git a/src/routing-table/types.ts b/src/routing-table/types.ts new file mode 100644 index 00000000..eb88540e --- /dev/null +++ b/src/routing-table/types.ts @@ -0,0 +1,26 @@ +import type PeerId from 'peer-id' + +export interface KBucketPeer { + id: Uint8Array + peer: PeerId +} + +export interface KBucket { + id: Uint8Array + contacts: KBucketPeer[] + dontSplit: boolean + left: KBucket + right: KBucket +} + +export interface 
KBucketTree {
+  root: KBucket
+  localNodeId: Uint8Array
+  on: (event: 'ping', callback: (oldContacts: KBucketPeer[], newContact: KBucketPeer) => void) => void
+  closest: (key: Uint8Array, count: number) => KBucketPeer[]
+  closestPeer: (key: Uint8Array) => KBucketPeer
+  remove: (key: Uint8Array) => void
+  add: (peer: KBucketPeer) => void
+  count: () => number
+  toIterable: () => Iterable<KBucketPeer>
+}
diff --git a/src/rpc/handlers/add-provider.js b/src/rpc/handlers/add-provider.js
index b7dbd535..08e0f2da 100644
--- a/src/rpc/handlers/add-provider.js
+++ b/src/rpc/handlers/add-provider.js
@@ -2,26 +2,36 @@
 const { CID } = require('multiformats/cid')
 const errcode = require('err-code')
-
 const utils = require('../../utils')
+const log = utils.logger('libp2p:kad-dht:rpc:handlers:add-provider')
 
 /**
  * @typedef {import('peer-id')} PeerId
- * @typedef {import('../../message')} Message
+ * @typedef {import('../../message').Message} Message
+ * @typedef {import('../types').DHTMessageHandler} DHTMessageHandler
  */
 
 /**
- * @param {import('../../index')} dht
+ * @implements {DHTMessageHandler}
  */
-module.exports = (dht) => {
-  const log = utils.logger(dht.peerId, 'rpc:add-provider')
+class AddProviderHandler {
+  /**
+   * @param {object} params
+   * @param {PeerId} params.peerId
+   * @param {import('../../providers').Providers} params.providers
+   * @param {import('../../types').PeerStore} params.peerStore
+   */
+  constructor ({ peerId, providers, peerStore }) {
+    this._peerId = peerId
+    this._providers = providers
+    this._peerStore = peerStore
+  }
+
   /**
-   * Process `AddProvider` DHT messages.
- * * @param {PeerId} peerId * @param {Message} msg */ - async function addProvider (peerId, msg) { // eslint-disable-line require-await + async handle (peerId, msg) { log('start') if (!msg.key || msg.key.length === 0) { @@ -31,41 +41,43 @@ module.exports = (dht) => { /** @type {CID} */ let cid try { + // this is actually just the multihash, not the whole CID cid = CID.decode(msg.key) } catch (/** @type {any} */ err) { const errMsg = `Invalid CID: ${err.message}` throw errcode(new Error(errMsg), 'ERR_INVALID_CID') } - msg.providerPeers.forEach((pi) => { - // Ignore providers not from the originator - if (!pi.id.isEqual(peerId)) { - log('invalid provider peer %s from %s', pi.id.toB58String(), peerId.toB58String()) - return - } + if (!msg.providerPeers || !msg.providerPeers.length) { + log.error('no providers found in message') + } - if (pi.multiaddrs.length < 1) { - log('no valid addresses for provider %s. Ignore', peerId.toB58String()) - return - } + await Promise.all( + msg.providerPeers.map(async (pi) => { + // Ignore providers not from the originator + if (!pi.id.equals(peerId)) { + log('invalid provider peer %p from %p', pi.id, peerId) + return + } - log('received provider %s for %s (addrs %s)', peerId.toB58String(), cid.toString(), pi.multiaddrs.map((m) => m.toString())) + if (pi.multiaddrs.length < 1) { + log('no valid addresses for provider %p. Ignore', peerId) + return + } - if (!dht._isSelf(pi.id)) { - // Add known address to peer store - dht.peerStore.addressBook.add(pi.id, pi.multiaddrs) - return dht.providers.addProvider(cid, pi.id) - } - }) + log('received provider %p for %s (addrs %s)', peerId, cid, pi.multiaddrs.map((m) => m.toString())) - // Previous versions of the JS DHT sent erroneous providers in the - // `providerPeers` field. In order to accommodate older clients that have - // this bug, we fall back to assuming the originator is the provider if - // we can't find any valid providers in the payload. 
- // https://github.com/libp2p/js-libp2p-kad-dht/pull/127 - // https://github.com/libp2p/js-libp2p-kad-dht/issues/128 - return dht.providers.addProvider(cid, peerId) - } + if (!this._peerId.equals(pi.id)) { + // Add known address to peer store + this._peerStore.addressBook.add(pi.id, pi.multiaddrs) + await this._providers.addProvider(cid, pi.id) + } + }) + ) - return addProvider + // typescript requires a return value + return undefined + } } + +module.exports.AddProviderHandler = AddProviderHandler diff --git a/src/rpc/handlers/find-node.js b/src/rpc/handlers/find-node.js index cc34bbb5..73270928 100644 --- a/src/rpc/handlers/find-node.js +++ b/src/rpc/handlers/find-node.js @@ -1,49 +1,69 @@ 'use strict' -const { equals: uint8ArrayEquals } = require('uint8arrays/equals') - -const Message = require('../../message') +const { Message } = require('../../message') const utils = require('../../utils') +const log = utils.logger('libp2p:kad-dht:rpc:handlers:find-node') +const { + removePrivateAddresses, + removePublicAddresses +} = require('../../utils') /** * @typedef {import('peer-id')} PeerId + * @typedef {import('../types').DHTMessageHandler} DHTMessageHandler */ /** - * @param {import('../../index')} dht + * @implements {DHTMessageHandler} */ -module.exports = (dht) => { - const log = utils.logger(dht.peerId, 'rpc:find-node') +class FindNodeHandler { + /** + * @param {object} params + * @param {PeerId} params.peerId + * @param {import('../../types').Addressable} params.addressable + * @param {import('../../peer-routing').PeerRouting} params.peerRouting + * @param {boolean} [params.lan] + */ + constructor ({ peerId, addressable, peerRouting, lan }) { + this._peerId = peerId + this._addressable = addressable + this._peerRouting = peerRouting + this._lan = Boolean(lan) + } /** - * Process `FindNode` DHT messages. 
+ * Process `FindNode` DHT messages * * @param {PeerId} peerId * @param {Message} msg */ - async function findNode (peerId, msg) { - log('start') + async handle (peerId, msg) { + log('incoming request from %p for peers closer to %b', peerId, msg.key) let closer - if (uint8ArrayEquals(msg.key, dht.peerId.id)) { + if (this._peerId.equals(msg.key)) { closer = [{ - id: dht.peerId, - multiaddrs: dht.libp2p.multiaddrs + id: this._peerId, + multiaddrs: this._addressable.multiaddrs }] } else { - closer = await dht._betterPeersToQuery(msg, peerId) + closer = await this._peerRouting.getCloserPeersOffline(msg.key, peerId) } + closer = closer + .map(this._lan ? removePublicAddresses : removePrivateAddresses) + .filter(({ multiaddrs }) => multiaddrs.length) + const response = new Message(msg.type, new Uint8Array(0), msg.clusterLevel) if (closer.length > 0) { response.closerPeers = closer } else { - log('handle FindNode %s: could not find anything', peerId.toB58String()) + log('could not find any peers closer to %p', peerId) } return response } - - return findNode } + +module.exports.FindNodeHandler = FindNodeHandler diff --git a/src/rpc/handlers/get-providers.js b/src/rpc/handlers/get-providers.js index 56590ff5..627fce77 100644 --- a/src/rpc/handlers/get-providers.js +++ b/src/rpc/handlers/get-providers.js @@ -2,19 +2,42 @@ const { CID } = require('multiformats/cid') const errcode = require('err-code') - -const Message = require('../../message') +const { Message } = require('../../message') const utils = require('../../utils') +const log = utils.logger('libp2p:kad-dht:rpc:handlers:get-providers') +const { + removePrivateAddresses, + removePublicAddresses +} = require('../../utils') /** * @typedef {import('peer-id')} PeerId + * @typedef {import('../types').DHTMessageHandler} DHTMessageHandler */ /** - * @param {import('../../index')} dht + * @implements {DHTMessageHandler} */ -module.exports = (dht) => { - const log = utils.logger(dht.peerId, 'rpc:get-providers') +class 
GetProvidersHandler { + /** + * @param {object} params + * @param {PeerId} params.peerId + * @param {import('../../peer-routing').PeerRouting} params.peerRouting + * @param {import('../../providers').Providers} params.providers + * @param {import('interface-datastore').Datastore} params.datastore + * @param {import('../../types').PeerStore} params.peerStore + * @param {import('../../types').Addressable} params.addressable + * @param {boolean} [params.lan] + */ + constructor ({ peerId, peerRouting, providers, datastore, peerStore, addressable, lan }) { + this._peerId = peerId + this._peerRouting = peerRouting + this._providers = providers + this._datastore = datastore + this._peerStore = peerStore + this._addressable = addressable + this._lan = Boolean(lan) + } /** * Process `GetProviders` DHT messages. @@ -22,7 +45,7 @@ module.exports = (dht) => { * @param {PeerId} peerId * @param {Message} msg */ - async function getProviders (peerId, msg) { + async handle (peerId, msg) { let cid try { cid = CID.decode(msg.key) @@ -30,29 +53,42 @@ module.exports = (dht) => { throw errcode(new Error(`Invalid CID: ${err.message}`), 'ERR_INVALID_CID') } - log('%s', cid.toString()) + log('%p asking for providers for %s', peerId, cid.toString()) const dsKey = utils.bufferToKey(cid.bytes) const [has, peers, closer] = await Promise.all([ - dht.datastore.has(dsKey), - dht.providers.getProviders(cid), - dht._betterPeersToQuery(msg, peerId) + this._datastore.has(dsKey), + this._providers.getProviders(cid), + this._peerRouting.getCloserPeersOffline(msg.key, peerId) ]) - const providerPeers = peers.map((peerId) => ({ - id: peerId, - multiaddrs: [] - })) - const closerPeers = closer.map((c) => ({ - id: c.id, - multiaddrs: [] - })) + const providerPeers = peers + .map((provider) => ({ + id: provider, + multiaddrs: (this._peerStore.addressBook.get(provider) || []).map(address => address.multiaddr) + })) + .map(this._lan ? 
removePublicAddresses : removePrivateAddresses) + .filter(({ multiaddrs }) => multiaddrs.length) + + const closerPeers = closer + .map((closer) => ({ + id: closer.id, + multiaddrs: (this._peerStore.addressBook.get(closer.id) || []).map(address => address.multiaddr) + })) + .map(this._lan ? removePublicAddresses : removePrivateAddresses) + .filter(({ multiaddrs }) => multiaddrs.length) if (has) { - providerPeers.push({ - id: dht.peerId, - multiaddrs: [] + const mapper = this._lan ? removePublicAddresses : removePrivateAddresses + + const ourRecord = mapper({ + id: this._peerId, + multiaddrs: this._addressable.multiaddrs }) + + if (ourRecord.multiaddrs.length) { + providerPeers.push(ourRecord) + } } const response = new Message(msg.type, msg.key, msg.clusterLevel) @@ -68,6 +104,6 @@ module.exports = (dht) => { log('got %s providers %s closerPeers', providerPeers.length, closerPeers.length) return response } - - return getProviders } + +module.exports.GetProvidersHandler = GetProvidersHandler diff --git a/src/rpc/handlers/get-value.js b/src/rpc/handlers/get-value.js index 815641e7..929d3c27 100644 --- a/src/rpc/handlers/get-value.js +++ b/src/rpc/handlers/get-value.js @@ -1,33 +1,48 @@ 'use strict' const { Record } = require('libp2p-record') - const errcode = require('err-code') - -const Message = require('../../message') +const { Message } = require('../../message') +const { + MAX_RECORD_AGE +} = require('../../constants') const utils = require('../../utils') +const log = utils.logger('libp2p:kad-dht:rpc:handlers:get-value') + /** * @typedef {import('peer-id')} PeerId + * @typedef {import('../types').DHTMessageHandler} DHTMessageHandler */ /** - * @param {import('../../index')} dht + * @implements {DHTMessageHandler} */ -module.exports = (dht) => { - const log = utils.logger(dht.peerId, 'rpc:get-value') +class GetValueHandler { + /** + * @param {object} params + * @param {PeerId} params.peerId + * @param {import('../../types').PeerStore} params.peerStore + * @param 
{import('../../peer-routing').PeerRouting} params.peerRouting + * @param {import('interface-datastore').Datastore} params.datastore + */ + constructor ({ peerId, peerStore, peerRouting, datastore }) { + this._peerId = peerId + this._peerStore = peerStore + this._peerRouting = peerRouting + this._datastore = datastore + } /** * Process `GetValue` DHT messages. * * @param {PeerId} peerId * @param {Message} msg - * @returns {Promise} */ - async function getValue (peerId, msg) { + async handle (peerId, msg) { const key = msg.key - log('key: %b', key) + log('%p asked for key %b', peerId, key) if (!key || key.length === 0) { throw errcode(new Error('Invalid key'), 'ERR_INVALID_KEY') @@ -40,10 +55,10 @@ module.exports = (dht) => { const idFromKey = utils.fromPublicKeyKey(key) let id - if (dht._isSelf(idFromKey)) { - id = dht.peerId + if (this._peerId.equals(idFromKey)) { + id = this._peerId } else { - const peerData = dht.peerStore.get(idFromKey) + const peerData = this._peerStore.get(idFromKey) id = peerData && peerData.id } @@ -55,22 +70,64 @@ module.exports = (dht) => { } const [record, closer] = await Promise.all([ - dht._checkLocalDatastore(key), - dht._betterPeersToQuery(msg, peerId) + this._checkLocalDatastore(key), + this._peerRouting.getCloserPeersOffline(msg.key, peerId) ]) if (record) { - log('got record') + log('had record for %b in local datastore', key) response.record = record } if (closer.length > 0) { - log('got closer %s', closer.length) + log('had %s closer peers in routing table', closer.length) response.closerPeers = closer } return response } - return getValue + /** + * Try to fetch a given record by from the local datastore. + * Returns the record iff it is still valid, meaning + * - it was either authored by this node, or + * - it was received less than `MAX_RECORD_AGE` ago. 
+ * + * @param {Uint8Array} key + */ + async _checkLocalDatastore (key) { + log('checkLocalDatastore looking for %b', key) + const dsKey = utils.bufferToKey(key) + + // Fetch value from ds + let rawRecord + try { + rawRecord = await this._datastore.get(dsKey) + } catch (/** @type {any} */ err) { + if (err.code === 'ERR_NOT_FOUND') { + return undefined + } + throw err + } + + // Create record from the returned bytes + const record = Record.deserialize(rawRecord) + + if (!record) { + throw errcode(new Error('Invalid record'), 'ERR_INVALID_RECORD') + } + + // Check validity: compare time received with max record age + if (record.timeReceived == null || + Date.now() - record.timeReceived.getTime() > MAX_RECORD_AGE) { + // If record is bad delete it and return + await this._datastore.delete(dsKey) + return undefined + } + + // Record is valid + return record + } } + +module.exports.GetValueHandler = GetValueHandler diff --git a/src/rpc/handlers/index.js b/src/rpc/handlers/index.js index 8b655eaf..166cff33 100644 --- a/src/rpc/handlers/index.js +++ b/src/rpc/handlers/index.js @@ -1,19 +1,37 @@ 'use strict' -const T = require('../../message').TYPES +const { Message } = require('../../message') +const { AddProviderHandler } = require('./add-provider') +const { FindNodeHandler } = require('./find-node') +const { GetProvidersHandler } = require('./get-providers') +const { GetValueHandler } = require('./get-value') +const { PingHandler } = require('./ping') +const { PutValueHandler } = require('./put-value') /** - * - * @param {import('../../index')} dht + * @typedef {import('../types').DHTMessageHandler} DHTMessageHandler */ -module.exports = (dht) => { + +/** + * @param {object} params + * @param {import('peer-id')} params.peerId + * @param {import('../../providers').Providers} params.providers + * @param {import('../../types').PeerStore} params.peerStore + * @param {import('../../types').Addressable} params.addressable + * @param {import('../../peer-routing').PeerRouting} 
params.peerRouting + * @param {import('interface-datastore').Datastore} params.datastore + * @param {import('libp2p-interfaces/src/types').DhtValidators} params.validators + * @param {boolean} [params.lan] + */ +module.exports = ({ peerId, providers, peerStore, addressable, peerRouting, datastore, validators, lan }) => { + /** @type {Record} */ const handlers = { - [T.GET_VALUE]: require('./get-value')(dht), - [T.PUT_VALUE]: require('./put-value')(dht), - [T.FIND_NODE]: require('./find-node')(dht), - [T.ADD_PROVIDER]: require('./add-provider')(dht), - [T.GET_PROVIDERS]: require('./get-providers')(dht), - [T.PING]: require('./ping')(dht) + [Message.TYPES.GET_VALUE]: new GetValueHandler({ peerId, peerStore, peerRouting, datastore }), + [Message.TYPES.PUT_VALUE]: new PutValueHandler({ validators, datastore }), + [Message.TYPES.FIND_NODE]: new FindNodeHandler({ peerId, addressable, peerRouting, lan }), + [Message.TYPES.ADD_PROVIDER]: new AddProviderHandler({ peerId, providers, peerStore }), + [Message.TYPES.GET_PROVIDERS]: new GetProvidersHandler({ peerId, peerRouting, providers, datastore, peerStore, addressable, lan }), + [Message.TYPES.PING]: new PingHandler() } /** @@ -22,7 +40,6 @@ module.exports = (dht) => { * @param {number} type */ function getMessageHandler (type) { - // @ts-ignore ts does not aknowledge number as an index type return handlers[type] } diff --git a/src/rpc/handlers/ping.js b/src/rpc/handlers/ping.js index 83a98a6c..9b08c74d 100644 --- a/src/rpc/handlers/ping.js +++ b/src/rpc/handlers/ping.js @@ -1,28 +1,28 @@ 'use strict' const utils = require('../../utils') +const log = utils.logger('libp2p:kad-dht:rpc:handlers:ping') /** * @typedef {import('peer-id')} PeerId - * @typedef {import('../../message')} Message + * @typedef {import('../../message').Message} Message + * @typedef {import('../types').DHTMessageHandler} DHTMessageHandler */ /** - * @param {import('../../index')} dht + * @implements {DHTMessageHandler} */ -module.exports = (dht) => { - 
const log = utils.logger(dht.peerId, 'rpc:ping') - +class PingHandler { /** * Process `Ping` DHT messages. * * @param {PeerId} peerId * @param {Message} msg */ - function ping (peerId, msg) { - log('from %s', peerId.toB58String()) + async handle (peerId, msg) { + log(`ping from ${peerId}`) return msg } - - return ping } + +module.exports.PingHandler = PingHandler diff --git a/src/rpc/handlers/put-value.js b/src/rpc/handlers/put-value.js index a9e1bef7..22bb9028 100644 --- a/src/rpc/handlers/put-value.js +++ b/src/rpc/handlers/put-value.js @@ -2,17 +2,28 @@ const utils = require('../../utils') const errcode = require('err-code') +const Libp2pRecord = require('libp2p-record') +const log = utils.logger('libp2p:kad-dht:rpc:handlers:put-value') /** * @typedef {import('peer-id')} PeerId - * @typedef {import('../../message')} Message + * @typedef {import('../../message').Message} Message + * @typedef {import('../types').DHTMessageHandler} DHTMessageHandler */ /** - * @param {import('../../index')} dht + * @implements {DHTMessageHandler} */ -module.exports = (dht) => { - const log = utils.logger(dht.peerId, 'rpc:put-value') +class PutValueHandler { + /** + * @param {object} params + * @param {import('libp2p-interfaces/src/types').DhtValidators} params.validators + * @param {import('interface-datastore').Datastore} params.datastore + */ + constructor ({ validators, datastore }) { + this._validators = validators + this._datastore = datastore + } /** * Process `PutValue` DHT messages. 
@@ -20,9 +31,9 @@ module.exports = (dht) => { * @param {PeerId} peerId * @param {Message} msg */ - async function putValue (peerId, msg) { + async handle (peerId, msg) { const key = msg.key - log('key: %b', key) + log('%p asked to store value for key %b', peerId, key) const record = msg.record @@ -33,16 +44,14 @@ module.exports = (dht) => { throw errcode(new Error(errMsg), 'ERR_EMPTY_RECORD') } - await dht._verifyRecordLocally(record) + await Libp2pRecord.validator.verifyRecord(this._validators, record) record.timeReceived = new Date() const recordKey = utils.bufferToKey(record.key) - await dht.datastore.put(recordKey, record.serialize()) - - dht.onPut(record, peerId) + await this._datastore.put(recordKey, record.serialize()) return msg } - - return putValue } + +module.exports.PutValueHandler = PutValueHandler diff --git a/src/rpc/index.js b/src/rpc/index.js index ddae8ac9..f6f5c7e7 100644 --- a/src/rpc/index.js +++ b/src/rpc/index.js @@ -3,21 +3,37 @@ const { pipe } = require('it-pipe') const lp = require('it-length-prefixed') -const Message = require('../message') +const { Message, MESSAGE_TYPE_LOOKUP } = require('../message') const handlers = require('./handlers') const utils = require('../utils') +const log = utils.logger('libp2p:kad-dht:rpc') + /** * @typedef {import('peer-id')} PeerId * @typedef {import('libp2p-interfaces/src/stream-muxer/types').MuxedStream} MuxedStream */ /** - * @param {import('../index')} dht + * @param {import('../types').DHT} dht */ -module.exports = (dht) => { - const log = utils.logger(dht.peerId, 'rpc') - const getMessageHandler = handlers(dht) +class RPC { + /** + * @param {object} params + * @param {import('../routing-table').RoutingTable} params.routingTable + * @param {import('peer-id')} params.peerId + * @param {import('../providers').Providers} params.providers + * @param {import('../types').PeerStore} params.peerStore + * @param {import('../types').Addressable} params.addressable + * @param 
{import('../peer-routing').PeerRouting} params.peerRouting + * @param {import('interface-datastore').Datastore} params.datastore + * @param {import('libp2p-interfaces/src/types').DhtValidators} params.validators + * @param {boolean} [params.lan] + */ + constructor (params) { + this._messageHandler = handlers(params) + this._routingTable = params.routingTable + } /** * Process incoming DHT messages. @@ -25,12 +41,12 @@ module.exports = (dht) => { * @param {PeerId} peerId * @param {Message} msg */ - async function handleMessage (peerId, msg) { + async handleMessage (peerId, msg) { // get handler & execute it - const handler = getMessageHandler(msg.type) + const handler = this._messageHandler(msg.type) try { - await dht._add(peerId) + await this._routingTable.add(peerId) } catch (/** @type {any} */ err) { log.error('Failed to update the kbucket store', err) } @@ -40,7 +56,7 @@ module.exports = (dht) => { return } - return handler(peerId, msg) + return handler.handle(peerId, msg) } /** @@ -50,17 +66,16 @@ module.exports = (dht) => { * @param {MuxedStream} props.stream * @param {import('libp2p-interfaces/src/connection').Connection} props.connection */ - async function onIncomingStream ({ stream, connection }) { + async onIncomingStream ({ stream, connection }) { const peerId = connection.remotePeer try { - await dht._add(peerId) + await this._routingTable.add(peerId) } catch (/** @type {any} */ err) { log.error(err) } - const idB58Str = peerId.toB58String() - log('from: %s', idB58Str) + const self = this await pipe( stream.source, @@ -72,7 +87,8 @@ module.exports = (dht) => { for await (const msg of source) { // handle the message const desMessage = Message.deserialize(msg.slice()) - const res = await handleMessage(peerId, desMessage) + log('incoming %s from %p', MESSAGE_TYPE_LOOKUP[desMessage.type], peerId) + const res = await self.handleMessage(peerId, desMessage) // Not all handlers will return a response if (res) { @@ -84,6 +100,6 @@ module.exports = (dht) => { 
stream.sink ) } - - return onIncomingStream } + +module.exports.RPC = RPC diff --git a/src/rpc/types.ts b/src/rpc/types.ts new file mode 100644 index 00000000..eb42add0 --- /dev/null +++ b/src/rpc/types.ts @@ -0,0 +1,6 @@ +import type PeerId from 'peer-id' +import type { Message } from '../message' + +export interface DHTMessageHandler { + handle: (peerId: PeerId, msg: Message) => Promise +} diff --git a/src/topology-listener.js b/src/topology-listener.js new file mode 100644 index 00000000..7e558fde --- /dev/null +++ b/src/topology-listener.js @@ -0,0 +1,61 @@ +'use strict' + +const MulticodecTopology = require('libp2p-interfaces/src/topology/multicodec-topology') +const { EventEmitter } = require('events') + +/** + * Receives notifications of new peers joining the network that support the DHT protocol + */ +class TopologyListener extends EventEmitter { + /** + * Create a new network + * + * @param {object} params + * @param {import('./types').Registrar} params.registrar + * @param {string} params.protocol + */ + constructor ({ registrar, protocol }) { + super() + + this._running = false + this._registrar = registrar + this._protocol = protocol + } + + /** + * Start the network + */ + start () { + if (this._running) { + return + } + + this._running = true + + // register protocol with topology + const topology = new MulticodecTopology({ + multicodecs: [this._protocol], + handlers: { + onConnect: (peerId) => { + this.emit('peer', peerId) + }, + onDisconnect: () => {} + } + }) + this._registrarId = this._registrar.register(topology) + } + + /** + * Stop all network activity + */ + stop () { + this._running = false + + // unregister protocol and handlers + if (this._registrarId) { + this._registrar.unregister(this._registrarId) + } + } +} + +module.exports.TopologyListener = TopologyListener diff --git a/src/types.ts b/src/types.ts new file mode 100644 index 00000000..74c95b4e --- /dev/null +++ b/src/types.ts @@ -0,0 +1,181 @@ +import type PeerId from 'peer-id' 
+import type { Multiaddr } from 'multiaddr'
+import type { CID } from 'multiformats/cid'
+import type { MuxedStream } from 'libp2p/src/upgrader'
+import type Topology from 'libp2p-interfaces/src/topology'
+import type { PublicKey } from 'libp2p-crypto'
+import type { Message } from './message/dht'
+
+export enum EventTypes {
+  SENDING_QUERY = 0,
+  PEER_RESPONSE,
+  FINAL_PEER,
+  QUERY_ERROR,
+  PROVIDER,
+  VALUE,
+  ADDING_PEER,
+  DIALING_PEER
+}
+
+export type MessageName = keyof typeof Message.MessageType
+
+export interface PeerData {
+  id: PeerId
+  multiaddrs: Multiaddr[]
+}
+
+export interface DHTRecord {
+  key: Uint8Array
+  value: Uint8Array
+  timeReceived?: Date
+}
+
+export interface AbortOptions {
+  signal?: AbortSignal
+}
+
+export interface QueryOptions extends AbortOptions {
+  queryFuncTimeout?: number
+}
+
+/**
+ * Emitted when sending queries to remote peers
+ */
+export interface SendingQueryEvent {
+  to: PeerId
+  type: EventTypes.SENDING_QUERY
+  name: 'SENDING_QUERY'
+  messageName: keyof typeof Message.MessageType
+  messageType: Message.MessageType
+}
+
+/**
+ * Emitted when query responses are received from remote peers. Depending on the query
+ * these events may be followed by a `FinalPeerEvent`, a `ValueEvent` or a `ProviderEvent`.
+ */ +export interface PeerResponseEvent { + from: PeerId + type: EventTypes.PEER_RESPONSE + name: 'PEER_RESPONSE' + messageName: keyof typeof Message.MessageType + messageType: Message.MessageType + closer: PeerData[] + providers: PeerData[] + record?: DHTRecord +} + +/** + * Emitted at the end of a `findPeer` query + */ +export interface FinalPeerEvent { + from: PeerId + peer: PeerData + type: EventTypes.FINAL_PEER + name: 'FINAL_PEER' +} + +/** + * Something went wrong with the query + */ +export interface QueryErrorEvent { + from: PeerId + type: EventTypes.QUERY_ERROR + name: 'QUERY_ERROR' + error: Error +} + +/** + * Emitted when providers are found + */ +export interface ProviderEvent { + from: PeerId + type: EventTypes.PROVIDER + name: 'PROVIDER' + providers: PeerData[] +} + +/** + * Emitted when values are found + */ +export interface ValueEvent { + from: PeerId + type: EventTypes.VALUE + name: 'VALUE' + value: Uint8Array +} + +/** + * Emitted when peers are added to a query + */ +export interface AddingPeerEvent { + type: EventTypes.ADDING_PEER + name: 'ADDING_PEER' + peer: PeerId +} + +/** + * Emitted when peers are dialled as part of a query + */ +export interface DialingPeerEvent { + peer: PeerId + type: EventTypes.DIALING_PEER + name: 'DIALING_PEER' +} + +export type QueryEvent = SendingQueryEvent | PeerResponseEvent | FinalPeerEvent | QueryErrorEvent | ProviderEvent | ValueEvent | AddingPeerEvent | DialingPeerEvent + +export interface DHT { + // query/client methods + + /** + * Get a value from the DHT, the final ValueEvent will be the best value + */ + get: (key: Uint8Array, options?: QueryOptions) => AsyncIterable + findProviders: (key: CID, options?: QueryOptions) => AsyncIterable + findPeer: (id: PeerId, options?: QueryOptions) => AsyncIterable + getClosestPeers: (key: Uint8Array, options?: QueryOptions) => AsyncIterable + getPublicKey: (peer: PeerId, options?: QueryOptions) => Promise + + // publish/server methods + provide: (key: CID, options?: 
QueryOptions) => AsyncIterable + put: (key: Uint8Array, value: Uint8Array, options?: QueryOptions) => AsyncIterable + + // enable/disable publishing + enableServerMode: () => void + enableClientMode: () => void + + // housekeeping + removeLocal: (key: Uint8Array) => Promise + refreshRoutingTable: () => Promise + + // events + on: (event: 'peer', handler: (peerData: PeerData) => void) => this +} + +// Implemented by libp2p, should be moved to libp2p-interfaces eventually +export interface Dialer { + dialProtocol: (peer: PeerId, protocol: string, options?: { signal?: AbortSignal }) => Promise<{ stream: MuxedStream }> +} + +// Implemented by libp2p, should be moved to libp2p-interfaces eventually +export interface Addressable { + multiaddrs: Multiaddr[] +} + +// Implemented by libp2p.registrar, should be moved to libp2p-interfaces eventually +export interface Registrar { + register: (topology: Topology) => string + unregister: (id: string) => boolean +} + +// Implemented by libp2p.peerStore, should be moved to libp2p-interfaces eventually +export interface PeerStore { + addressBook: AddressBook + get: (peerId: PeerId) => { id: PeerId, addresses: Array<{ multiaddr: Multiaddr }> } | undefined +} + +// Implemented by libp2p.peerStore.addressStore, should be moved to libp2p-interfaces eventually +export interface AddressBook { + add: (peerId: PeerId, addresses: Multiaddr[]) => void + get: (peerId: PeerId) => Array<{ multiaddr: Multiaddr }> | undefined +} diff --git a/src/utils.js b/src/utils.js index 495f175b..9b3ddba8 100644 --- a/src/utils.js +++ b/src/utils.js @@ -3,17 +3,55 @@ const debug = require('debug') const { sha256 } = require('multiformats/hashes/sha2') const { base58btc } = require('multiformats/bases/base58') +const { base32 } = require('multiformats/bases/base32') const { Key } = require('interface-datastore/key') -const { xor: uint8ArrayXor } = require('uint8arrays/xor') -const { compare: uint8ArrayCompare } = require('uint8arrays/compare') -const pMap = 
require('p-map') const { Record } = require('libp2p-record') const PeerId = require('peer-id') -const errcode = require('err-code') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') const { toString: uint8ArrayToString } = require('uint8arrays/to-string') const { concat: uint8ArrayConcat } = require('uint8arrays/concat') -const pTimeout = require('p-timeout') +const isPrivateIp = require('private-ip') + +// const IPNS_PREFIX = uint8ArrayFromString('/ipns/') +const PK_PREFIX = uint8ArrayFromString('/pk/') + +/** + * @param {import('./types').PeerData} peer + */ +function removePrivateAddresses ({ id, multiaddrs }) { + return { + id, + multiaddrs: multiaddrs.filter(multiaddr => { + const [[type, addr]] = multiaddr.stringTuples() + + if (type !== 4 && type !== 6) { + return false + } + + // @ts-expect-error types are wrong https://github.com/frenchbread/private-ip/issues/18 + return !isPrivateIp(addr) + }) + } +} + +/** + * @param {import('./types').PeerData} peer + */ +function removePublicAddresses ({ id, multiaddrs }) { + return { + id, + multiaddrs: multiaddrs.filter(multiaddr => { + const [[type, addr]] = multiaddr.stringTuples() + + if (type !== 4 && type !== 6) { + return false + } + + // @ts-expect-error types are wrong https://github.com/frenchbread/private-ip/issues/18 + return isPrivateIp(addr) + }) + } +} /** * Creates a DHT ID by hashing a given Uint8Array. 
@@ -21,7 +59,7 @@ const pTimeout = require('p-timeout') * @param {Uint8Array} buf * @returns {Promise} */ -exports.convertBuffer = async (buf) => { +const convertBuffer = async (buf) => { return (await sha256.digest(buf)).digest } @@ -31,7 +69,7 @@ exports.convertBuffer = async (buf) => { * @param {PeerId} peer * @returns {Promise} */ -exports.convertPeerId = async (peer) => { +const convertPeerId = async (peer) => { return (await sha256.digest(peer.id)).digest } @@ -41,8 +79,8 @@ exports.convertPeerId = async (peer) => { * @param {Uint8Array} buf * @returns {Key} */ -exports.bufferToKey = (buf) => { - return new Key('/' + exports.encodeBase32(buf), false) +const bufferToKey = (buf) => { + return new Key('/' + uint8ArrayToString(buf, 'base32'), false) } /** @@ -51,9 +89,9 @@ exports.bufferToKey = (buf) => { * @param {PeerId} peer * @returns {Uint8Array} */ -exports.keyForPublicKey = (peer) => { +const keyForPublicKey = (peer) => { return uint8ArrayConcat([ - uint8ArrayFromString('/pk/'), + PK_PREFIX, peer.id ]) } @@ -61,84 +99,22 @@ exports.keyForPublicKey = (peer) => { /** * @param {Uint8Array} key */ -exports.isPublicKeyKey = (key) => { +const isPublicKeyKey = (key) => { return uint8ArrayToString(key.slice(0, 4)) === '/pk/' } /** * @param {Uint8Array} key */ -exports.fromPublicKeyKey = (key) => { - return new PeerId(key.slice(4)) -} - -/** - * Get the current time as timestamp. - * - * @returns {number} - */ -exports.now = () => { - return Date.now() -} - -/** - * Encode a given Uint8Array into a base32 string. - * - * @param {Uint8Array} buf - * @returns {string} - */ -exports.encodeBase32 = (buf) => { - return uint8ArrayToString(buf, 'base32') -} - -/** - * Decode a given base32 string into a Uint8Array. - * - * @param {string} raw - * @returns {Uint8Array} - */ -exports.decodeBase32 = (raw) => { - return uint8ArrayFromString(raw, 'base32') -} - -/** - * Sort peers by distance to the given `target`. 
- * - * @param {Array} peers - * @param {Uint8Array} target - */ -exports.sortClosestPeers = async (peers, target) => { - const distances = await pMap(peers, async (peer) => { - const id = await exports.convertPeerId(peer) - - return { - peer: peer, - distance: uint8ArrayXor(id, target) - } - }) - - return distances.sort(exports.xorCompare).map((d) => d.peer) -} - -/** - * Compare function to sort an array of elements which have a distance property which is the xor distance to a given element. - * - * @param {{ distance: Uint8Array }} a - * @param {{ distance: Uint8Array }} b - */ -exports.xorCompare = (a, b) => { - return uint8ArrayCompare(a.distance, b.distance) +const isIPNSKey = (key) => { + return uint8ArrayToString(key.slice(0, 4)) === '/ipns/' } /** - * Computes how many results to collect on each disjoint path, rounding up. - * This ensures that we look for at least one result per path. - * - * @param {number} resultsWanted - * @param {number} numPaths - total number of paths + * @param {Uint8Array} key */ -exports.pathSize = (resultsWanted, numPaths) => { - return Math.ceil(resultsWanted / numPaths) +const fromPublicKeyKey = (key) => { + return new PeerId(key.slice(4)) } /** @@ -148,7 +124,7 @@ exports.pathSize = (resultsWanted, numPaths) => { * @param {Uint8Array} value * @returns {Uint8Array} */ -exports.createPutRecord = (key, value) => { +const createPutRecord = (key, value) => { const timeReceived = new Date() const rec = new Record(key, value, timeReceived) @@ -158,87 +134,41 @@ exports.createPutRecord = (key, value) => { /** * Creates a logger for the given subsystem * - * @param {PeerId} [id] - * @param {string} [subsystem] + * @param {string} name */ -exports.logger = (id, subsystem) => { - const name = ['libp2p', 'dht'] - if (subsystem) { - name.push(subsystem) - } - if (id) { - name.push(`${id.toB58String().slice(0, 8)}`) - } - +const logger = (name) => { // Add a formatter for converting to a base58 string debug.formatters.b = (v) => { return 
base58btc.baseEncode(v) } - const logger = Object.assign(debug(name.join(':')), { - error: debug(name.concat(['error']).join(':')) - }) - - return logger -} - -exports.TimeoutError = class TimeoutError extends Error { - get code () { - return 'ETIMEDOUT' + // Add a formatter for converting to a base58 string + debug.formatters.t = (v) => { + return base32.baseEncode(v) } -} - -/** - * Creates an async function that calls the given `asyncFn` and Errors - * if it does not resolve within `time` ms - * - * @template T - * @param {(...args: any[]) => Promise} asyncFn - * @param {number} [time] - */ -exports.withTimeout = (asyncFn, time) => { - /** - * @param {...any} args - * @returns {Promise} - */ - async function timeoutFn (...args) { - if (!time) { - return asyncFn(...args) - } - - let res - - try { - res = await pTimeout(asyncFn(...args), time) - } catch (/** @type {any} */ err) { - if (err instanceof pTimeout.TimeoutError) { - throw errcode(err, 'ETIMEDOUT') - } - throw err - } - - return res + // Add a formatter for stringifying peer ids + debug.formatters.p = (p) => { + return p.toB58String() } - return timeoutFn + const logger = Object.assign(debug(name), { + error: debug(`${name}:error`) + }) + + return logger } -/** - * Iterates the given `asyncIterator` and runs each item through the given `asyncFn` in parallel. - * Returns a promise that resolves when all items of the `asyncIterator` have been passed - * through `asyncFn`. 
- * - * @template T - * @template O - * - * @param {AsyncIterable} asyncIterator - * @param {(arg0: T) => Promise} asyncFn - */ -exports.mapParallel = async function (asyncIterator, asyncFn) { - const tasks = [] - for await (const item of asyncIterator) { - tasks.push(asyncFn(item)) - } - return Promise.all(tasks) +module.exports = { + removePrivateAddresses, + removePublicAddresses, + convertBuffer, + convertPeerId, + bufferToKey, + keyForPublicKey, + isPublicKeyKey, + isIPNSKey, + fromPublicKeyKey, + createPutRecord, + logger } diff --git a/test/generate-peers/generate-peers.spec.js b/test/generate-peers/generate-peers.spec.js index 1702398b..7c323b93 100644 --- a/test/generate-peers/generate-peers.spec.js +++ b/test/generate-peers/generate-peers.spec.js @@ -6,7 +6,8 @@ const which = require('which') const execa = require('execa') const { toString: uintArrayToString } = require('uint8arrays/to-string') const PeerId = require('peer-id') -const RoutingTable = require('../../src/routing-table') +const { RoutingTable } = require('../../src/routing-table') +const { RoutingTableRefresh } = require('../../src/routing-table/refresh') const { convertPeerId } = require('../../src/utils') @@ -34,7 +35,7 @@ describe('generate peers', function () { return } - let routingTable + let refresh before(async () => { await execa(go, ['build', 'generate-peer.go'], { @@ -43,10 +44,16 @@ describe('generate peers', function () { }) beforeEach(async function () { - this.timeout(20 * 1000) + this.timeout(540 * 1000) const id = await PeerId.create({ bits: 512 }) - routingTable = new RoutingTable(id, 20) + const table = new RoutingTable({ + peerId: id, + kBucketSize: 20 + }) + refresh = new RoutingTableRefresh({ + routingTable: table + }) }) const TEST_CASES = [{ @@ -72,7 +79,7 @@ describe('generate peers', function () { const localKadId = await convertPeerId(peerId) const goOutput = await fromGo(targetCpl, randPrefix, uintArrayToString(localKadId, 'base64pad')) - const jsOutput = await 
routingTable._makePeerId(localKadId, randPrefix, targetCpl) + const jsOutput = await refresh._makePeerId(localKadId, randPrefix, targetCpl) expect(goOutput).to.deep.equal(jsOutput) }) diff --git a/test/kad-dht.spec.js b/test/kad-dht.spec.js index ddcb1dfc..a9cae40f 100644 --- a/test/kad-dht.spec.js +++ b/test/kad-dht.spec.js @@ -8,25 +8,51 @@ const { Record } = require('libp2p-record') const errcode = require('err-code') const { equals: uint8ArrayEquals } = require('uint8arrays/equals') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const { toString: uint8ArrayToString } = require('uint8arrays/to-string') - +const drain = require('it-drain') const all = require('async-iterator-all') -const pMapSeries = require('p-map-series') -const pEachSeries = require('p-each-series') const delay = require('delay') - +const filter = require('it-filter') +const last = require('it-last') const kadUtils = require('../src/utils') const c = require('../src/constants') -const Message = require('../src/message') - +const { Message, MESSAGE_TYPE_LOOKUP } = require('../src/message') +const { + peerResponseEvent +} = require('../src/query/events') const createPeerId = require('./utils/create-peer-id') const createValues = require('./utils/create-values') const TestDHT = require('./utils/test-dht') const { countDiffPeers } = require('./utils') +const { sortClosestPeers } = require('./utils/sort-closest-peers') + +/** + * @param {AsyncIterable<>} events + * @param {keyof typeof import('../types').EventTypes} name + */ +async function findEvent (events, name) { + const event = await last( + filter(events, event => event.name === name) + ) + + if (!event) { + throw new Error(`No ${name} event found`) + } + + return event +} describe('KadDHT', () => { let peerIds let values + let tdht + + beforeEach(() => { + tdht = new TestDHT() + }) + + afterEach(() => { + tdht.teardown() + }) before(async function () { this.timeout(10 * 1000) @@ -45,85 +71,69 @@ 
describe('KadDHT', () => { }) describe('create', () => { - let tdht - - beforeEach(() => { - tdht = new TestDHT() - }) - - afterEach(() => { - tdht.teardown() - }) - it('simple', async () => { const [dht] = await tdht.spawn(1, { kBucketSize: 5 }) - expect(dht).to.have.property('peerId') - expect(dht).to.have.property('kBucketSize', 5) - expect(dht).to.have.property('routingTable') - }) - - it('with validators and selectors', async () => { - const [dht] = await tdht.spawn(1, { - validators: { - ipns: { func: () => { } } - }, - selectors: { - ipns: () => 0 - } - }) - - expect(dht).to.have.property('peerId') - expect(dht).to.have.property('routingTable') - expect(dht.validators).to.have.property('ipns') - expect(dht.selectors).to.have.property('ipns') + expect(dht).to.have.property('put') + expect(dht).to.have.property('get') + expect(dht).to.have.property('provide') + expect(dht).to.have.property('findProviders') + expect(dht).to.have.property('findPeer') + expect(dht).to.have.property('getClosestPeers') + expect(dht).to.have.property('getPublicKey') + expect(dht).to.have.property('enableServerMode') + expect(dht).to.have.property('enableClientMode') }) }) describe('start and stop', () => { - let tdht - - beforeEach(() => { - tdht = new TestDHT() - }) - - afterEach(() => { - tdht.teardown() - }) - it('simple with defaults', async () => { const [dht] = await tdht.spawn(1, null, false) - sinon.spy(dht.network, 'start') - - sinon.spy(dht.network, 'stop') + sinon.spy(dht._wan._network, 'start') + sinon.spy(dht._wan._network, 'stop') + sinon.spy(dht._lan._network, 'start') + sinon.spy(dht._lan._network, 'stop') dht.start() - expect(dht.network.start.calledOnce).to.equal(true) + expect(dht._wan._network.start.calledOnce).to.equal(true) + expect(dht._lan._network.start.calledOnce).to.equal(true) dht.stop() - expect(dht.network.stop.calledOnce).to.equal(true) + expect(dht._wan._network.stop.calledOnce).to.equal(true) + expect(dht._lan._network.stop.calledOnce).to.equal(true) 
}) it('server mode', async () => { - // Currently on by default + // Currently off by default const [dht] = await tdht.spawn(1, null, false) - sinon.spy(dht.registrar, 'handle') + + dht._libp2p.handle = sinon.stub() dht.start() - expect(dht.registrar.handle.callCount).to.equal(1) + // lan dht is always in server mode + expect(dht._libp2p.handle.callCount).to.equal(1) + + dht.enableServerMode() + // now wan dht should be in server mode too + expect(dht._libp2p.handle.callCount).to.equal(2) + dht.stop() }) it('client mode', async () => { + // Currently on by default const [dht] = await tdht.spawn(1, { clientMode: true }, false) - sinon.spy(dht.registrar, 'handle') + + dht._libp2p.handle = sinon.stub() dht.start() - expect(dht.registrar.handle.callCount).to.equal(0) dht.stop() + + // lan dht is always in server mode + expect(dht._libp2p.handle.callCount).to.equal(1) }) it('should not fail when already started', async () => { @@ -147,51 +157,40 @@ describe('KadDHT', () => { it('put - get same node', async function () { this.timeout(10 * 1000) - const tdht = new TestDHT() const key = uint8ArrayFromString('/v/hello') const value = uint8ArrayFromString('world') - const [dht] = await tdht.spawn(2) + const [dht] = await tdht.spawn(1) // Exchange data through the dht - await dht.put(key, value) + await drain(dht.put(key, value)) - const res = await dht.get(uint8ArrayFromString('/v/hello'), { timeout: 1000 }) - expect(res).to.eql(value) - - tdht.teardown() + const res = await last(dht.get(key)) + expect(res).to.have.property('value').that.equalBytes(value) }) it('put - removeLocal', async function () { this.timeout(10 * 1000) - const tdht = new TestDHT() const key = uint8ArrayFromString('/v/hello') const value = uint8ArrayFromString('world') - const [dht] = await tdht.spawn(2) + const [dht] = await tdht.spawn(1) - await dht.put(key, value) + await drain(dht.put(key, value)) - const res = await dht.get(uint8ArrayFromString('/v/hello'), { timeout: 1000 }) - 
expect(res).to.eql(value) + const res = await last(dht.get(key)) + expect(res).to.have.property('value').that.equalBytes(value) // remove from the local datastore await dht.removeLocal(key) - try { - await dht.datastore.get(key) - } catch (/** @type {any} */ err) { - expect(err).to.exist() - expect(err.code).to.be.eql('ERR_NOT_FOUND') - } finally { - tdht.teardown() - } + + await expect(dht._datastore.get(key)).to.eventually.be.rejected().with.property('code', 'ERR_NOT_FOUND') }) it('put - get', async function () { this.timeout(10 * 1000) - const tdht = new TestDHT() const key = uint8ArrayFromString('/v/hello') const value = uint8ArrayFromString('world') @@ -201,12 +200,10 @@ describe('KadDHT', () => { await tdht.connect(dhtA, dhtB) // Exchange data through the dht - await dhtA.put(key, value) - - const res = await dhtB.get(uint8ArrayFromString('/v/hello'), { timeout: 1000 }) - expect(res).to.eql(value) + await drain(dhtA.put(key, value)) - tdht.teardown() + const res = await last(dhtB.get(key)) + expect(res).to.have.property('value').that.equalBytes(value) }) it('put - should require a minimum number of peers to have successful puts', async function () { @@ -217,24 +214,25 @@ describe('KadDHT', () => { const key = uint8ArrayFromString('/v/hello') const value = uint8ArrayFromString('world') - const tdht = new TestDHT() const [dhtA, dhtB, dhtC, dhtD] = await tdht.spawn(4) // Stub verify record - const stub = sinon.stub(dhtD, '_verifyRecordLocally').rejects(error) + dhtD._lan._validators.v = { + ...dhtD._lan._validators.v, + func: sinon.stub().rejects(error) + } await Promise.all([ tdht.connect(dhtA, dhtB), tdht.connect(dhtA, dhtC), tdht.connect(dhtA, dhtD) ]) + // DHT operations - await dhtA.put(key, value, { minPeers: 2 }) - const res = await dhtB.get(key, { timeout: 1000 }) + await drain(dhtA.put(key, value, { minPeers: 2 })) - expect(res).to.eql(value) - stub.restore() - tdht.teardown() + const res = await last(dhtB.get(key)) + 
expect(res).to.have.property('value').that.equalBytes(value) }) it('put - should fail if not enough peers can be written to', async function () { @@ -245,12 +243,17 @@ describe('KadDHT', () => { const key = uint8ArrayFromString('/v/hello') const value = uint8ArrayFromString('world') - const tdht = new TestDHT() const [dhtA, dhtB, dhtC, dhtD] = await tdht.spawn(4) // Stub verify record - const stub = sinon.stub(dhtD, '_verifyRecordLocally').rejects(error) - const stub2 = sinon.stub(dhtC, '_verifyRecordLocally').rejects(error) + dhtD._lan._validators.v = { + ...dhtD._lan._validators.v, + func: sinon.stub().rejects(error) + } + dhtC._lan._validators.v = { + ...dhtC._lan._validators.v, + func: sinon.stub().rejects(error) + } await Promise.all([ tdht.connect(dhtA, dhtB), @@ -259,11 +262,7 @@ describe('KadDHT', () => { ]) // DHT operations - await expect(dhtA.put(key, value, { minPeers: 2 })).to.eventually.be.rejected().property('code', 'ERR_NOT_ENOUGH_PUT_PEERS') - - stub.restore() - stub2.restore() - tdht.teardown() + await expect(drain(dhtA.put(key, value, { minPeers: 2 }))).to.eventually.be.rejected().property('code', 'ERR_NOT_ENOUGH_PUT_PEERS') }) it('put - should require all peers to be put to successfully if no minPeers specified', async function () { @@ -274,11 +273,13 @@ describe('KadDHT', () => { const key = uint8ArrayFromString('/v/hello') const value = uint8ArrayFromString('world') - const tdht = new TestDHT() const [dhtA, dhtB, dhtC] = await tdht.spawn(3) // Stub verify record - const stub = sinon.stub(dhtC, '_verifyRecordLocally').rejects(error) + dhtC._lan._validators.v = { + ...dhtC._lan._validators.v, + func: sinon.stub().rejects(error) + } await Promise.all([ tdht.connect(dhtA, dhtB), @@ -286,10 +287,7 @@ describe('KadDHT', () => { ]) // DHT operations - await expect(dhtA.put(key, value)).to.eventually.be.rejected().property('code', 'ERR_NOT_ENOUGH_PUT_PEERS') - - stub.restore() - tdht.teardown() + await expect(drain(dhtA.put(key, 
value))).to.eventually.be.rejected().property('code', 'ERR_NOT_ENOUGH_PUT_PEERS') }) it('put - get using key with no prefix (no selector available)', async function () { @@ -298,17 +296,15 @@ describe('KadDHT', () => { const key = uint8ArrayFromString('hello') const value = uint8ArrayFromString('world') - const tdht = new TestDHT() const [dhtA, dhtB] = await tdht.spawn(2) await tdht.connect(dhtA, dhtB) // DHT operations - await dhtA.put(key, value) - const res = await dhtB.get(key, { timeout: 1000 }) + await drain(dhtA.put(key, value)) - expect(res).to.eql(value) - tdht.teardown() + const res = await last(dhtB.get(key)) + expect(res).to.have.property('value').that.equalBytes(value) }) it('put - get using key from provided validator and selector', async function () { @@ -317,7 +313,6 @@ describe('KadDHT', () => { const key = uint8ArrayFromString('/ipns/hello') const value = uint8ArrayFromString('world') - const tdht = new TestDHT() const [dhtA, dhtB] = await tdht.spawn(2, { validators: { ipns: { @@ -332,11 +327,10 @@ describe('KadDHT', () => { await tdht.connect(dhtA, dhtB) // DHT operations - await dhtA.put(key, value) - const res = await dhtB.get(key, { timeout: 1000 }) + await drain(dhtA.put(key, value)) - expect(res).to.eql(value) - tdht.teardown() + const res = await last(dhtB.get(key)) + expect(res).to.have.property('value').that.equalBytes(value) }) it('put - get should fail if unrecognized key prefix in get', async function () { @@ -345,16 +339,13 @@ describe('KadDHT', () => { const key = uint8ArrayFromString('/v2/hello') const value = uint8ArrayFromString('world') - const tdht = new TestDHT() const [dhtA, dhtB] = await tdht.spawn(2) await tdht.connect(dhtA, dhtB) - await dhtA.put(key, value) - - await expect(dhtA.get(key)).to.eventually.be.rejected().property('code', 'ERR_UNRECOGNIZED_KEY_PREFIX') + await drain(dhtA.put(key, value)) - tdht.teardown() + await expect(last(dhtA.get(key))).to.eventually.be.rejected().property('code', 
'ERR_UNRECOGNIZED_KEY_PREFIX') }) it('put - get with update', async function () { @@ -364,30 +355,32 @@ describe('KadDHT', () => { const valueA = uint8ArrayFromString('worldA') const valueB = uint8ArrayFromString('worldB') - const tdht = new TestDHT() const [dhtA, dhtB] = await tdht.spawn(2) - const dhtASpy = sinon.spy(dhtA, '_putValueToPeer') + const dhtASpy = sinon.spy(dhtA._lan._network, 'sendRequest') // Put before peers connected - await dhtA.put(key, valueA) - await dhtB.put(key, valueB) + await drain(dhtA.put(key, valueA)) + await drain(dhtB.put(key, valueB)) // Connect peers await tdht.connect(dhtA, dhtB) // Get values - const resA = await dhtA.get(key, { timeout: 1000 }) - const resB = await dhtB.get(key, { timeout: 1000 }) + const resA = await last(dhtA.get(key)) + const resB = await last(dhtB.get(key)) // First is selected - expect(resA).to.eql(valueA) - expect(resB).to.eql(valueA) + expect(resA).to.have.property('value').that.equalBytes(valueA) + expect(resB).to.have.property('value').that.equalBytes(valueA) - expect(dhtASpy.callCount).to.eql(1) - expect(dhtASpy.getCall(0).args[2].isEqual(dhtB.peerId)).to.eql(true) // inform B + expect(dhtASpy.callCount).to.eql(2) - tdht.teardown() + expect(dhtASpy.getCall(0).args[0].equals(dhtB._libp2p.peerId)).to.be.true() // query B + expect(MESSAGE_TYPE_LOOKUP[dhtASpy.getCall(0).args[1].type]).to.equal('GET_VALUE') // query B + + expect(dhtASpy.getCall(1).args[0].equals(dhtB._libp2p.peerId)).to.be.true() // update B + expect(MESSAGE_TYPE_LOOKUP[dhtASpy.getCall(1).args[1].type]).to.equal('PUT_VALUE') // update B }) it('layered get', async function () { @@ -397,7 +390,7 @@ describe('KadDHT', () => { const value = uint8ArrayFromString('world') const nDHTs = 4 - const tdht = new TestDHT() + const dhts = await tdht.spawn(nDHTs) // Connect all @@ -408,11 +401,10 @@ describe('KadDHT', () => { ]) // DHT operations - await dhts[3].put(key, value) - const res = await dhts[0].get(key, { timeout: 1000 }) + await 
drain(dhts[3].put(key, value)) - expect(res).to.eql(value) - tdht.teardown() + const res = await last(dhts[0].get(key)) + expect(res).to.have.property('value').that.equalBytes(value) }) it('getMany with nvals=1 goes out to swarm if there is no local value', async () => { @@ -420,26 +412,17 @@ describe('KadDHT', () => { const value = uint8ArrayFromString('world') const rec = new Record(key, value) - const tdht = new TestDHT() const [dht] = await tdht.spawn(1) - const stubs = [ - // Simulate returning a peer id to query - sinon.stub(dht.routingTable, 'closestPeers').returns([peerIds[1]]), - // Simulate going out to the network and returning the record - sinon.stub(dht, '_getValueOrPeers').callsFake(async () => ({ record: rec })) // eslint-disable-line require-await - ] - - const res = await dht.getMany(key, 1) - - expect(res.length).to.eql(1) - expect(res[0].val).to.eql(value) - - for (const stub of stubs) { - stub.restore() - } + // Simulate returning a peer id to query + sinon.stub(dht._lan._routingTable, 'closestPeers').returns([peerIds[1]]) + // Simulate going out to the network and returning the record + sinon.stub(dht._lan._peerRouting, 'getValueOrPeers').callsFake(async function * (peer) { + yield peerResponseEvent({ peer: peer, record: rec }) + }) // eslint-disable-line require-await - tdht.teardown() + const res = await last(dht.get(key)) + expect(res).to.have.property('value').that.equalBytes(value) }) }) @@ -447,12 +430,11 @@ describe('KadDHT', () => { it('provides', async function () { this.timeout(20 * 1000) - const tdht = new TestDHT() const dhts = await tdht.spawn(4) - const ids = dhts.map((d) => d.peerId) + const ids = dhts.map((d) => d._libp2p.peerId) const idsB58 = ids.map(id => id.toB58String()) - sinon.spy(dhts[3].network, 'sendMessage') + sinon.spy(dhts[3]._lan._network, 'sendMessage') // connect peers await Promise.all([ @@ -462,10 +444,10 @@ describe('KadDHT', () => { ]) // provide values - await Promise.all(values.map((value) => 
dhts[3].provide(value.cid))) + await Promise.all(values.map((value) => drain(dhts[3].provide(value.cid)))) // Expect an ADD_PROVIDER message to be sent to each peer for each value - const fn = dhts[3].network.sendMessage + const fn = dhts[3]._lan._network.sendMessage const valuesBuffs = values.map(v => v.cid.bytes) const calls = fn.getCalls().map(c => c.args) @@ -479,23 +461,30 @@ describe('KadDHT', () => { // Expect each DHT to find the provider of each value let n = 0 - await pEachSeries(values, async (v) => { + for (const v of values) { n = (n + 1) % 3 - const provs = await all(dhts[n].findProviders(v.cid, { timeout: 5000 })) + const events = await all(dhts[n].findProviders(v.cid)) + const provs = Object.values(events.reduce((acc, curr) => { + if (curr.name === 'PEER_RESPONSE') { + curr.providers.forEach(peer => { + acc[peer.id.toB58String()] = peer.id + }) + } - expect(provs).to.have.length(1) - expect(provs[0].id.id).to.be.eql(ids[3].id) - }) + return acc + }, {})) - tdht.teardown() + expect(provs).to.have.length(1) + expect(provs[0].id).to.equalBytes(ids[3].id) + } }) it('find providers', async function () { this.timeout(20 * 1000) const val = values[0] - const tdht = new TestDHT() + const dhts = await tdht.spawn(3) // Connect @@ -504,57 +493,60 @@ describe('KadDHT', () => { tdht.connect(dhts[1], dhts[2]) ]) - await Promise.all(dhts.map((dht) => dht.provide(val.cid))) + await Promise.all(dhts.map((dht) => drain(dht.provide(val.cid)))) - const res0 = await all(dhts[0].findProviders(val.cid)) - const res1 = await all(dhts[0].findProviders(val.cid, { maxNumProviders: 2 })) + const events = await all(dhts[0].findProviders(val.cid)) // find providers find all the 3 providers - expect(res0).to.exist() - expect(res0).to.have.length(3) - - // find providers limited to a maxium of 2 providers - expect(res1).to.exist() - expect(res1).to.have.length(2) + const provs = Object.values(events.reduce((acc, curr) => { + if (curr.name === 'PEER_RESPONSE') { + 
curr.providers.forEach(peer => { + acc[peer.id.toB58String()] = peer.id + }) + } - tdht.teardown() + return acc + }, {})) + expect(provs).to.have.length(3) }) it('find providers from client', async function () { this.timeout(20 * 1000) const val = values[0] - const tdht = new TestDHT() - const dhts = await tdht.spawn(2) + + const dhts = await tdht.spawn(3) const [clientDHT] = await tdht.spawn(1, { clientMode: true }) // Connect await Promise.all([ tdht.connect(clientDHT, dhts[0]), - tdht.connect(dhts[0], dhts[1]) + tdht.connect(dhts[0], dhts[1]), + tdht.connect(dhts[1], dhts[2]) ]) - await Promise.all(dhts.map((dht) => dht.provide(val.cid))) + await Promise.all(dhts.map((dht) => drain(dht.provide(val.cid)))) - const res0 = await all(clientDHT.findProviders(val.cid)) - const res1 = await all(clientDHT.findProviders(val.cid, { maxNumProviders: 1 })) + const events = await all(dhts[0].findProviders(val.cid)) - // find providers find all the 2 providers - expect(res0).to.exist() - expect(res0).to.have.length(2) - - // find providers limited to a maxium of 1 providers - expect(res1).to.exist() - expect(res1).to.have.length(1) + // find providers find all the 3 providers + const provs = Object.values(events.reduce((acc, curr) => { + if (curr.name === 'PEER_RESPONSE') { + curr.providers.forEach(peer => { + acc[peer.id.toB58String()] = peer.id + }) + } - tdht.teardown() + return acc + }, {})) + expect(provs).to.have.length(3) }) it('find client provider', async function () { this.timeout(20 * 1000) const val = values[0] - const tdht = new TestDHT() + const dhts = await tdht.spawn(2) const [clientDHT] = await tdht.spawn(1, { clientMode: true }) @@ -564,34 +556,45 @@ describe('KadDHT', () => { tdht.connect(dhts[0], dhts[1]) ]) - await clientDHT.provide(val.cid) + await drain(clientDHT.provide(val.cid)) await delay(1e3) - const res = await all(dhts[1].findProviders(val.cid)) + const events = await all(dhts[1].findProviders(val.cid)) // find providers find the client provider 
- expect(res).to.exist() - expect(res).to.have.length(1) + const provs = Object.values(events.reduce((acc, curr) => { + if (curr.name === 'PEER_RESPONSE') { + curr.providers.forEach(peer => { + acc[peer.id.toB58String()] = peer.id + }) + } - tdht.teardown() + return acc + }, {})) + expect(provs).to.have.length(1) }) it('find one provider locally', async function () { this.timeout(20 * 1000) const val = values[0] - const tdht = new TestDHT() + const [dht] = await tdht.spawn(1) - sinon.stub(dht.providers, 'getProviders').returns([dht.peerId]) + sinon.stub(dht._lan._providers, 'getProviders').returns([dht._libp2p.peerId]) // Find provider - const res = await all(dht.findProviders(val.cid, { maxNumProviders: 1 })) - - expect(res).to.exist() - expect(res).to.have.length(1) + const events = await all(dht.findProviders(val.cid)) + const provs = Object.values(events.reduce((acc, curr) => { + if (curr.name === 'PEER_RESPONSE') { + curr.providers.forEach(peer => { + acc[peer.id.toB58String()] = peer.id + }) + } - tdht.teardown() + return acc + }, {})) + expect(provs).to.have.length(1) }) }) @@ -599,22 +602,19 @@ describe('KadDHT', () => { it('findPeer', async function () { this.timeout(40 * 1000) - const nDHTs = 4 - const tdht = new TestDHT() - const dhts = await tdht.spawn(nDHTs) + const dhts = await tdht.spawn(10) - // Connect all - await Promise.all([ - tdht.connect(dhts[0], dhts[1]), - tdht.connect(dhts[1], dhts[2]), - tdht.connect(dhts[2], dhts[3]) - ]) + // connect all in a line + for (let i = 0; i < dhts.length - 1; i++) { + await tdht.connect(dhts[i], dhts[i + 1]) + } + + const ids = dhts.map((d) => d._libp2p.peerId) - const ids = dhts.map((d) => d.peerId) - const res = await dhts[0].findPeer(ids[3], { timeout: 1000 }) - expect(res.id.isEqual(ids[3])).to.eql(true) + // ask the peer at the start of the line for the id of the peer at the end of the line + const finalPeer = await findEvent(dhts[0].findPeer(ids[ids.length - 1]), 'FINAL_PEER') - tdht.teardown() + 
expect(finalPeer.peer.id.isEqual(ids[ids.length - 1])).to.eql(true) }) it('find peer query', async function () { @@ -622,14 +622,14 @@ describe('KadDHT', () => { // Create 101 nodes const nDHTs = 100 - const tdht = new TestDHT() + const dhts = await tdht.spawn(nDHTs) - const dhtsById = new Map(dhts.map((d) => [d.peerId, d])) + const dhtsById = new Map(dhts.map((d) => [d._libp2p.peerId, d])) const ids = [...dhtsById.keys()] // The origin node for the FIND_PEER query - const guy = dhts[0] + const originNode = dhts[0] // The key const val = uint8ArrayFromString('foobar') @@ -637,7 +637,7 @@ describe('KadDHT', () => { // Hash the key into the DHT's key format const rtval = await kadUtils.convertBuffer(val) // Make connections between nodes close to each other - const sorted = await kadUtils.sortClosestPeers(ids, rtval) + const sorted = await sortClosestPeers(ids, rtval) const conns = [] const maxRightIndex = sorted.length - 1 @@ -660,7 +660,7 @@ describe('KadDHT', () => { // Get the alpha (3) closest peers to the key from the origin's // routing table - const rtablePeers = guy.routingTable.closestPeers(rtval, c.ALPHA) + const rtablePeers = originNode._lan._routingTable.closestPeers(rtval, c.ALPHA) expect(rtablePeers).to.have.length(c.ALPHA) // The set of peers used to initiate the query (the closest alpha @@ -670,12 +670,13 @@ describe('KadDHT', () => { rtableSet[p.toB58String()] = true }) - const guyIndex = ids.findIndex(i => uint8ArrayEquals(i.id, guy.peerId.id)) - const otherIds = ids.slice(0, guyIndex).concat(ids.slice(guyIndex + 1)) + const originNodeIndex = ids.findIndex(i => uint8ArrayEquals(i.id, originNode._libp2p.peerId.id)) + const otherIds = ids.slice(0, originNodeIndex).concat(ids.slice(originNodeIndex + 1)) // Make the query - const out = await all(guy.getClosestPeers(val)) - const actualClosest = await kadUtils.sortClosestPeers(otherIds, rtval) + const out = (await all(originNode.getClosestPeers(val))) + .filter(event => event.name === 
'FINAL_PEER').map(event => event.peer.id) + const actualClosest = await sortClosestPeers(otherIds, rtval) // Expect that the response includes nodes that are were not // already in the origin's routing table (ie it went out to @@ -683,34 +684,27 @@ describe('KadDHT', () => { expect(out.filter((p) => !rtableSet[p.toB58String()])) .to.not.be.empty() - // Expect that there were kValue peers found - expect(out).to.have.length(c.K) - // The expected closest kValue peers to the key const exp = actualClosest.slice(0, c.K) - // Expect the kValue peers found to be the kValue closest connected peers + // Expect the kValue peers found to include the kValue closest connected peers // to the key - expect(countDiffPeers(exp, out)).to.eql(0) - - tdht.teardown() + expect(countDiffPeers(out, exp)).to.equal(0) }) it('getClosestPeers', async function () { this.timeout(40 * 1000) const nDHTs = 30 - const tdht = new TestDHT() const dhts = await tdht.spawn(nDHTs) - await pMapSeries(dhts, async (_, index) => { - await tdht.connect(dhts[index], dhts[(index + 1) % dhts.length]) - }) + for (let i = 0; i < dhts.length - 1; i++) { + await tdht.connect(dhts[i], dhts[(i + 1) % dhts.length]) + } - const res = await all(dhts[1].getClosestPeers(uint8ArrayFromString('foo'))) - expect(res).to.have.length(c.K) + const res = await all(filter(dhts[1].getClosestPeers(uint8ArrayFromString('foo')), event => event.name === 'FINAL_PEER')) - tdht.teardown() + expect(res).to.have.length(c.K) }) }) @@ -718,156 +712,44 @@ describe('KadDHT', () => { it('already known', async function () { this.timeout(20 * 1000) - const tdht = new TestDHT() const dhts = await tdht.spawn(2) - const ids = dhts.map((d) => d.peerId) - dhts[0].peerStore.addressBook.add(dhts[1].peerId, [new Multiaddr('/ip4/160.1.1.1/tcp/80')]) + const ids = dhts.map((d) => d._libp2p.peerId) + dhts[0]._libp2p.peerStore.addressBook.add(dhts[1]._libp2p.peerId, [new Multiaddr('/ip4/160.1.1.1/tcp/80')]) const key = await dhts[0].getPublicKey(ids[1]) - 
expect(key).to.eql(dhts[1].peerId.pubKey) + expect(key).to.eql(dhts[1]._libp2p.peerId.pubKey) await delay(100) - - tdht.teardown() }) it('connected node', async function () { this.timeout(30 * 1000) - const tdht = new TestDHT() const dhts = await tdht.spawn(2) - const ids = dhts.map((d) => d.peerId) + const ids = dhts.map((d) => d._libp2p.peerId) await tdht.connect(dhts[0], dhts[1]) - dhts[0].peerStore.addressBook.add(dhts[1].peerId, [new Multiaddr('/ip4/160.1.1.1/tcp/80')]) + dhts[0]._libp2p.peerStore.addressBook.add(dhts[1]._libp2p.peerId, [new Multiaddr('/ip4/160.1.1.1/tcp/80')]) const key = await dhts[0].getPublicKey(ids[1]) - expect(uint8ArrayEquals(key, dhts[1].peerId.pubKey)).to.eql(true) - - tdht.teardown() - }) - }) - - describe('internals', () => { - let tdht - - beforeEach(() => { - tdht = new TestDHT() - }) - - afterEach(() => { - tdht.teardown() - }) - - it('_nearestPeersToQuery', async () => { - const [dht] = await tdht.spawn(1) - - await dht._add(peerIds[1]) - const res = await dht._nearestPeersToQuery({ key: uint8ArrayFromString('hello') }) - expect(res).to.be.eql([{ - id: peerIds[1], - multiaddrs: [] - }]) - }) - - it('_betterPeersToQuery', async () => { - const [dht] = await tdht.spawn(1) - - await dht._add(peerIds[1]) - await dht._add(peerIds[2]) - const res = await dht._betterPeersToQuery({ key: uint8ArrayFromString('hello') }, peerIds[1]) - - expect(res[0].id).to.be.eql(peerIds[2]) - }) - - describe('_checkLocalDatastore', () => { - let tdht - - beforeEach(() => { - tdht = new TestDHT() - }) - - afterEach(() => { - tdht.teardown() - }) - - it('allow a peer record from store if recent', async () => { - const [dht] = await tdht.spawn(1) - - const record = new Record( - uint8ArrayFromString('hello'), - uint8ArrayFromString('world') - ) - record.timeReceived = new Date() - - await dht.contentFetching._putLocal(record.key, record.serialize()) - const rec = await dht._checkLocalDatastore(record.key) - - expect(rec).to.exist('Record should not have 
expired') - expect(uint8ArrayToString(rec.value)).to.equal(uint8ArrayToString(record.value)) - }) - - it('delete entries received from peers that have expired', async () => { - const [dht] = await tdht.spawn(1) - - const record = new Record( - uint8ArrayFromString('hello'), - uint8ArrayFromString('world') - ) - const received = new Date() - received.setDate(received.getDate() - 2) - - record.timeReceived = received - - await dht.contentFetching._putLocal(record.key, record.serialize()) - - const lookup = await dht.datastore.get(kadUtils.bufferToKey(record.key)) - expect(lookup).to.exist('Record should be in the local datastore') - - let eventResponse - dht.onRemove = (record) => { - eventResponse = { record } - } - - const rec = await dht._checkLocalDatastore(record.key) - expect(rec).to.not.exist('Record should have expired') - - expect(eventResponse).to.have.property('record').eql(record) - // TODO - // const lookup2 = await dht.datastore.get(kadUtils.bufferToKey(record.key)) - // expect(lookup2).to.not.exist('Record should be removed from datastore') - }) - }) - - it('_verifyRecordLocally', async () => { - const [dht] = await tdht.spawn(1) - const record = new Record( - uint8ArrayFromString('hello'), - uint8ArrayFromString('world') - ) - const enc = record.serialize() - - return dht._verifyRecordLocally(Record.deserialize(enc)) + expect(uint8ArrayEquals(key, dhts[1]._libp2p.peerId.pubKey)).to.eql(true) }) }) describe('errors', () => { - it('get many should fail if only has one peer', async function () { + it('get should fail if only has one peer', async function () { this.timeout(20 * 1000) - const tdht = new TestDHT() const dhts = await tdht.spawn(1) // TODO: Switch not closing well, but it will be removed // (invalid transition: STOPPED -> done) await delay(100) - await expect(dhts[0].getMany(uint8ArrayFromString('/v/hello'), 5)).to.eventually.be.rejected().property('code', 'ERR_NO_PEERS_IN_ROUTING_TABLE') - - tdht.teardown() + await 
expect(all(dhts[0].get(uint8ArrayFromString('/v/hello')))).to.eventually.be.rejected().property('code', 'ERR_NO_PEERS_IN_ROUTING_TABLE') // TODO: after error switch }) @@ -878,91 +760,36 @@ describe('KadDHT', () => { const errCode = 'ERR_INVALID_RECORD_FAKE' const error = errcode(new Error('fake error'), errCode) - const tdht = new TestDHT() const [dhtA, dhtB] = await tdht.spawn(2) - const stub = sinon.stub(dhtA, '_getValueOrPeers').rejects(error) + const stub = sinon.stub(dhtA._lan._network._dialer, 'dialProtocol').rejects(error) await tdht.connect(dhtA, dhtB) - await expect(dhtA.get(uint8ArrayFromString('/v/hello'), { timeout: 1000 })).to.eventually.be.rejected().property('code', errCode) - - stub.restore() - tdht.teardown() - }) - - it('get should handle correctly an invalid record error and return not found', async function () { - this.timeout(20 * 1000) - - const error = errcode(new Error('invalid record error'), 'ERR_INVALID_RECORD') - - const tdht = new TestDHT() - const [dhtA, dhtB] = await tdht.spawn(2) - const stub = sinon.stub(dhtA, '_getValueOrPeers').rejects(error) - - await tdht.connect(dhtA, dhtB) + const errors = await all(filter(dhtA.get(uint8ArrayFromString('/v/hello')), event => event.name === 'QUERY_ERROR')) - await expect(dhtA.get(uint8ArrayFromString('/v/hello'), { timeout: 1000 })).to.eventually.be.rejected().property('code', 'ERR_NOT_FOUND') + expect(errors).to.have.lengthOf(1) + expect(errors).to.have.nested.property('[0].error.code', errCode) stub.restore() - tdht.teardown() }) it('findPeer should fail if no closest peers available', async function () { this.timeout(40 * 1000) - const tdht = new TestDHT() const dhts = await tdht.spawn(4) - const ids = dhts.map((d) => d.peerId) + const ids = dhts.map((d) => d._libp2p.peerId) await Promise.all([ tdht.connect(dhts[0], dhts[1]), tdht.connect(dhts[1], dhts[2]), tdht.connect(dhts[2], dhts[3]) ]) - const stub = sinon.stub(dhts[0].routingTable, 'closestPeers').returns([]) + const stub = 
sinon.stub(dhts[0]._lan._routingTable, 'closestPeers').returns([]) - await expect(dhts[0].findPeer(ids[3], { timeout: 1000 })).to.eventually.be.rejected().property('code', 'ERR_LOOKUP_FAILED') + await expect(drain(dhts[0].findPeer(ids[3]))).to.eventually.be.rejected().property('code', 'ERR_LOOKUP_FAILED') stub.restore() - tdht.teardown() - }) - - it('should not find peers with different protocols', async function () { - this.timeout(40 * 1000) - - const protocol1 = '/test1' - const protocol2 = '/test2' - - const tdht = new TestDHT() - const dhts = [] - dhts.push(...await tdht.spawn(2, { protocolPrefix: protocol1 })) - dhts.push(...await tdht.spawn(2, { protocolPrefix: protocol2 })) - - // Connect all - await Promise.all([ - tdht.connect(dhts[0], dhts[1]), - tdht.connect(dhts[1], dhts[2]), - tdht.connect(dhts[2], dhts[3]) - ]) - - const ids = dhts.map((d) => d.peerId) - - await expect(dhts[0].findPeer(ids[3], { timeout: 1000 })).to.eventually.be.rejected().property('code', 'ERR_NOT_FOUND') - - tdht.teardown() - }) - - it('force legacy protocol', async function () { - this.timeout(40 * 1000) - - const protocol = '/test/dht/0.0.0' - - const tdht = new TestDHT() - const [dht] = await tdht.spawn(1, { protocolPrefix: protocol, forceProtocolLegacy: true }) - - expect(dht.protocol).to.eql(protocol) - tdht.teardown() }) }) }) diff --git a/test/kad-utils.spec.js b/test/kad-utils.spec.js index 5a79b054..efa43aad 100644 --- a/test/kad-utils.spec.js +++ b/test/kad-utils.spec.js @@ -2,8 +2,6 @@ 'use strict' const { expect } = require('aegir/utils/chai') -const PeerId = require('peer-id') -const { xor: uint8ArrayXor } = require('uint8arrays/xor') const { concat: uint8ArrayConcat } = require('uint8arrays/concat') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') const { toString: uint8ArrayToString } = require('uint8arrays/to-string') @@ -33,72 +31,6 @@ describe('kad utils', () => { }) }) - describe('withTimeout', () => { - it('rejects with the error 
in the original function', async () => { - const original = async () => { throw new Error('explode') } // eslint-disable-line require-await - const asyncFn = utils.withTimeout(original, 100) - let err - try { - await asyncFn() - } catch (/** @type {any} */ _err) { - err = _err - } - - expect(err).to.exist() - expect(err.message).to.include('explode') - }) - }) - - describe('sortClosestPeers', () => { - it('sorts a list of PeerIds', async () => { - const rawIds = [ - '11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31', - '11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32', - '11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33', - '11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34' - ] - - const ids = rawIds.map((raw) => { - return new PeerId(uint8ArrayFromString(raw)) - }) - - const input = [ - ids[2], - ids[1], - ids[3], - ids[0] - ] - - const id = await utils.convertPeerId(ids[0]) - const out = await utils.sortClosestPeers(input, id) - - expect( - out.map((m) => m.toB58String()) - ).to.eql([ - ids[0], - ids[3], - ids[2], - ids[1] - ].map((m) => m.toB58String())) - }) - }) - - describe('xorCompare', () => { - it('sorts two distances', () => { - const target = uint8ArrayFromString('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a90') - const a = { - distance: uint8ArrayXor(uint8ArrayFromString('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a95'), target) - } - const b = { - distance: uint8ArrayXor(uint8ArrayFromString('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a96'), target) - } - - expect(utils.xorCompare(a, b)).to.eql(-1) - expect(utils.xorCompare(b, a)).to.eql(1) - expect(utils.xorCompare(a, a)).to.eql(0) - }) - }) - describe('keyForPublicKey', () => { it('works', async () => { const peers = await createPeerId(1) diff --git a/test/limited-peer-list.spec.js b/test/limited-peer-list.spec.js deleted file mode 100644 index 4ab056b6..00000000 --- a/test/limited-peer-list.spec.js +++ /dev/null @@ -1,37 +0,0 @@ -/* eslint-env mocha */ -'use strict' - -const { expect } = require('aegir/utils/chai') - 
-const LimitedPeerList = require('../src/peer-list/limited-peer-list') -const createPeerId = require('./utils/create-peer-id') - -describe('LimitedPeerList', () => { - let peers - - before(async () => { - peers = await createPeerId(5) - }) - - it('basics', () => { - const l = new LimitedPeerList(4) - - expect(l.push({ id: peers[0] })).to.eql(true) - expect(l.push({ id: peers[0] })).to.eql(false) - expect(l.push({ id: peers[1] })).to.eql(true) - expect(l.push({ id: peers[2] })).to.eql(true) - expect(l.push({ id: peers[3] })).to.eql(true) - expect(l.push({ id: peers[4] })).to.eql(false) - - expect(l).to.have.length(4) - expect(l.pop()).to.eql({ id: peers[3] }) - expect(l).to.have.length(3) - expect(l.push({ id: peers[4] })).to.eql(true) - expect(l.toArray()).to.eql([ - { id: peers[0] }, - { id: peers[1] }, - { id: peers[2] }, - { id: peers[4] } - ]) - }) -}) diff --git a/test/message.spec.js b/test/message.spec.js index 4ec63a19..de5290f1 100644 --- a/test/message.spec.js +++ b/test/message.spec.js @@ -9,7 +9,7 @@ const random = require('lodash.random') const { Record } = require('libp2p-record') const fs = require('fs') const path = require('path') -const Message = require('../src/message') +const { Message } = require('../src/message') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') describe('Message', () => { diff --git a/test/multiple-nodes.spec.js b/test/multiple-nodes.spec.js index e4c224ce..41a692e1 100644 --- a/test/multiple-nodes.spec.js +++ b/test/multiple-nodes.spec.js @@ -4,6 +4,8 @@ const { expect } = require('aegir/utils/chai') const TestDHT = require('./utils/test-dht') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') +const drain = require('it-drain') +const last = require('it-last') describe('multiple nodes', () => { const n = 8 @@ -15,7 +17,9 @@ describe('multiple nodes', () => { this.timeout(10 * 1000) tdht = new TestDHT() - dhts = await tdht.spawn(n) + dhts = await tdht.spawn(n, { + 
clientMode: false + }) // all nodes except the last one const range = Array.from(Array(n - 1).keys()) @@ -35,25 +39,25 @@ describe('multiple nodes', () => { const key = uint8ArrayFromString('/v/hello0') const value = uint8ArrayFromString('world') - await dhts[7].put(key, value) + await drain(dhts[7].put(key, value)) const res = await Promise.all([ - dhts[0].get(key, { timeout: 1000 }), - dhts[1].get(key, { timeout: 1000 }), - dhts[2].get(key, { timeout: 1000 }), - dhts[3].get(key, { timeout: 1000 }), - dhts[4].get(key, { timeout: 1000 }), - dhts[5].get(key, { timeout: 1000 }), - dhts[6].get(key, { timeout: 1000 }) + last(dhts[0].get(key)), + last(dhts[1].get(key)), + last(dhts[2].get(key)), + last(dhts[3].get(key)), + last(dhts[4].get(key)), + last(dhts[5].get(key)), + last(dhts[6].get(key)) ]) - expect(res[0]).to.eql(uint8ArrayFromString('world')) - expect(res[1]).to.eql(uint8ArrayFromString('world')) - expect(res[2]).to.eql(uint8ArrayFromString('world')) - expect(res[3]).to.eql(uint8ArrayFromString('world')) - expect(res[4]).to.eql(uint8ArrayFromString('world')) - expect(res[5]).to.eql(uint8ArrayFromString('world')) - expect(res[6]).to.eql(uint8ArrayFromString('world')) + expect(res[0]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[1]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[2]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[3]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[4]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[5]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[6]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) }) it('put to a node and get with the others', async function () { @@ -61,25 +65,25 @@ describe('multiple nodes', () => { const key = uint8ArrayFromString('/v/hello1') const value = 
uint8ArrayFromString('world') - await dhts[1].put(key, value) + await drain(dhts[1].put(key, value)) const res = await Promise.all([ - dhts[0].get(key, { timeout: 1000 }), - dhts[2].get(key, { timeout: 1000 }), - dhts[3].get(key, { timeout: 1000 }), - dhts[4].get(key, { timeout: 1000 }), - dhts[5].get(key, { timeout: 1000 }), - dhts[6].get(key, { timeout: 1000 }), - dhts[7].get(key, { timeout: 1000 }) + last(dhts[0].get(key)), + last(dhts[2].get(key)), + last(dhts[3].get(key)), + last(dhts[4].get(key)), + last(dhts[5].get(key)), + last(dhts[6].get(key)), + last(dhts[7].get(key)) ]) - expect(res[0]).to.eql(uint8ArrayFromString('world')) - expect(res[1]).to.eql(uint8ArrayFromString('world')) - expect(res[2]).to.eql(uint8ArrayFromString('world')) - expect(res[3]).to.eql(uint8ArrayFromString('world')) - expect(res[4]).to.eql(uint8ArrayFromString('world')) - expect(res[5]).to.eql(uint8ArrayFromString('world')) - expect(res[6]).to.eql(uint8ArrayFromString('world')) + expect(res[0]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[1]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[2]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[3]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[4]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[5]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) + expect(res[6]).have.property('value').that.equalBytes(uint8ArrayFromString('world')) }) it('put to several nodes in series with different values and get the last one in a subset of them', async function () { @@ -87,22 +91,22 @@ describe('multiple nodes', () => { const key = uint8ArrayFromString('/v/hallo') const result = uint8ArrayFromString('world4') - await dhts[0].put(key, uint8ArrayFromString('world0')) - await dhts[1].put(key, uint8ArrayFromString('world1')) - await dhts[2].put(key, 
uint8ArrayFromString('world2')) - await dhts[3].put(key, uint8ArrayFromString('world3')) - await dhts[4].put(key, uint8ArrayFromString('world4')) + await drain(dhts[0].put(key, uint8ArrayFromString('world0'))) + await drain(dhts[1].put(key, uint8ArrayFromString('world1'))) + await drain(dhts[2].put(key, uint8ArrayFromString('world2'))) + await drain(dhts[3].put(key, uint8ArrayFromString('world3'))) + await drain(dhts[4].put(key, uint8ArrayFromString('world4'))) const res = await Promise.all([ - dhts[4].get(key, { timeout: 2000 }), - dhts[5].get(key, { timeout: 2000 }), - dhts[6].get(key, { timeout: 2000 }), - dhts[7].get(key, { timeout: 2000 }) + last(dhts[4].get(key)), + last(dhts[5].get(key)), + last(dhts[6].get(key)), + last(dhts[7].get(key)) ]) - expect(res[0]).to.eql(result) - expect(res[1]).to.eql(result) - expect(res[2]).to.eql(result) - expect(res[3]).to.eql(result) + expect(res[0]).have.property('value').that.equalBytes(result) + expect(res[1]).have.property('value').that.equalBytes(result) + expect(res[2]).have.property('value').that.equalBytes(result) + expect(res[3]).have.property('value').that.equalBytes(result) }) }) diff --git a/test/network.spec.js b/test/network.spec.js index a07e4c50..61084c4d 100644 --- a/test/network.spec.js +++ b/test/network.spec.js @@ -4,12 +4,12 @@ const { expect } = require('aegir/utils/chai') const pair = require('it-pair') const pipe = require('it-pipe') -const delay = require('delay') const lp = require('it-length-prefixed') const pDefer = require('p-defer') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') +const all = require('it-all') -const Message = require('../src/message') +const { Message } = require('../src/message') const TestDHT = require('./utils/test-dht') @@ -20,7 +20,9 @@ describe('Network', () => { before(async function () { this.timeout(10 * 1000) tdht = new TestDHT() - ;[dht] = await tdht.spawn(1) + ;[dht] = await tdht.spawn(1, { + clientMode: false + }) }) after(() => 
tdht.teardown()) @@ -30,16 +32,15 @@ describe('Network', () => { const msg = new Message(Message.TYPES.PING, uint8ArrayFromString('hello'), 0) // mock dial - dht.dialer.connectToPeer = () => { - return { - newStream: () => { - return { stream: pair() } // {source, sink} streams that are internally connected - } - } + dht._libp2p.dialProtocol = () => { + return { stream: pair() } // {source, sink} streams that are internally connected } - const response = await dht.network.sendRequest(dht.peerId, msg) - expect(response.type).to.eql(Message.TYPES.PING) + const events = await all(dht._lan._network.sendRequest(dht._libp2p.peerId, msg)) + const response = events + .filter(event => event.name === 'PEER_RESPONSE') + .pop() + expect(response.messageType).to.eql(Message.TYPES.PING) }) it('send and response different messages', async () => { @@ -54,7 +55,7 @@ describe('Network', () => { const msg = new Message(Message.TYPES.PING, uint8ArrayFromString('hello'), 0) // mock it - dht.dialer.connectToPeer = async () => { + dht._libp2p.dialProtocol = async () => { const msg = new Message(Message.TYPES.FIND_NODE, uint8ArrayFromString('world'), 0) const data = [] @@ -91,72 +92,18 @@ describe('Network', () => { finish() } - return { - newStream: () => { - return { stream: { source, sink } } - } - } + return { stream: { source, sink } } } - const response = await dht.network.sendRequest(dht.peerId, msg) + const events = await all(dht._lan._network.sendRequest(dht._libp2p.peerId, msg)) + const response = events + .filter(event => event.name === 'PEER_RESPONSE') + .pop() - expect(response.type).to.eql(Message.TYPES.FIND_NODE) + expect(response.messageType).to.eql(Message.TYPES.FIND_NODE) finish() return defer.promise }) - - it('timeout on no message', async () => { - const defer = pDefer() - let i = 0 - const finish = () => { - if (i++ === 1) { - defer.resolve() - } - } - - const msg = new Message(Message.TYPES.PING, uint8ArrayFromString('hello'), 0) - - // mock it - 
dht.dialer.connectToPeer = () => { - const source = (async function * () { // eslint-disable-line require-yield - await delay(1000) - })() - - const sink = async source => { - const res = [] - await pipe( - source, - lp.decode(), - async source => { - for await (const chunk of source) { - res.push(chunk.slice()) - } - } - ) - expect(Message.deserialize(res[0]).type).to.eql(Message.TYPES.PING) - finish() - } - - return { - newStream: () => { - return { stream: { source, sink } } - } - } - } - - dht.network.readMessageTimeout = 100 - - try { - await dht.network.sendRequest(dht.peerId, msg) - } catch (/** @type {any} */ err) { - expect(err).to.exist() - expect(err.message).to.match(/timed out/) - - finish() - } - - return defer.promise - }) }) }) diff --git a/test/peer-list.spec.js b/test/peer-list.spec.js index f6a58142..bcb248d9 100644 --- a/test/peer-list.spec.js +++ b/test/peer-list.spec.js @@ -17,12 +17,12 @@ describe('PeerList', () => { it('basics', () => { const l = new PeerList() - expect(l.push({ id: peers[0] })).to.eql(true) - expect(l.push({ id: peers[0] })).to.eql(false) + expect(l.push(peers[0])).to.eql(true) + expect(l.push(peers[0])).to.eql(false) expect(l).to.have.length(1) - expect(l.push({ id: peers[1] })).to.eql(true) - expect(l.pop()).to.eql({ id: peers[1] }) + expect(l.push(peers[1])).to.eql(true) + expect(l.pop()).to.eql(peers[1]) expect(l).to.have.length(1) - expect(l.toArray()).to.eql([{ id: peers[0] }]) + expect(l.toArray()).to.eql([peers[0]]) }) }) diff --git a/test/peer-queue.spec.js b/test/peer-queue.spec.js deleted file mode 100644 index aee5cadc..00000000 --- a/test/peer-queue.spec.js +++ /dev/null @@ -1,41 +0,0 @@ -/* eslint-env mocha */ -'use strict' - -const { expect } = require('aegir/utils/chai') -const PeerId = require('peer-id') -const PeerQueue = require('../src/peer-list/peer-queue') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') - -describe('PeerQueue', () => { - it('basics', async () => { - 
const p1 = new PeerId(uint8ArrayFromString('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31')) - const p2 = new PeerId(uint8ArrayFromString('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a32')) - const p3 = new PeerId(uint8ArrayFromString('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33')) - const p4 = new PeerId(uint8ArrayFromString('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a34')) - const p5 = new PeerId(uint8ArrayFromString('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31')) - - const peer = new PeerId(uint8ArrayFromString('11140beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a31')) - - const pq = await PeerQueue.fromPeerId(peer) - - await pq.enqueue(p3) - await pq.enqueue(p1) - await pq.enqueue(p2) - await pq.enqueue(p4) - await pq.enqueue(p5) - await pq.enqueue(p1) - - expect([ - pq.dequeue(), - pq.dequeue(), - pq.dequeue(), - pq.dequeue(), - pq.dequeue(), - pq.dequeue() - ].map((m) => m.toB58String())).to.be.eql([ - p1, p1, p1, p4, p3, p2 - ].map((m) => m.toB58String())) - - expect(pq.length).to.be.eql(0) - }) -}) diff --git a/test/providers.spec.js b/test/providers.spec.js index ba0235ab..527dd4d6 100644 --- a/test/providers.spec.js +++ b/test/providers.spec.js @@ -5,22 +5,22 @@ const { expect } = require('aegir/utils/chai') const { MemoryDatastore } = require('datastore-core/memory') const { CID } = require('multiformats/cid') const { sha256 } = require('multiformats/hashes/sha2') -const LevelStore = require('datastore-level') +const { LevelDatastore } = require('datastore-level') const path = require('path') const os = require('os') -const Providers = require('../src/providers') +const { Providers } = require('../src/providers') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') const createPeerId = require('./utils/create-peer-id') const createValues = require('./utils/create-values') describe('Providers', () => { - let peerIds + let peers let providers before(async function () { this.timeout(10 * 1000) - peerIds = await createPeerId(3) + peers = await 
createPeerId(3) }) afterEach(() => { @@ -28,43 +28,41 @@ describe('Providers', () => { }) it('simple add and get of providers', async () => { - providers = new Providers(new MemoryDatastore(), peerIds[2]) + providers = new Providers(new MemoryDatastore(), peers[2]) const cid = CID.parse('QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n') await Promise.all([ - providers.addProvider(cid, peerIds[0]), - providers.addProvider(cid, peerIds[1]) + providers.addProvider(cid, peers[0]), + providers.addProvider(cid, peers[1]) ]) const provs = await providers.getProviders(cid) const ids = new Set(provs.map((peerId) => peerId.toB58String())) - expect(ids.has(peerIds[0].toB58String())).to.be.eql(true) - expect(ids.has(peerIds[1].toB58String())).to.be.eql(true) + expect(ids.has(peers[0].toB58String())).to.be.eql(true) }) it('duplicate add of provider is deduped', async () => { - providers = new Providers(new MemoryDatastore(), peerIds[2]) + providers = new Providers(new MemoryDatastore(), peers[2]) const cid = CID.parse('QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n') await Promise.all([ - providers.addProvider(cid, peerIds[0]), - providers.addProvider(cid, peerIds[0]), - providers.addProvider(cid, peerIds[1]), - providers.addProvider(cid, peerIds[1]), - providers.addProvider(cid, peerIds[1]) + providers.addProvider(cid, peers[0]), + providers.addProvider(cid, peers[0]), + providers.addProvider(cid, peers[1]), + providers.addProvider(cid, peers[1]), + providers.addProvider(cid, peers[1]) ]) const provs = await providers.getProviders(cid) expect(provs).to.have.length(2) const ids = new Set(provs.map((peerId) => peerId.toB58String())) - expect(ids.has(peerIds[0].toB58String())).to.be.eql(true) - expect(ids.has(peerIds[1].toB58String())).to.be.eql(true) + expect(ids.has(peers[0].toB58String())).to.be.eql(true) }) it('more providers than space in the lru cache', async () => { - providers = new Providers(new MemoryDatastore(), peerIds[2], 10) + providers = new Providers(new 
MemoryDatastore(), peers[2], 10) const hashes = await Promise.all([...new Array(100)].map((i) => { return sha256.digest(uint8ArrayFromString(`hello ${i}`)) @@ -72,17 +70,17 @@ describe('Providers', () => { const cids = hashes.map((h) => CID.createV0(h)) - await Promise.all(cids.map(cid => providers.addProvider(cid, peerIds[0]))) + await Promise.all(cids.map(cid => providers.addProvider(cid, peers[0]))) const provs = await Promise.all(cids.map(cid => providers.getProviders(cid))) expect(provs).to.have.length(100) for (const p of provs) { - expect(p[0].id).to.be.eql(peerIds[0].id) + expect(p[0].id).to.be.eql(peers[0].id) } }) it('expires', async () => { - providers = new Providers(new MemoryDatastore(), peerIds[2]) + providers = new Providers(new MemoryDatastore(), peers[2]) providers.cleanupInterval = 100 providers.provideValidity = 200 @@ -90,15 +88,15 @@ describe('Providers', () => { const cid = CID.parse('QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n') await Promise.all([ - providers.addProvider(cid, peerIds[0]), - providers.addProvider(cid, peerIds[1]) + providers.addProvider(cid, peers[0]), + providers.addProvider(cid, peers[1]) ]) const provs = await providers.getProviders(cid) expect(provs).to.have.length(2) - expect(provs[0].id).to.be.eql(peerIds[0].id) - expect(provs[1].id).to.be.eql(peerIds[1].id) + expect(provs[0].id).to.be.eql(peers[0].id) + expect(provs[1].id).to.be.eql(peers[1].id) await new Promise(resolve => setTimeout(resolve, 400)) @@ -112,32 +110,31 @@ describe('Providers', () => { const p = path.join( os.tmpdir(), (Math.random() * 100).toString() ) - const store = new LevelStore(p) - providers = new Providers(store, peerIds[2], 10) + const store = new LevelDatastore(p) + await store.open() + providers = new Providers(store, peers[2], 10) console.log('starting') // eslint-disable-line no-console - const res = await Promise.all([ + const [createdValues, createdPeers] = await Promise.all([ createValues(100), createPeerId(600) ]) console.log('got 
values and peers') // eslint-disable-line no-console - const values = res[0] - const peers = res[1] const total = Date.now() - for (const v of values) { - for (const p of peers) { - await providers.addProvider(v.cid, p.id) + for (const v of createdValues) { + for (const p of createdPeers) { + await providers.addProvider(v.cid, p) } } - console.log('addProvider %s peers %s cids in %sms', peers.length, values.length, Date.now() - total) // eslint-disable-line no-console - console.log('starting profile with %s peers and %s cids', peers.length, values.length) // eslint-disable-line no-console + console.log('addProvider %s peers %s cids in %sms', createdPeers.length, createdValues.length, Date.now() - total) // eslint-disable-line no-console + console.log('starting profile with %s peers and %s cids', createdPeers.length, createdValues.length) // eslint-disable-line no-console for (let i = 0; i < 3; i++) { const start = Date.now() - for (const v of values) { + for (const v of createdValues) { await providers.getProviders(v.cid) console.log('query %sms', (Date.now() - start)) // eslint-disable-line no-console } diff --git a/test/query.spec.js b/test/query.spec.js index 99fd81b1..6b99fa34 100644 --- a/test/query.spec.js +++ b/test/query.spec.js @@ -2,681 +2,593 @@ 'use strict' const { expect } = require('aegir/utils/chai') -const pDefer = require('p-defer') const delay = require('delay') -const Query = require('../src/query') -const kadUtils = require('../src/utils') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') - +const { QueryManager } = require('../src/query/manager') const createPeerId = require('./utils/create-peer-id') -const TestDHT = require('./utils/test-dht') -const createDisjointTracks = require('./utils/create-disjoint-tracks') - -describe('Query', () => { - let peerIds - let tdht - let dht - - before(async () => { - peerIds = await createPeerId(40) - }) - - beforeEach(async () => { - tdht = new TestDHT() - ;[dht] = await 
tdht.spawn(1) - }) +const all = require('it-all') +const drain = require('it-drain') +const { AbortController, AbortSignal } = require('native-abort-controller') +const { sortClosestPeers } = require('./utils/sort-closest-peers') +const { convertBuffer } = require('../src/utils') +const { + peerResponseEvent, + valueEvent, + queryErrorEvent +} = require('../src/query/events') + +/** + * @typedef {import('peer-id')} PeerId + * @typedef {import('../src/types').QueryEvent} QueryEvent + */ + +describe('QueryManager', () => { + /** @type {PeerId} */ + let ourPeerId + /** @type {PeerId[]} */ + let peers + /** @type {Uint8Array} */ + let key + + /** + * @param {Record} opts + */ + function createTopology (opts) { + /** @type {Record} */ + const topology = {} + + Object.keys(opts).forEach(key => { + const id = parseInt(key) + const peer = peers[id] + const entry = {} + const config = opts[id] + + if (config.delay) { + entry.delay = config.delay + } - afterEach(() => { - tdht.teardown() - }) + if (config.value !== undefined) { + entry.event = valueEvent({ peer, value: config.value }) + } else if (config.error) { + entry.event = queryErrorEvent({ peer, error: config.error }) + } else { + entry.event = peerResponseEvent({ + peer, + closer: (config.closerPeers || []).map((id) => ({ + id: peers[id], + multiaddrs: [] + })) + }) + } - it('simple run', async () => { - const peerId = dht.peerId + topology[peer.toB58String()] = entry + }) - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} + return topology + } - let i = 0 - const queryFunc = async (p) => { // eslint-disable-line require-await - if (i++ === 1) { - expect(p.id).to.eql(peerIds[2].id) + /** + * @param {Record} topology + */ + function createQueryFunction (topology) { + /** @type {import('../src/query/types').QueryFunc} */ + const queryFunc = async function * ({ peer }) { + const res = topology[peer.toB58String()] - return { - value: uint8ArrayFromString('cool'), - pathComplete: true - } - } - 
expect(p.id).to.eql(peerIds[1].id) - return { - closerPeers: [{ id: peerIds[2] }] + if (res.delay) { + await delay(res.delay) } + + yield res.event } - const q = new Query(dht, peerId.id, () => queryFunc) - const res = await q.run([peerIds[1]]) + return queryFunc + } + + before(async () => { + const unsortedPeers = await createPeerId(40) + ourPeerId = unsortedPeers.pop() + key = unsortedPeers.pop().toBytes() - expect(res.paths[0].value).to.eql(uint8ArrayFromString('cool')) - expect(res.paths[0].success).to.eql(true) - expect(res.finalSet.size).to.eql(2) + // sort remaining peers by XOR distance to the key, low -> high + peers = await sortClosestPeers(unsortedPeers, await convertBuffer(key)) }) - it('does not return an error if only some queries error', async () => { - const peerId = dht.peerId + it('does not run queries before start', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 1 }) - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} + // @ts-expect-error not enough params + await expect(all(manager.run())).to.eventually.be.rejectedWith(/not started/) + }) - let i = 0 - const visited = [] - const queryFunc = async (p) => { // eslint-disable-line require-await - visited.push(p) + it('does not run queries after stop', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 1 }) + manager.start() + manager.stop() - if (i++ === 1) { - throw new Error('fail') - } + // @ts-expect-error not enough params + await expect(all(manager.run())).to.eventually.be.rejectedWith(/not started/) + }) - return { - closerPeers: [{ id: peerIds[2] }] - } + it('should pass query context', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 1 }) + manager.start() + + /** @type {import('../src/query/types').QueryFunc} */ + const queryFunc = async function * (context) { // eslint-disable-line require-await + 
expect(context).to.have.property('key').that.equalBytes(key) + expect(context).to.have.property('peer').that.deep.equals(peers[0]) + expect(context).to.have.property('signal').that.is.an.instanceOf(AbortSignal) + expect(context).to.have.property('pathIndex').that.equals(0) + expect(context).to.have.property('numPaths').that.equals(1) + + yield valueEvent({ + peer: context.peer, + value: uint8ArrayFromString('cool') + }) } - const q = new Query(dht, peerId.id, () => queryFunc) - const res = await q.run([peerIds[1]]) + const results = await all(manager.run(key, peers, queryFunc)) - // Should have visited - // - the initial peer passed to the query: peerIds[1] - // - the peer returned in closerPeers: peerIds[2] - expect(visited).to.eql([peerIds[1], peerIds[2]]) + expect(results).to.have.lengthOf(1) + expect(results).to.deep.containSubset([{ + value: uint8ArrayFromString('cool') + }]) - // The final set should only contain peers that were successfully queried - // (ie no errors) - expect(res.finalSet.size).to.eql(1) - expect(res.finalSet.has(peerIds[1])).to.equal(true) + manager.stop() }) - it('returns an error if all queries error', async () => { - const peerId = dht.peerId - - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} + it('simple run - succeed finding value', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 1, alpha: 1 }) + manager.start() + + const peersQueried = [] + + /** @type {import('../src/query/types').QueryFunc} */ + const queryFunc = async function * ({ peer, signal }) { // eslint-disable-line require-await + expect(signal).to.be.an.instanceOf(AbortSignal) + peersQueried.push(peer) + + if (peersQueried.length === 1) { + // query more peers + yield peerResponseEvent({ + peer, + closer: peers.slice(0, 5).map(id => ({ id, multiaddrs: [] })) + }) + } else if (peersQueried.length === 6) { + // all peers queried, return result + yield valueEvent({ + peer, + value: uint8ArrayFromString('cool') 
+ }) + } else { + // a peer that cannot help in our query + yield peerResponseEvent({ + peer + }) + } + } - const queryFunc = async (p) => { throw new Error('fail') } // eslint-disable-line require-await - const q = new Query(dht, peerId.id, () => queryFunc) + const results = await all(manager.run(key, [peers[7]], queryFunc)) - try { - await q.run([peerIds[1]]) - } catch (/** @type {any} */ err) { - expect(err).to.exist() - expect(err.message).to.eql('fail') - return - } + // e.g. our starting peer plus the 5x closerPeers returned n the first iteration + expect(results).to.have.lengthOf(6) + expect(results).to.deep.containSubset([{ + value: uint8ArrayFromString('cool') + }]) + // should be a result in there somewhere - throw new Error('should return an error if all queries error') + manager.stop() }) - it('returns empty run if initial peer list is empty', async () => { - const peerId = dht.peerId - const queryFunc = async (p) => {} + it('simple run - fail to find value', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 1, alpha: 1 }) + manager.start() + + const peersQueried = [] + + /** @type {import('../src/query/types').QueryFunc} */ + const queryFunc = async function * ({ peer }) { // eslint-disable-line require-await + peersQueried.push(peer) + + if (peersQueried.length === 1) { + // query more peers + yield peerResponseEvent({ + peer, + closer: peers.slice(0, 5).map(id => ({ id, multiaddrs: [] })) + }) + } else { + // a peer that cannot help in our query + yield peerResponseEvent({ + peer + }) + } + } - const q = new Query(dht, peerId.id, () => queryFunc) - const res = await q.run([]) + const results = await all(manager.run(key, [peers[7]], queryFunc)) - // Should not visit any peers - expect(res.paths.length).to.eql(0) - expect(res.finalSet.size).to.eql(0) + // e.g. 
our starting peer plus the 5x closerPeers returned n the first iteration + expect(results).to.have.lengthOf(6) + // should not be a result in there + expect(results.find(res => res.name === 'VALUE')).to.not.be.ok() + + manager.stop() }) - it('only closerPeers', async () => { - const peerId = dht.peerId + it('should abort a query', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 2, alpha: 1 }) + manager.start() + + const controller = new AbortController() + let aborted + + // 0 -> 10 -> 11 -> 12... + // 1 -> 20 -> 21 -> 22... + const topology = createTopology({ + 0: { closerPeers: [10] }, + 10: { closerPeers: [11] }, + 11: { closerPeers: [12] }, + 1: { closerPeers: [20] }, + 20: { closerPeers: [21] }, + 21: { closerPeers: [22] } + }) - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} + /** @type {import('../src/query/types').QueryFunc} */ + const queryFunc = async function * ({ peer, signal }) { // eslint-disable-line require-await + signal.addEventListener('abort', () => { + aborted = true + }) - const queryFunc = async (p) => { // eslint-disable-line require-await - return { - closerPeers: [{ id: peerIds[2] }] - } + await delay(1000) + + yield topology[peer.toB58String()].event } - const q = new Query(dht, peerId.id, () => queryFunc) - const res = await q.run([peerIds[1]]) + setTimeout(() => { + controller.abort() + }, 10) + + await expect(all(manager.run(key, peers, queryFunc, { signal: controller.signal }))).to.eventually.be.rejected().with.property('code', 'ERR_QUERY_ABORTED') - expect(res.finalSet.size).to.eql(2) + expect(aborted).to.be.true() + + manager.stop() }) - it('only closerPeers concurrent', async () => { - const peerId = dht.peerId - - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} - - // 1 -> 8 - // 2 -> 4 -> 5 - // 6 -> 7 - // 3 -> 9 -> 10 - const topology = { - [peerIds[1].toB58String()]: [ - { id: peerIds[8] } - ], - - [peerIds[2].toB58String()]: [ - { 
id: peerIds[4] }, - { id: peerIds[6] } - ], - [peerIds[4].toB58String()]: [ - { id: peerIds[5] } - ], - [peerIds[6].toB58String()]: [ - { id: peerIds[7] } - ], - [peerIds[3].toB58String()]: [ - { id: peerIds[9] } - ], - [peerIds[9].toB58String()]: [ - { id: peerIds[10] } - ] - } + it('should allow a sub-query to timeout without aborting the whole query', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 2, alpha: 2 }) + manager.start() + + // 2 -> 1 -> 0 + // 4 -> 3 -> 0 + const topology = createTopology({ + 0: { value: uint8ArrayFromString('true') }, + 1: { delay: 1000, closerPeers: [0] }, + 2: { delay: 1000, closerPeers: [1] }, + 3: { delay: 10, closerPeers: [0] }, + 4: { delay: 10, closerPeers: [3] } + }) - const queryFunc = async (p) => { // eslint-disable-line require-await - const closer = topology[p.toB58String()] - return { - closerPeers: closer || [] - } - } + /** @type {import('../src/query/types').QueryFunc} */ + const queryFunc = async function * ({ peer, signal }) { // eslint-disable-line require-await + let aborted = false - const q = new Query(dht, peerId.id, () => queryFunc) - const res = await q.run([peerIds[1], peerIds[2], peerIds[3]]) + signal.addEventListener('abort', () => { + aborted = true + }) - // Should visit all peers - expect(res.finalSet.size).to.eql(10) - }) + const res = topology[peer.toB58String()] - it('early success', async () => { - const peerId = dht.peerId - - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} - - // 1 -> 2 -> 3 -> 4 - const topology = { - [peerIds[1].toB58String()]: { - closer: [{ id: peerIds[2] }] - }, - // Should stop here because pathComplete is true - [peerIds[2].toB58String()]: { - closer: [{ id: peerIds[3] }], - pathComplete: true - }, - // Should not reach here because previous query returns pathComplete - [peerIds[3].toB58String()]: { - closer: [{ id: peerIds[4] }] + if (res.delay) { + await delay(res.delay) } - } - const queryFunc = async (p) 
=> { // eslint-disable-line require-await - const res = topology[p.toB58String()] || {} - return { - closerPeers: res.closer || [], - value: res.value, - pathComplete: res.pathComplete + if (aborted) { + throw new Error('Aborted by signal') } + + yield res.event } - const q = new Query(dht, peerId.id, () => queryFunc) - const res = await q.run([peerIds[1]]) + const result = await all(manager.run(key, [peers[2], peers[4]], queryFunc, { queryFuncTimeout: 500 })) - // Should complete successfully - expect(res.paths.length).to.eql(1) - expect(res.paths[0].success).to.eql(true) + // should have traversed through the three nodes to the value and the one that timed out + expect(result).to.have.lengthOf(4) + expect(result).to.have.deep.nested.property('[2].value', uint8ArrayFromString('true')) + expect(result).to.have.nested.property('[3].error.message', 'Aborted by signal') - // Should only visit peers up to the success peer - expect(res.finalSet.size).to.eql(2) + manager.stop() }) - it('all queries stop after shutdown', async () => { - const deferShutdown = pDefer() - const [dhtA] = await tdht.spawn(1) - const peerId = dht.peerId - - // mock this so we can dial non existing peers - dhtA.dialer.dial = (peer) => {} - - // 1 -> 2 -> 3 -> 4 - const topology = { - [peerIds[1].toB58String()]: { - closer: [{ id: peerIds[2] }] - }, - [peerIds[2].toB58String()]: { - closer: [{ id: peerIds[3] }] - }, - // Should not reach here because query gets shut down - [peerIds[3].toB58String()]: { - closer: [{ id: peerIds[4] }] + it('does not return an error if only some queries error', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 10 }) + manager.start() + + /** @type {import('../src/query/types').QueryFunc} */ + const queryFunc = async function * ({ peer, pathIndex }) { // eslint-disable-line require-await + if (pathIndex % 2 === 0) { + yield queryErrorEvent({ + peer, + error: new Error('Urk!') + }) + } else { + yield peerResponseEvent({ peer }) } } - 
const visited = [] - const queryFunc = async (p) => { - visited.push(p) - - const getResult = async () => { - const res = topology[p.toB58String()] || {} - // this timeout is necesary so `dhtA.stop` has time to stop the - // requests before they all complete - await new Promise(resolve => setTimeout(resolve, 100)) - return { - closerPeers: res.closer || [] - } - } + const results = await all(manager.run(key, peers, queryFunc)) - // Shut down after visiting peerIds[2] - if (p.toB58String() === peerIds[2].toB58String()) { - dhtA.stop() - setTimeout(checkExpectations, 100) - return getResult() + // didn't add any extra peers during the query + expect(results).to.have.lengthOf(manager._disjointPaths) + // should not be a result in there + expect(results.find(res => res.name === 'VALUE')).to.not.be.ok() + // half of the results should have the error property + expect(results.reduce((acc, curr) => { + if (curr.name === 'QUERY_ERROR') { + return acc + 1 } - return getResult() - } - - const q = new Query(dhtA, peerId.id, () => queryFunc) - await q.run([peerIds[1]]) - - function checkExpectations () { - // Should only visit peers up to the point where we shut down - expect(visited).to.eql([peerIds[1], peerIds[2]]) - deferShutdown.resolve() - } + return acc + }, 0)).to.equal(5) - return deferShutdown.promise + manager.stop() }) - it('queries run after shutdown return immediately', async () => { - const [dhtA] = await tdht.spawn(1) - const peerId = dht.peerId - - // mock this so we can dial non existing peers - dhtA.dialer.dial = (peer, callback) => callback() - - // 1 -> 2 -> 3 - const topology = { - [peerIds[1].toB58String()]: { - closer: [{ id: peerIds[2] }] - }, - [peerIds[2].toB58String()]: { - closer: [{ id: peerIds[3] }] - } - } + it('returns empty run if initial peer list is empty', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 10 }) + manager.start() - const queryFunc = async (p) => { // eslint-disable-line require-await - const 
res = topology[p.toB58String()] || {} - return { - closerPeers: res.closer || [] - } + /** @type {import('../src/query/types').QueryFunc} */ + const queryFunc = async function * ({ peer }) { // eslint-disable-line require-await + yield valueEvent({ peer, value: uint8ArrayFromString('cool') }) } - const q = new Query(dhtA, peerId.id, () => queryFunc) + const results = await all(manager.run(key, [], queryFunc)) - await dhtA.stop() - const res = await q.run([peerIds[1]]) + expect(results).to.have.lengthOf(0) - // Should not visit any peers - expect(res.paths.length).to.eql(0) - expect(res.finalSet.size).to.eql(0) + manager.stop() }) - it('disjoint path values', async () => { - const peerId = dht.peerId - const values = ['v0', 'v1'].map((str) => uint8ArrayFromString(str)) - - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} + it('should query closer peers first', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 1, alpha: 1 }) + manager.start() + + // 9 -> 8 -> 7 -> 6 -> 5 -> 0 + // \-> 4 -> 3 -> 2 -> 1 -> 0 <-- should take this branch first + const topology = createTopology({ + 9: { closerPeers: [8, 4] }, + 8: { closerPeers: [7] }, + 7: { closerPeers: [6] }, + 6: { closerPeers: [5] }, + 5: { closerPeers: [0] }, + 4: { closerPeers: [3] }, + 3: { closerPeers: [2] }, + 2: { closerPeers: [1] }, + 1: { closerPeers: [0] }, + 0: { value: uint8ArrayFromString('hello world') } + }) - // 1 -> 2 -> 3 (v0) - // 4 -> 5 (v1) - const topology = { - // Top level node - [peerIds[1].toB58String()]: { - closer: [{ id: peerIds[2] }] - }, - [peerIds[2].toB58String()]: { - closer: [{ id: peerIds[3] }] - }, - // v0 - [peerIds[3].toB58String()]: { - value: values[0], - pathComplete: true - }, + const results = await all(manager.run(key, [peers[9]], createQueryFunction(topology))) + const traversedPeers = results.map(event => event.peer) + + expect(traversedPeers).to.deep.equal([ + peers[9], + peers[4], + peers[3], + peers[2], + 
peers[1], + peers[0], + peers[8], + peers[7], + peers[6], + peers[5] + ]) + + manager.stop() + }) - // Top level node - [peerIds[4].toB58String()]: { - closer: [{ id: peerIds[5] }] - }, - // v1 - [peerIds[5].toB58String()]: { - value: values[1], - pathComplete: true - } + it('only closerPeers', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 1, alpha: 1 }) + manager.start() + + /** @type {import('../src/query/types').QueryFunc} */ + const queryFunc = async function * ({ peer }) { // eslint-disable-line require-await + yield peerResponseEvent({ + peer: peer, + closer: [{ + id: peers[2], + multiaddrs: [] + }] + }) } - const queryFunc = async (p) => { - const res = topology[p.toB58String()] || {} - await new Promise(resolve => setTimeout(resolve, res.delay)) - return { - closerPeers: res.closer || [], - value: res.value, - pathComplete: res.pathComplete - } - } + const results = await all(manager.run(key, [peers[3]], queryFunc)) - const q = new Query(dht, peerId.id, () => queryFunc) - const res = await q.run([peerIds[1], peerIds[4]]) + expect(results).to.have.lengthOf(2) + expect(results).to.have.deep.nested.property('[0].closer[0].id', peers[2]) + expect(results).to.have.deep.nested.property('[1].closer[0].id', peers[2]) - // We should get back the values from both paths - expect(res.paths.length).to.eql(2) - expect(res.paths[0].value).to.eql(values[0]) - expect(res.paths[0].success).to.eql(true) - expect(res.paths[1].value).to.eql(values[1]) - expect(res.paths[1].success).to.eql(true) + manager.stop() }) - it('disjoint path values with early completion', async () => { - const peerId = dht.peerId - const values = ['v0', 'v1'].map((str) => uint8ArrayFromString(str)) - - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} - - // 1 -> 2 (delay) -> 3 - // 4 -> 5 [query complete] - const topology = { - // Top level node - [peerIds[1].toB58String()]: { - closer: [{ id: peerIds[2] }] - }, - // This query has a 
delay which means it only returns after the other - // path has already indicated the query is complete, so its result - // should be ignored - [peerIds[2].toB58String()]: { - delay: 100, - closer: [{ id: peerIds[3] }] - }, - // Query has stopped by the time we reach here, should be ignored - [peerIds[3].toB58String()]: { - value: values[0], - pathComplete: true - }, + it('only closerPeers concurrent', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 3 }) + manager.start() + + // 9 -> 2 + // 8 -> 6 -> 4 + // 5 -> 3 + // 7 -> 1 -> 0 + const topology = createTopology({ + 0: { closerPeers: [] }, + 1: { closerPeers: [0] }, + 2: { closerPeers: [] }, + 3: { closerPeers: [] }, + 4: { closerPeers: [] }, + 5: { closerPeers: [3] }, + 6: { closerPeers: [4, 5] }, + 7: { closerPeers: [1] }, + 8: { closerPeers: [6] }, + 9: { closerPeers: [2] } + }) - // Top level node - [peerIds[4].toB58String()]: { - closer: [{ id: peerIds[5] }] - }, - // This peer indicates that the query is complete - [peerIds[5].toB58String()]: { - closer: [{ id: peerIds[2] }], - value: values[1], - queryComplete: true - } - } + const results = await all(manager.run(key, [peers[9], peers[8], peers[7]], createQueryFunction(topology))) - const visited = [] - const queryFunc = async (p) => { - visited.push(p) - - const res = topology[p.toB58String()] || {} - await delay(res.delay) - return { - closerPeers: res.closer || [], - value: res.value, - pathComplete: res.pathComplete, - queryComplete: res.queryComplete - } - } + // Should visit all peers + expect(results).to.have.lengthOf(10) - const q = new Query(dht, peerId.id, () => queryFunc) - const res = await q.run([peerIds[1], peerIds[4]]) + manager.stop() + }) - // We should only get back the value from the path 4 -> 5 - expect(res.paths.length).to.eql(1) - expect(res.paths[0].value).to.eql(values[1]) - expect(res.paths[0].success).to.eql(true) + it('queries stop after shutdown', async () => { + const manager = new 
QueryManager({ peerId: ourPeerId, disjointPaths: 1, alpha: 1 }) + manager.start() - // Wait a little bit to make sure we don't continue down another path - // after finding a successful path - await delay(300) - if (visited.indexOf(peerIds[3]) !== -1) { - expect.fail('Query continued after success was returned') - } - }) + // 3 -> 2 -> 1 -> 0 + const topology = createTopology({ + 0: { closerPeers: [] }, + // Should not reach here because query gets shut down + 1: { closerPeers: [0] }, + 2: { closerPeers: [1] }, + 3: { closerPeers: [2] } + }) - it('disjoint path continue other paths after error on one path', async () => { - const peerId = dht.peerId - const values = ['v0', 'v1'].map((str) => uint8ArrayFromString(str)) + /** @type {PeerId[]} */ + const visited = [] - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} + /** @type {import('../src/query/types').QueryFunc} */ + const queryFunc = async function * ({ peer }) { // eslint-disable-line require-await + visited.push(peer) - // 1 -> 2 (delay) -> 3 [pathComplete] - // 4 -> 5 [error] -> 6 - const topology = { - // Top level node - [peerIds[1].toB58String()]: { - closer: [{ id: peerIds[2] }] - }, - // This query has a delay which means it only returns after the other - // path has already returned an error - [peerIds[2].toB58String()]: { - delay: 100, - closer: [{ id: peerIds[3] }] - }, - // Success peer, should get this value back at the end - [peerIds[3].toB58String()]: { - value: values[0], - pathComplete: true - }, + const getResult = async () => { + const res = topology[peer.toB58String()] + // this delay is necessary so `dhtA.stop` has time to stop the + // requests before they all complete + await delay(100) - // Top level node - [peerIds[4].toB58String()]: { - closer: [{ id: peerIds[5] }] - }, - // Return an error at this point - [peerIds[5].toB58String()]: { - closer: [{ id: peerIds[6] }], - error: true - }, - // Should never reach here - [peerIds[6].toB58String()]: { - value: 
values[1], - pathComplete: true + return res.event } - } - const visited = [] - const queryFunc = async (p) => { - visited.push(p) + // Shut down after visiting peers[2] + if (peer === peers[2]) { + manager.stop() - const res = topology[p.toB58String()] || {} - await new Promise(resolve => setTimeout(resolve, res.delay)) - if (res.error) { - throw new Error('path error') - } - return { - closerPeers: res.closer || [], - value: res.value, - pathComplete: res.pathComplete + yield getResult() } + + yield getResult() } - const q = new Query(dht, peerId.id, () => queryFunc) - const res = await q.run([peerIds[1], peerIds[4]]) + // shutdown will cause the query to stop early but without an error + await drain(manager.run(key, [peers[3]], queryFunc)) - // We should only get back the value from the path 1 -> 2 -> 3 - expect(res.paths.length).to.eql(1) - expect(res.paths[0].value).to.eql(values[0]) - expect(res.paths[0].success).to.eql(true) + // Should only visit peers up to the point where we shut down + expect(visited).to.have.lengthOf(2) + expect(visited).to.deep.include(peers[3]) + expect(visited).to.deep.include(peers[2]) }) - it('stop after finding k closest peers', async () => { - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} - - // Sort peers by distance from dht.peerId - const peerZeroDhtKey = await kadUtils.convertPeerId(dht.peerId) - const sorted = await kadUtils.sortClosestPeers(peerIds, peerZeroDhtKey) - - // Local node has nodes 10, 16 and 18 in k-bucket - const initial = [sorted[10], sorted[16], sorted[18]] - - // Should zoom in to peers near target, and then zoom out again until it - // has successfully queried 20 peers - const topology = { - // Local node has nodes 10, 16 and 18 in k-bucket - 10: [12, 20, 22, 24, 26, 28], - 16: [14, 18, 20, 22, 24, 26], - 18: [4, 6, 8, 12, 14, 16], - - 26: [24, 28, 30, 38], - 30: [14, 28], - 38: [2], - - // Should zoom out from this point, until it has 20 peers - 2: [13], - 13: [15], - 15: 
[17], - - // Right before we get to 20 peers, it finds some new peers that are - // closer than some of the ones it has already queried - 17: [1, 3, 5, 11], - 1: [7, 9], - 9: [19], - - // At this point it's visited 20 (actually more than 20 peers), and - // there are no closer peers to be found, so it should stop querying. - // Because there are 3 paths, each with a worker queue with - // concurrency 3, the exact order in which peers are visited is - // unpredictable, so we add a long tail and below we test to make - // sure that it never reaches the end of the tail. - 19: [21], - 21: [23], - 23: [25], - 25: [27], - 27: [29], - 29: [31] - } + it('disjoint path values', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 2 }) + manager.start() - const peerIndex = (peerId) => sorted.findIndex(p => p === peerId) - const peerIdToPeerData = (peerId) => peerIds.find(pi => pi === peerId) + const values = ['v0', 'v1'].map((str) => uint8ArrayFromString(str)) - const visited = [] - const queryFunc = async (peerId) => { // eslint-disable-line require-await - visited.push(peerId) - const i = peerIndex(peerId) - const closerIndexes = topology[i] || [] - const closerPeers = closerIndexes.map(j => peerIdToPeerData(sorted[j])).map((p) => ({ id: p })) - return { closerPeers } - } + // 2 -> 1 -> 0 (v0) + // 4 -> 3 (v1) + const topology = createTopology({ + 0: { value: values[0] }, + // Top level node + 1: { closerPeers: [0] }, + 2: { closerPeers: [1] }, + 3: { value: values[1] }, + 4: { closerPeers: [3] } + }) - const q = new Query(dht, dht.peerId.id, () => queryFunc) - const res = await q.run(initial) + const results = await all(manager.run(key, [peers[2], peers[4]], createQueryFunction(topology))) - // Should query 19 peers, then find some peers closer to the key, and - // finally stop once those closer peers have been queried - const expectedVisited = new Set([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 26, 28, 30, 
38]) - const visitedSet = new Set(visited.map(peerIndex)) - for (const i of expectedVisited) { - expect(visitedSet.has(i)) - } + // visited all the nodes + expect(results).to.have.lengthOf(5) - // Should never get to end of tail (see note above) - expect(visited.find(p => peerIndex(p) === 29)).not.to.exist() - - // Final set should have 20 peers, and the closer peers that were - // found near the end of the query should displace further away - // peers that were found at the beginning - expect(res.finalSet.size).to.eql(20) - expect(res.finalSet.has(sorted[1])).to.eql(true) - expect(res.finalSet.has(sorted[3])).to.eql(true) - expect(res.finalSet.has(sorted[5])).to.eql(true) - expect(res.finalSet.has(sorted[38])).to.eql(false) - }) + // found both values + expect(results).to.deep.containSubset([{ + value: values[0] + }]) + expect(results).to.deep.containSubset([{ + value: values[1] + }]) - /* - * This test creates two disjoint tracks of peers, one for - * each of the query's two paths to follow. The "good" - * track that leads to the target initially has high - * distances to the target, while the "bad" track that - * goes nowhere has small distances to the target. - * Only by going down both simultaneously will it find - * the target before the end of the bad track. The greedy - * behavior without disjoint paths would reach the target - * only after visiting every single peer. - * - * xor distance to target - * far <-----------------------------------------------> close - * - * - * ... 
- * - */ - it('uses disjoint paths', async () => { - const goodLength = 3 - const samplePeerIds = peerIds.slice(0, 12) - const { - targetId, - starts, - getResponse - } = await createDisjointTracks(samplePeerIds, goodLength) - - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} - let badEndVisited = false - let targetVisited = false - - const q = new Query(dht, targetId.id, (trackNum) => { - return async (p) => { // eslint-disable-line require-await - const response = getResponse(p, trackNum) - expect(response).to.exist() // or we aren't on the right track - if (response.end && !response.pathComplete) { - badEndVisited = true - } - if (response.pathComplete) { - targetVisited = true - expect(badEndVisited).to.eql(false) - } - return response - } - }) - q.concurrency = 1 - const res = await q.run(starts) - // we should reach the target node - expect(targetVisited).to.eql(true) - // we should visit all nodes (except the target) - expect(res.finalSet.size).to.eql(samplePeerIds.length - 1) - // there should be one successful path - expect(res.paths.length).to.eql(1) + manager.stop() }) - it('should discover closer peers', () => { - const discoverDefer = pDefer() - const peerId = dht.peerId + it('disjoint path continue other paths after error on one path', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 2 }) + manager.start() + + // 2 -> 1 (delay) -> 0 [pathComplete] + // 5 -> 4 [error] -> 3 + const topology = createTopology({ + 0: { value: uint8ArrayFromString('true') }, + // This query has a delay which means it only returns after the other + // path has already returned an error + 1: { delay: 100, closerPeers: [0] }, + 2: { closerPeers: [1] }, + 3: { value: uint8ArrayFromString('false') }, + // Return an error at this point + 4: { closerPeers: [3], error: new Error('Nooo!') }, + 5: { closerPeers: [4] } + }) - // mock this so we can dial non existing peers - dht.dialer.dial = () => {} + const results = 
await all(manager.run(key, [peers[2], peers[5]], createQueryFunction(topology))) - const queryFunc = async (p) => { // eslint-disable-line require-await - return { - closerPeers: [{ id: peerIds[2] }] - } - } + expect(results).to.deep.containSubset([{ + value: uint8ArrayFromString('true') + }]) + expect(results).to.not.deep.containSubset([{ + value: uint8ArrayFromString('false') + }]) - const q = new Query(dht, peerId.id, () => queryFunc) - q.run([peerIds[1]]) + manager.stop() + }) - dht.once('peer', (peerData) => { - expect(peerData.id).to.eql(peerIds[2]) - discoverDefer.resolve() + it.skip('should end paths when they have no closer peers to those already queried', async () => { + const manager = new QueryManager({ peerId: ourPeerId, disjointPaths: 1, alpha: 1 }) + manager.start() + + // 3 -> 2 -> 1 -> 4 -> 5 -> 6 // should stop at 1 + const topology = createTopology({ + 1: { closerPeers: [4] }, + 2: { closerPeers: [1] }, + 3: { closerPeers: [2] }, + 4: { closerPeers: [5] }, + 5: { closerPeers: [6] }, + 6: {} }) - return discoverDefer.promise + const results = await all(manager.run(key, [peers[3]], createQueryFunction(topology))) + + // should not have a value + expect(results.find(res => res.name === 'VALUE')).to.not.be.ok() + + // should have traversed peers 3, 2 & 1 + expect(results).to.containSubset([{ + peer: peers[3] + }, { + peer: peers[2] + }, { + peer: peers[1] + }]) + + // should not have traversed peers 4, 5 & 6 + expect(results).to.not.containSubset([{ + peer: peers[4] + }, { + peer: peers[5] + }, { + peer: peers[6] + }]) + + manager.stop() }) }) diff --git a/test/query/index.spec.js b/test/query/index.spec.js deleted file mode 100644 index 5ae7ec0a..00000000 --- a/test/query/index.spec.js +++ /dev/null @@ -1,160 +0,0 @@ -/* eslint-env mocha */ -/* eslint max-nested-callbacks: ["error", 6] */ -'use strict' - -const { expect } = require('aegir/utils/chai') -const sinon = require('sinon') -const delay = require('delay') -const PeerStore = 
require('libp2p/src/peer-store') -const Query = require('../../src/query') -const Path = require('../../src/query/path') -const Run = require('../../src/query/run') -const DHT = require('../../src') -const c = require('../../src/constants') -const createPeerId = require('../utils/create-peer-id') -const { sortClosestPeers } = require('../../src/utils') -const { convertBuffer } = require('../../src/utils') -const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const NUM_IDS = 101 - -describe('Query', () => { - let peerIds - let ourPeerId - before(async () => { - const peers = await createPeerId(NUM_IDS) - - ourPeerId = peers.shift() - peerIds = peers - }) - - describe('get closest peers', () => { - const targetKey = { - key: uint8ArrayFromString('A key to find'), - dhtKey: null - } - let sortedPeers - let dht - - before('get sorted peers', async () => { - const dhtKey = await convertBuffer(targetKey.key) - targetKey.dhtKey = dhtKey - - sortedPeers = await sortClosestPeers(peerIds, targetKey.dhtKey) - }) - - before('create a dht', () => { - const peerStore = new PeerStore({ peerId: ourPeerId }) - dht = new DHT({ - dialer: {}, - peerStore, - peerId: ourPeerId - }) - }) - - afterEach(() => { - sinon.restore() - }) - - it('should end paths when they have no closer peers to whats already been queried', async () => { - const PATHS = 5 - sinon.stub(dht, 'disjointPaths').value(PATHS) - sinon.stub(dht._queryManager, 'running').value(true) - const querySpy = sinon.stub().resolves({}) - - const query = new Query(dht, targetKey.key, () => querySpy) - - const run = new Run(query) - await run.init() - - // Add the sorted peers into 5 paths. 
This will weight - // the paths with increasingly further peers - const sortedPeerIds = sortedPeers - const peersPerPath = sortedPeerIds.length / PATHS - const paths = [...new Array(PATHS)].map((_, index) => { - const path = new Path(run, query.makePath()) - const start = index * peersPerPath - const peers = sortedPeerIds.slice(start, start + peersPerPath) - peers.forEach(p => path.addInitialPeer(p)) - return path - }) - - // Get the peers of the 2nd closest path, and remove the path - // We don't want to execute it. Just add its peers to peers we've - // already queried. - const queriedPeers = paths.splice(1, 1)[0].initialPeers - await Promise.all(queriedPeers.map((peerId) => run.peersQueried.add(peerId))) - - const continueSpy = sinon.spy(run, 'continueQuerying') - - await run.executePaths(paths) - - // The resulting peers should all be from path 0 as it had the closest - expect(run.peersQueried.peers).to.eql(paths[0].initialPeers) - - // Continue should be called on all `peersPerPath` queries of the first path, - // plus ALPHA (concurrency) of the other 3 paths - expect(continueSpy.callCount).to.eql(peersPerPath + (3 * c.ALPHA)) - - // The query should ONLY have been called on path 0 as it - // was the only path to contain closer peers that what we - // pre populated `run.peersQueried` with - expect(querySpy.callCount).to.eql(peersPerPath) - const finalQueriedPeers = querySpy.getCalls().map(call => call.args[0]) - expect(finalQueriedPeers).to.eql(paths[0].initialPeers) - }) - - it('should continue querying if the path has a closer peer', async () => { - sinon.stub(dht, 'disjointPaths').value(1) - sinon.stub(dht._queryManager, 'running').value(true) - - const querySpy = sinon.stub().resolves({}) - const query = new Query(dht, targetKey.key, () => querySpy) - - const run = new Run(query) - - await run.init() - - const sortedPeerIds = sortedPeers - - // Take the top 15 peers and peers 20 - 25 to seed `run.peersQueried` - // This leaves us with only 16 - 19 as 
closer peers - const queriedPeers = [ - ...sortedPeerIds.slice(0, 15), - ...sortedPeerIds.slice(20, 25) - ] - - const path = new Path(run, query.makePath()) - // Give the path a closet peer and 15 further peers - const pathPeers = [ - ...sortedPeerIds.slice(15, 16), // 1 closer - ...sortedPeerIds.slice(80, 95) - ] - - pathPeers.forEach(p => path.addInitialPeer(p)) - const returnPeers = sortedPeers.slice(16, 20) - // When the second query happens, which is a further peer, - // return peers 16 - 19 - querySpy.onCall(1).callsFake(async () => { - // this delay ensures the queries finish in serial - // see https://github.com/libp2p/js-libp2p-kad-dht/pull/121#discussion_r286437978 - await delay(10) - return { closerPeers: returnPeers } - }) - - await Promise.all(queriedPeers.map((peerId) => run.peersQueried.add(peerId))) - - await run.executePaths([path]) - - // Querying will stop after the first ALPHA peers are queried - expect(querySpy.callCount).to.eql(c.ALPHA) - - // We'll only get the 1 closest peer from `pathPeers`. - // The worker will be stopped before the `returnedPeers` - // are processed and queried. 
- expect(run.peersQueried.peers).to.eql([ - ...sortedPeerIds.slice(0, 16), - ...sortedPeerIds.slice(20, 24) - ]) - }) - }) -}) diff --git a/test/routing-table.spec.js b/test/routing-table.spec.js index 1c031ddf..7e5be4e3 100644 --- a/test/routing-table.spec.js +++ b/test/routing-table.spec.js @@ -6,7 +6,7 @@ const PeerId = require('peer-id') const random = require('lodash.random') const sinon = require('sinon') -const RoutingTable = require('../src/routing-table') +const { RoutingTable } = require('../src/routing-table') const kadUtils = require('../src/utils') const createPeerId = require('./utils/create-peer-id') const { PROTOCOL_DHT } = require('../src/constants') @@ -17,16 +17,14 @@ describe('Routing Table', () => { beforeEach(async function () { this.timeout(20 * 1000) - const dht = { + const lipbp2p = { peerId: await PeerId.create({ bits: 512 }), - libp2p: { - dialProtocol: sinon.stub() - } + dialProtocol: sinon.stub() } - table = new RoutingTable(dht, { - kBucketSize: 20, - refreshInterval: 30000 + table = new RoutingTable({ + peerId: lipbp2p.peerId, + dialer: lipbp2p }) }) @@ -115,13 +113,13 @@ describe('Routing Table', () => { table.kb.add(oldPeer) // simulate connection succeeding - table.dht.libp2p.dialProtocol.withArgs(oldPeer.peer, PROTOCOL_DHT).resolves({ close: sinon.stub() }) + table._dialer.dialProtocol.withArgs(oldPeer.peer, PROTOCOL_DHT).resolves({ stream: { close: sinon.stub() } }) // perform the ping await fn() - expect(table.dht.libp2p.dialProtocol.callCount).to.equal(1) - expect(table.dht.libp2p.dialProtocol.calledWith(oldPeer.peer)).to.be.true() + expect(table._dialer.dialProtocol.callCount).to.equal(1) + expect(table._dialer.dialProtocol.calledWith(oldPeer.peer)).to.be.true() // did not add the new peer expect(table.kb.get(newPeer.id)).to.be.null() @@ -160,13 +158,13 @@ describe('Routing Table', () => { table.kb.add(oldPeer) // libp2p fails to dial the old peer - table.dht.libp2p.dialProtocol = sinon.stub().withArgs(oldPeer.peer, 
PROTOCOL_DHT).rejects(new Error('Could not dial peer')) + table._dialer.dialProtocol = sinon.stub().withArgs(oldPeer.peer, PROTOCOL_DHT).rejects(new Error('Could not dial peer')) // perform the ping await fn() - expect(table.dht.libp2p.dialProtocol.callCount).to.equal(1) - expect(table.dht.libp2p.dialProtocol.calledWith(oldPeer.peer)).to.be.true() + expect(table._dialer.dialProtocol.callCount).to.equal(1) + expect(table._dialer.dialProtocol.calledWith(oldPeer.peer)).to.be.true() // added the new peer expect(table.kb.get(newPeer.id)).to.not.be.null() diff --git a/test/rpc/handlers/add-provider.spec.js b/test/rpc/handlers/add-provider.spec.js index d4b05cdc..b5ec3f58 100644 --- a/test/rpc/handlers/add-provider.spec.js +++ b/test/rpc/handlers/add-provider.spec.js @@ -6,8 +6,8 @@ const { expect } = require('aegir/utils/chai') const { Multiaddr } = require('multiaddr') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const Message = require('../../../src/message') -const handler = require('../../../src/rpc/handlers/add-provider') +const { Message } = require('../../../src/message') +const { AddProviderHandler } = require('../../../src/rpc/handlers/add-provider') const createPeerId = require('../../utils/create-peer-id') const createValues = require('../../utils/create-values') @@ -18,6 +18,7 @@ describe('rpc - handlers - AddProvider', () => { let values let tdht let dht + let handler before(async () => { [peerIds, values] = await Promise.all([ @@ -31,6 +32,12 @@ describe('rpc - handlers - AddProvider', () => { const dhts = await tdht.spawn(1) dht = dhts[0] + + handler = new AddProviderHandler({ + peerId: dht._libp2p.peerId, + providers: dht._lan._providers, + peerStore: dht._libp2p.peerStore + }) }) afterEach(() => tdht.teardown()) @@ -47,7 +54,7 @@ describe('rpc - handlers - AddProvider', () => { tests.forEach((t) => { it(t.error.toString(), async () => { try { - await handler(dht)(peerIds[0], t.message) + await 
handler.handle(peerIds[0], t.message) } catch (/** @type {any} */ err) { expect(err).to.exist() expect(err.code).to.eql(t.error) @@ -76,31 +83,13 @@ describe('rpc - handlers - AddProvider', () => { } ] - await handler(dht)(peerIds[0], msg) + await handler.handle(peerIds[0], msg) - const provs = await dht.providers.getProviders(cid) + const provs = await dht._lan._providers.getProviders(cid) expect(provs).to.have.length(1) expect(provs[0].id).to.eql(peerIds[0].id) - const bookEntry = dht.peerStore.get(peerIds[0]) + const bookEntry = dht._libp2p.peerStore.get(peerIds[0]) expect(bookEntry.addresses.map((address) => address.multiaddr)).to.eql([ma1]) }) - - it('fall back to sender if providers have no multiaddrs', async () => { - const cid = values[0].cid - const msg = new Message(Message.TYPES.ADD_PROVIDER, cid.bytes, 0) - - msg.providerPeers = [{ - id: peerIds[0], - multiaddrs: [] - }] - - await handler(dht)(peerIds[0], msg) - - const provs = await dht.providers.getProviders(cid) - - expect(dht.peerStore.get(peerIds[0])).to.equal(undefined) - expect(provs).to.have.length(1) - expect(provs[0].id).to.eql(peerIds[0].id) - }) }) diff --git a/test/rpc/handlers/find-node.spec.js b/test/rpc/handlers/find-node.spec.js index 6e82abf7..73a06fc5 100644 --- a/test/rpc/handlers/find-node.spec.js +++ b/test/rpc/handlers/find-node.spec.js @@ -2,9 +2,10 @@ 'use strict' const { expect } = require('aegir/utils/chai') -const Message = require('../../../src/message') -const handler = require('../../../src/rpc/handlers/find-node') +const { Message } = require('../../../src/message') +const { FindNodeHandler } = require('../../../src/rpc/handlers/find-node') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') +const { Multiaddr } = require('multiaddr') const T = Message.TYPES.FIND_NODE @@ -15,6 +16,7 @@ describe('rpc - handlers - FindNode', () => { let peerIds let tdht let dht + let handler before(async () => { peerIds = await createPeerId(3) @@ -25,39 +27,104 
@@ describe('rpc - handlers - FindNode', () => { const dhts = await tdht.spawn(1) dht = dhts[0] + + handler = new FindNodeHandler({ + peerId: dht._libp2p.peerId, + addressable: dht._libp2p, + peerRouting: dht._lan._peerRouting + }) }) afterEach(() => tdht.teardown()) it('returns self, if asked for self', async () => { - const msg = new Message(T, dht.peerId.id, 0) + const msg = new Message(T, dht._libp2p.peerId.id, 0) - const response = await handler(dht)(peerIds[1], msg) + const response = await handler.handle(peerIds[1], msg) expect(response.closerPeers).to.have.length(1) const peer = response.closerPeers[0] - expect(peer.id.id).to.be.eql(dht.peerId.id) + expect(peer.id.id).to.be.eql(dht._libp2p.peerId.id) }) it('returns closer peers', async () => { const msg = new Message(T, uint8ArrayFromString('hello'), 0) const other = peerIds[1] - await dht._add(other) - const response = await handler(dht)(peerIds[2].id, msg) + await dht._lan._routingTable.add(other) + await dht._libp2p.peerStore.addressBook.set(other, [ + new Multiaddr('/ip4/127.0.0.1/tcp/4002'), + new Multiaddr('/ip4/192.168.1.5/tcp/4002'), + new Multiaddr('/ip4/221.4.67.0/tcp/4002') + ]) + const response = await handler.handle(peerIds[2].id, msg) expect(response.closerPeers).to.have.length(1) const peer = response.closerPeers[0] expect(peer.id.id).to.be.eql(peerIds[1].id) - expect(peer.multiaddrs).to.be.eql([]) + expect(peer.multiaddrs).to.not.be.empty() }) it('handles no peers found', async () => { const msg = new Message(T, uint8ArrayFromString('hello'), 0) - const response = await handler(dht)(peerIds[2], msg) + const response = await handler.handle(peerIds[2], msg) expect(response.closerPeers).to.have.length(0) }) + + it('returns only lan addresses', async () => { + const msg = new Message(T, uint8ArrayFromString('hello'), 0) + const other = peerIds[1] + + await dht._lan._routingTable.add(other) + await dht._libp2p.peerStore.addressBook.set(other, [ + new Multiaddr('/ip4/127.0.0.1/tcp/4002'), + new 
Multiaddr('/ip4/192.168.1.5/tcp/4002'), + new Multiaddr('/ip4/221.4.67.0/tcp/4002') + ]) + + handler = new FindNodeHandler({ + peerId: dht._libp2p.peerId, + addressable: dht._libp2p, + peerRouting: dht._lan._peerRouting, + lan: true + }) + const response = await handler.handle(peerIds[2].id, msg) + + expect(response.closerPeers).to.have.length(1) + const peer = response.closerPeers[0] + + expect(peer.id.id).to.be.eql(peerIds[1].id) + expect(peer.multiaddrs.map(ma => ma.toString())).to.include('/ip4/192.168.1.5/tcp/4002') + expect(peer.multiaddrs.map(ma => ma.toString())).to.not.include('/ip4/221.4.67.0/tcp/4002') + }) + + it('returns only wan addresses', async () => { + const msg = new Message(T, uint8ArrayFromString('hello'), 0) + const other = peerIds[1] + + await dht._lan._routingTable.add(other) + await dht._libp2p.peerStore.addressBook.set(other, [ + new Multiaddr('/ip4/127.0.0.1/tcp/4002'), + new Multiaddr('/ip4/192.168.1.5/tcp/4002'), + new Multiaddr('/ip4/221.4.67.0/tcp/4002') + ]) + + handler = new FindNodeHandler({ + peerId: dht._libp2p.peerId, + addressable: dht._libp2p, + peerRouting: dht._lan._peerRouting, + lan: false + }) + const response = await handler.handle(peerIds[2].id, msg) + + expect(response.closerPeers).to.have.length(1) + const peer = response.closerPeers[0] + + expect(peer.id.id).to.be.eql(peerIds[1].id) + expect(peer.multiaddrs.map(ma => ma.toString())).to.not.include('/ip4/192.168.1.5/tcp/4002') + expect(peer.multiaddrs.map(ma => ma.toString())).to.include('/ip4/221.4.67.0/tcp/4002') + }) }) diff --git a/test/rpc/handlers/get-providers.spec.js b/test/rpc/handlers/get-providers.spec.js index a641e4fe..5f6b5549 100644 --- a/test/rpc/handlers/get-providers.spec.js +++ b/test/rpc/handlers/get-providers.spec.js @@ -2,10 +2,11 @@ 'use strict' const { expect } = require('aegir/utils/chai') -const Message = require('../../../src/message') +const { Message } = require('../../../src/message') const utils = require('../../../src/utils') -const 
handler = require('../../../src/rpc/handlers/get-providers') +const { GetProvidersHandler } = require('../../../src/rpc/handlers/get-providers') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') +const { Multiaddr } = require('multiaddr') const T = Message.TYPES.GET_PROVIDERS @@ -18,19 +19,29 @@ describe('rpc - handlers - GetProviders', () => { let values let tdht let dht + let handler before(async () => { - [peerIds, values] = await Promise.all([ + tdht = new TestDHT() + + ;[peerIds, values] = await Promise.all([ createPeerId(3), createValues(2) ]) }) beforeEach(async () => { - tdht = new TestDHT() - const dhts = await tdht.spawn(1) dht = dhts[0] + + handler = new GetProvidersHandler({ + peerId: dht._libp2p.peerId, + peerRouting: dht._lan._peerRouting, + providers: dht._lan._providers, + datastore: dht._datastore, + peerStore: dht._libp2p.peerStore, + addressable: dht._libp2p + }) }) afterEach(() => tdht.teardown()) @@ -38,11 +49,7 @@ describe('rpc - handlers - GetProviders', () => { it('errors with an invalid key ', async () => { const msg = new Message(T, uint8ArrayFromString('hello'), 0) - try { - await handler(dht)(peerIds[0], msg) - } catch (/** @type {any} */ err) { - expect(err.code).to.eql('ERR_INVALID_CID') - } + await expect(handler.handle(peerIds[0], msg)).to.eventually.be.rejected().with.property('code', 'ERR_INVALID_CID') }) it('responds with self if the value is in the datastore', async () => { @@ -51,13 +58,13 @@ describe('rpc - handlers - GetProviders', () => { const msg = new Message(T, v.cid.bytes, 0) const dsKey = utils.bufferToKey(v.cid.bytes) - await dht.datastore.put(dsKey, v.value) - const response = await handler(dht)(peerIds[0], msg) + await dht._datastore.put(dsKey, v.value) + const response = await handler.handle(peerIds[0], msg) expect(response.key).to.be.eql(v.cid.bytes) expect(response.providerPeers).to.have.length(1) expect(response.providerPeers[0].id.toB58String()) - .to.eql(dht.peerId.toB58String()) + 
.to.equal(dht._libp2p.peerId.toB58String()) }) it('responds with listed providers and closer peers', async () => { @@ -67,17 +74,28 @@ describe('rpc - handlers - GetProviders', () => { const prov = peerIds[1] const closer = peerIds[2] - await dht._add(closer) - await dht.providers.addProvider(v.cid, prov) - const response = await handler(dht)(peerIds[0], msg) + await dht._lan._routingTable.add(closer) + await dht._lan._providers.addProvider(v.cid, prov) + await dht._libp2p.peerStore.addressBook.set(prov, [ + new Multiaddr('/ip4/127.0.0.1/tcp/4002'), + new Multiaddr('/ip4/192.168.1.5/tcp/4002'), + new Multiaddr('/ip4/135.4.67.0/tcp/4002') + ]) + await dht._libp2p.peerStore.addressBook.set(closer, [ + new Multiaddr('/ip4/127.0.0.1/tcp/4002'), + new Multiaddr('/ip4/192.168.2.6/tcp/4002'), + new Multiaddr('/ip4/21.31.57.23/tcp/4002') + ]) + + const response = await handler.handle(peerIds[0], msg) expect(response.key).to.be.eql(v.cid.bytes) expect(response.providerPeers).to.have.length(1) expect(response.providerPeers[0].id.toB58String()) - .to.eql(prov.toB58String()) + .to.equal(prov.toB58String()) expect(response.closerPeers).to.have.length(1) expect(response.closerPeers[0].id.toB58String()) - .to.eql(closer.toB58String()) + .to.equal(closer.toB58String()) }) }) diff --git a/test/rpc/handlers/get-value.spec.js b/test/rpc/handlers/get-value.spec.js index db139983..f99dec29 100644 --- a/test/rpc/handlers/get-value.spec.js +++ b/test/rpc/handlers/get-value.spec.js @@ -2,10 +2,11 @@ 'use strict' const { expect } = require('aegir/utils/chai') -const Message = require('../../../src/message') -const handler = require('../../../src/rpc/handlers/get-value') +const { Message } = require('../../../src/message') +const { GetValueHandler } = require('../../../src/rpc/handlers/get-value') const utils = require('../../../src/utils') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') +const drain = require('it-drain') const T = Message.TYPES.GET_VALUE @@ 
-16,6 +17,7 @@ describe('rpc - handlers - GetValue', () => { let peerIds let tdht let dht + let handler before(async () => { peerIds = await createPeerId(2) @@ -26,6 +28,13 @@ describe('rpc - handlers - GetValue', () => { const dhts = await tdht.spawn(1) dht = dhts[0] + + handler = new GetValueHandler({ + peerId: dht._libp2p.peerId, + peerStore: dht._libp2p.peerStore, + peerRouting: dht._lan._peerRouting, + datastore: dht._datastore + }) }) afterEach(() => tdht.teardown()) @@ -34,7 +43,7 @@ describe('rpc - handlers - GetValue', () => { const msg = new Message(T, new Uint8Array(0), 0) try { - await handler(dht)(peerIds[0], msg) + await handler.handle(peerIds[0], msg) } catch (/** @type {any} */ err) { expect(err.code).to.eql('ERR_INVALID_KEY') return @@ -48,8 +57,8 @@ describe('rpc - handlers - GetValue', () => { const value = uint8ArrayFromString('world') const msg = new Message(T, key, 0) - await dht.put(key, value) - const response = await handler(dht)(peerIds[0], msg) + await drain(dht.put(key, value)) + const response = await handler.handle(peerIds[0], msg) expect(response.record).to.exist() expect(response.record.key).to.eql(key) @@ -61,8 +70,8 @@ describe('rpc - handlers - GetValue', () => { const msg = new Message(T, key, 0) const other = peerIds[1] - await dht._add(other) - const response = await handler(dht)(peerIds[0], msg) + await dht._lan._routingTable.add(other) + const response = await handler.handle(peerIds[0], msg) expect(response.closerPeers).to.have.length(1) expect(response.closerPeers[0].id.toB58String()).to.be.eql(other.toB58String()) @@ -70,13 +79,13 @@ describe('rpc - handlers - GetValue', () => { describe('public key', () => { it('self', async () => { - const key = utils.keyForPublicKey(dht.peerId) + const key = utils.keyForPublicKey(dht._libp2p.peerId) const msg = new Message(T, key, 0) - const response = await handler(dht)(peerIds[0], msg) + const response = await handler.handle(peerIds[0], msg) expect(response.record).to.exist() - 
expect(response.record.value).to.eql(dht.peerId.pubKey.bytes) + expect(response.record.value).to.eql(dht._libp2p.peerId.pubKey.bytes) }) it('other in peerstore', async () => { @@ -85,11 +94,11 @@ describe('rpc - handlers - GetValue', () => { const msg = new Message(T, key, 0) - dht.peerStore.addressBook.add(other, []) - dht.peerStore.keyBook.set(other, other.pubKey) + dht._libp2p.peerStore.addressBook.add(other, []) + dht._libp2p.peerStore.keyBook.set(other, other.pubKey) - await dht._add(other) - const response = await handler(dht)(peerIds[0], msg) + await dht._lan._routingTable.add(other) + const response = await handler.handle(peerIds[0], msg) expect(response.record).to.exist() expect(response.record.value).to.eql(other.pubKey.bytes) }) @@ -99,7 +108,7 @@ describe('rpc - handlers - GetValue', () => { const key = utils.keyForPublicKey(other) const msg = new Message(T, key, 0) - const response = await handler(dht)(peerIds[0], msg) + const response = await handler.handle(peerIds[0], msg) expect(response.record).to.not.exist() }) }) diff --git a/test/rpc/handlers/ping.spec.js b/test/rpc/handlers/ping.spec.js index a0267b1d..d457aeb2 100644 --- a/test/rpc/handlers/ping.spec.js +++ b/test/rpc/handlers/ping.spec.js @@ -2,36 +2,29 @@ 'use strict' const { expect } = require('aegir/utils/chai') -const Message = require('../../../src/message') -const handler = require('../../../src/rpc/handlers/ping') +const { Message } = require('../../../src/message') +const { PingHandler } = require('../../../src/rpc/handlers/ping') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') const T = Message.TYPES.PING const createPeerId = require('../../utils/create-peer-id') -const TestDHT = require('../../utils/test-dht') describe('rpc - handlers - Ping', () => { let peerIds - let tdht - let dht + let handler before(async () => { peerIds = await createPeerId(2) }) beforeEach(async () => { - tdht = new TestDHT() - - const dhts = await tdht.spawn(1) - dht = dhts[0] 
+ handler = new PingHandler() }) - afterEach(() => tdht.teardown()) - it('replies with the same message', async () => { const msg = new Message(T, uint8ArrayFromString('hello'), 5) - const response = await handler(dht)(peerIds[0], msg) + const response = await handler.handle(peerIds[0], msg) expect(response).to.be.eql(msg) }) diff --git a/test/rpc/handlers/put-value.spec.js b/test/rpc/handlers/put-value.spec.js index 60338db7..84b46aa6 100644 --- a/test/rpc/handlers/put-value.spec.js +++ b/test/rpc/handlers/put-value.spec.js @@ -7,8 +7,8 @@ const { Record } = require('libp2p-record') const delay = require('delay') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') -const Message = require('../../../src/message') -const handler = require('../../../src/rpc/handlers/put-value') +const { Message } = require('../../../src/message') +const { PutValueHandler } = require('../../../src/rpc/handlers/put-value') const utils = require('../../../src/utils') const createPeerId = require('../../utils/create-peer-id') @@ -20,6 +20,7 @@ describe('rpc - handlers - PutValue', () => { let peerIds let tdht let dht + let handler before(async () => { peerIds = await createPeerId(2) @@ -30,6 +31,11 @@ describe('rpc - handlers - PutValue', () => { const dhts = await tdht.spawn(1) dht = dhts[0] + + handler = new PutValueHandler({ + validators: dht._lan._validators, + datastore: dht._datastore + }) }) afterEach(() => tdht.teardown()) @@ -38,7 +44,7 @@ describe('rpc - handlers - PutValue', () => { const msg = new Message(T, uint8ArrayFromString('hello'), 5) try { - await handler(dht)(peerIds[0], msg) + await handler.handle(peerIds[0], msg) } catch (/** @type {any} */ err) { expect(err.code).to.eql('ERR_EMPTY_RECORD') return @@ -55,19 +61,11 @@ describe('rpc - handlers - PutValue', () => { ) msg.record = record - let eventResponse - dht.onPut = (record, peerId) => { - eventResponse = { record, peerId } - } - - const response = await handler(dht)(peerIds[1], msg) + 
const response = await handler.handle(peerIds[1], msg) expect(response).to.be.eql(msg) - expect(eventResponse).to.have.property('record').eql(record) - expect(eventResponse).to.have.property('peerId').eql(peerIds[1]) - const key = utils.bufferToKey(uint8ArrayFromString('hello')) - const res = await dht.datastore.get(key) + const res = await dht._datastore.get(key) const rec = Record.deserialize(res) diff --git a/test/rpc/index.spec.js b/test/rpc/index.spec.js index 26bb7f5b..bcc3aaaa 100644 --- a/test/rpc/index.spec.js +++ b/test/rpc/index.spec.js @@ -6,8 +6,8 @@ const pDefer = require('p-defer') const pipe = require('it-pipe') const lp = require('it-length-prefixed') const { collect } = require('streaming-iterables') -const Message = require('../../src/message') -const rpc = require('../../src/rpc') +const { Message } = require('../../src/message') +const { RPC } = require('../../src/rpc') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') const createPeerId = require('../utils/create-peer-id') @@ -17,18 +17,34 @@ const toBuffer = require('../utils/to-buffer') describe('rpc', () => { let peerIds let tdht + let rpc + let dht before(async () => { peerIds = await createPeerId(2) tdht = new TestDHT() }) - after(() => tdht.teardown()) + beforeEach(async () => { + const dhts = await tdht.spawn(1) + dht = dhts[0] + + rpc = new RPC({ + routingTable: dht._lan._routingTable, + peerId: dht._libp2p.peerId, + providers: dht._lan._providers, + peerStore: dht._libp2p.peerStore, + addressable: dht._libp2p, + peerRouting: dht._lan._peerRouting, + datastore: dht._lan._datastore, + validators: dht._lan._validators + }) + }) + + afterEach(() => tdht.teardown()) it('calls back with the response', async () => { const defer = pDefer() - const [dht] = await tdht.spawn(1) - const msg = new Message(Message.TYPES.GET_VALUE, uint8ArrayFromString('hello'), 5) const validateMessage = (res) => { @@ -57,7 +73,7 @@ describe('rpc', () => { } } - rpc(dht)({ + 
rpc.onIncomingStream({ protocol: 'protocol', stream: duplexStream, connection: { diff --git a/test/simulation/index.js b/test/simulation/index.js index 4db56f74..81573931 100644 --- a/test/simulation/index.js +++ b/test/simulation/index.js @@ -7,9 +7,9 @@ const PeerStore = require('libp2p/src/peer-store') const PeerId = require('peer-id') const { base58btc } = require('multiformats/bases/base58') const RoutingTable = require('../../src/routing-table') -const Message = require('../../src/message') +const { Message } = require('../../src/message') const { convertBuffer } = require('../../src/utils') -const { sortClosestPeers } = require('../../src/utils') +const { sortClosestPeers } = require('../utils/sort-closest-peers') const DHT = require('../../src') const { fromString: uint8ArrayFromString } = require('uint8arrays/from-string') const crypto = require('libp2p-crypto') diff --git a/test/utils/create-disjoint-tracks.js b/test/utils/create-disjoint-tracks.js deleted file mode 100644 index aaa6db7c..00000000 --- a/test/utils/create-disjoint-tracks.js +++ /dev/null @@ -1,66 +0,0 @@ -'use strict' - -const { - convertPeerId, - sortClosestPeers -} = require('../../src/utils') - -/* - * Given an array of peerIds, decide on a target, start peers, and - * "next", a successor function for the query to use. See comment - * where this is called for details. 
- */ -async function createDisjointTracks (ids, goodLength) { - const us = ids[0] - - const ourId = await convertPeerId(us) - let sorted = await sortClosestPeers(ids, ourId) - - const target = sorted[sorted.length - 1] - sorted = sorted.slice(1) // remove our id - const goodTrack = sorted.slice(0, goodLength) - goodTrack.push(target) // push on target - const badTrack = sorted.slice(goodLength, -1) - - if (badTrack.length <= goodTrack.length) { - throw new Error(`insufficient number of peers; good length: ${goodTrack.length}, bad length: ${badTrack.length}`) - } - - const tracks = [goodTrack, badTrack] // array of arrays of nodes - const next = (peer, trackNum) => { - const track = tracks[trackNum] - const pos = track.indexOf(peer) - if (pos < 0) { - return null // peer not on expected track - } - - const nextPos = pos + 1 - // if we're at the end of the track - if (nextPos === track.length) { - if (trackNum === 0) { // good track; pathComplete - return { - end: true, - pathComplete: true - } - } else { // bad track; dead end - return { - end: true, - closerPeers: [] - } - } - } else { - const infoIdx = ids.indexOf(track[nextPos]) - return { - closerPeers: [{ id: ids[infoIdx] }] - } - } - } - - return { - targetId: target, - starts: [goodTrack[0], badTrack[0]], - getResponse: next - } -} - -module.exports = createDisjointTracks diff --git a/test/utils/create-peer-id.js b/test/utils/create-peer-id.js index 2e095525..5da476e1 100644 --- a/test/utils/create-peer-id.js +++ b/test/utils/create-peer-id.js @@ -5,7 +5,7 @@ const PeerId = require('peer-id') /** * Creates multiple PeerIds * - * @param {number} length - The number of `PeerId` to create + * @param {number} length - The number of `PeerId`s to create * @returns {Promise>} */ function createPeerId (length) { diff --git a/test/utils/index.js b/test/utils/index.js index 376eabf5..110544e3 100644 --- a/test/utils/index.js +++ b/test/utils/index.js @@ -1,8 +1,5 @@ 'use strict' -const delay = require('delay') -const 
pRetry = require('p-retry') -const pTimeout = require('p-timeout') const duplexPair = require('it-pair/duplex') const createMockRegistrar = (registrarRecord) => { @@ -15,6 +12,9 @@ const createMockRegistrar = (registrarRecord) => { handler } }, + unhandle: (multicodec) => { + delete registrarRecord[multicodec] + }, register: ({ multicodecs, _onConnect, _onDisconnect }) => { const rec = registrarRecord[multicodecs[0]] || {} @@ -55,33 +55,6 @@ const ConnectionPair = () => { exports.ConnectionPair = ConnectionPair -exports.waitForWellFormedTables = (dhts, minPeers, avgPeers, waitTimeout) => { - return pTimeout(pRetry(async () => { - let totalPeers = 0 - - const ready = dhts.map((dht) => { - const rtlen = dht.routingTable.size - totalPeers += rtlen - if (minPeers > 0 && rtlen < minPeers) { - return false - } - const actualAvgPeers = totalPeers / dhts.length - if (avgPeers > 0 && actualAvgPeers < avgPeers) { - return false - } - return true - }) - - if (ready.every(Boolean)) { - return - } - await delay(200) - throw new Error('not done yet') - }, { - retries: 50 - }), waitTimeout) -} - // Count how many peers are in b but are not in a exports.countDiffPeers = (a, b) => { const s = new Set() diff --git a/test/utils/sort-closest-peers.js b/test/utils/sort-closest-peers.js new file mode 100644 index 00000000..dabeb4f8 --- /dev/null +++ b/test/utils/sort-closest-peers.js @@ -0,0 +1,30 @@ +'use strict' + +const { xor: uint8ArrayXor } = require('uint8arrays/xor') +const { compare: uint8ArrayCompare } = require('uint8arrays/compare') +const { convertPeerId } = require('../../src/utils') +const all = require('it-all') +const map = require('it-map') + +/** + * Sort peers by distance to the given `kadId`. 
+ * + * @param {Array} peers + * @param {Uint8Array} kadId + */ +exports.sortClosestPeers = async (peers, kadId) => { + const distances = await all(map(peers, async (peer) => { + const id = await convertPeerId(peer) + + return { + peer: peer, + distance: uint8ArrayXor(id, kadId) + } + })) + + return distances + .sort((a, b) => { + return uint8ArrayCompare(a.distance, b.distance) + }) + .map((d) => d.peer) +} diff --git a/test/utils/test-dht.js b/test/utils/test-dht.js index 1bf19398..db44e0e3 100644 --- a/test/utils/test-dht.js +++ b/test/utils/test-dht.js @@ -4,10 +4,7 @@ const PeerStore = require('libp2p/src/peer-store') const pRetry = require('p-retry') const delay = require('delay') const { Multiaddr } = require('multiaddr') - -const KadDHT = require('../../src') -const { PROTOCOL_DHT } = require('../../src/constants') - +const { create } = require('../../src') const createPeerId = require('./create-peer-id') const { createMockRegistrar, @@ -37,52 +34,64 @@ class TestDHT { ...options } - const connectToPeer = (localDHT, peer) => { - const remotePeerB58 = peer.toB58String() + const connectToPeer = async (localDHT, peer, protocol) => { const remoteDht = this.nodes.find( - (node) => node.peerId.toB58String() === remotePeerB58 + (node) => node._libp2p.peerId.equals(peer) ) - const localOnConnect = regRecord[options.protocolPrefix + PROTOCOL_DHT].onConnect - const remoteOnConnect = remoteDht.regRecord[options.protocolPrefix + PROTOCOL_DHT].onConnect + if (remoteDht._clientMode) { + throw new Error('Cannot connect to remote DHT client') + } - const remoteHandler = remoteDht.regRecord[options.protocolPrefix + PROTOCOL_DHT].handler + const localOnConnect = regRecord[protocol].onConnect + const remoteOnConnect = remoteDht.regRecord[protocol].onConnect + const remoteHandler = remoteDht.regRecord[protocol].handler // Notice peers of connection const [c0, c1] = ConnectionPair() - return { - newStream: async () => { - if (remoteDht._clientMode) { - throw new 
Error('unsupported protocol') - } - - // Trigger on connect for servers connecting - if (!remoteDht._clientMode) await localOnConnect(remoteDht.peerId, c1) - if (!localDHT._clientMode) await remoteOnConnect(peerId, c0) - - await remoteHandler({ - protocol: options.protocolPrefix + PROTOCOL_DHT, - stream: c0.stream, - connection: { - remotePeer: peerId - } - }) - return { stream: c1.stream } - } + // Trigger on connect for servers connecting + await localOnConnect(remoteDht._libp2p.peerId, c1) + + if (!localDHT._clientMode) { + await remoteOnConnect(peerId, c0) } + + await remoteHandler({ + protocol: protocol, + stream: c0.stream, + connection: { + remotePeer: peerId + } + }) + + return { stream: c1.stream } } - const dht = new KadDHT({ + const registrar = createMockRegistrar(regRecord) + + const dht = create({ libp2p: { - multiaddrs: [new Multiaddr('/ip4/0.0.0.0/tcp/4002')] - }, - dialer: { - connectToPeer: (peer) => connectToPeer(dht, peer) + peerId, + multiaddrs: [ + new Multiaddr('/ip4/127.0.0.1/tcp/4002'), + new Multiaddr('/ip4/192.168.1.1/tcp/4002'), + new Multiaddr('/ip4/85.3.31.0/tcp/4002') + ], + peerStore, + dialProtocol: (peer, protocol, options) => connectToPeer(dht, peer, protocol, options), + registrar, + handle: (protocol, fn) => { + registrar.handle(protocol, fn) + }, + unhandle: (protocol) => { + registrar.unhandle(protocol) + }, + on: () => {}, + connectionManager: { + on: () => {} + } }, - registrar: createMockRegistrar(regRecord), - peerStore, - peerId: peerId, validators: { v: { func () { @@ -103,29 +112,38 @@ class TestDHT { ...options }) + // simulate libp2p._onDiscoveryPeer + dht.on('peer', (peerData) => { + if (peerData.id.toB58String() === peerId.toB58String()) { + return + } + + peerData.multiaddrs && peerStore.addressBook.add(peerData.id, peerData.multiaddrs) + peerData.protocols && peerStore.protoBook.set(peerData.id, peerData.protocols) + }) + if (autoStart) { dht.start() } dht.regRecord = regRecord this.nodes.push(dht) + return dht } 
async connect (dhtA, dhtB) { - const onConnectA = dhtA.regRecord[dhtA.protocol].onConnect - const onConnectB = dhtB.regRecord[dhtB.protocol].onConnect - + const onConnectA = dhtA.regRecord[dhtA._lan._protocol].onConnect + const onConnectB = dhtB.regRecord[dhtB._lan._protocol].onConnect const [c0, c1] = ConnectionPair() - const routingTableChecks = [] // Notice peers of connection if (!dhtB._clientMode) { // B is a server, trigger connect events on A - await onConnectA(dhtB.peerId, c0) + await onConnectA(dhtB._libp2p.peerId, c0) routingTableChecks.push(async () => { - const match = await dhtA.routingTable.find(dhtB.peerId) + const match = await dhtA._lan._routingTable.find(dhtB._libp2p.peerId) if (!match) { await delay(100) @@ -135,11 +153,12 @@ class TestDHT { return match }) } + if (!dhtA._clientMode) { // A is a server, trigger connect events on B - await onConnectB(dhtA.peerId, c1) + await onConnectB(dhtA._libp2p.peerId, c1) routingTableChecks.push(async () => { - const match = await dhtB.routingTable.find(dhtA.peerId) + const match = await dhtB._lan._routingTable.find(dhtA._libp2p.peerId) if (!match) { await delay(100) @@ -151,8 +170,8 @@ class TestDHT { } // Libp2p dial adds multiaddrs to the addressBook - dhtA.peerStore.addressBook.add(dhtB.peerId, dhtB.libp2p.multiaddrs) - dhtB.peerStore.addressBook.add(dhtA.peerId, dhtA.libp2p.multiaddrs) + dhtA._libp2p.peerStore.addressBook.add(dhtB._libp2p.peerId, dhtB._libp2p.multiaddrs) + dhtB._libp2p.peerStore.addressBook.add(dhtA._libp2p.peerId, dhtA._libp2p.multiaddrs) // Check routing tables return Promise.all(routingTableChecks.map(check => {