diff --git a/package-lock.json b/package-lock.json index eca4f5a4de..d7cb00539b 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1593,31 +1593,21 @@ } }, "@matrixai/db": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/@matrixai/db/-/db-1.1.5.tgz", - "integrity": "sha512-zPpP/J1A3TLRaQKaGa5smualzjW4Rin4K48cpU5/9ThyXfpVBBp/mrkbDfjL/O5z6YTcuGVf2+yLck8tF8kVUw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@matrixai/db/-/db-1.2.1.tgz", + "integrity": "sha512-1W8TORmRX3q3NugZFn0FTgI0mo/n0nWBTXHKXwwPfxtdyNfi18JCj3HVCwWdToOo87ypnS/mqLDIUTSHbF7F3Q==", "requires": { "@matrixai/async-init": "^1.6.0", - "@matrixai/logger": "^2.0.1", - "@matrixai/workers": "^1.2.3", - "abstract-leveldown": "^7.0.0", + "@matrixai/logger": "^2.1.0", + "@matrixai/workers": "^1.2.5", + "abstract-leveldown": "^7.2.0", "async-mutex": "^0.3.1", "level": "7.0.1", - "levelup": "^5.0.1", + "levelup": "^5.1.1", "sublevel-prefixer": "^1.0.0", - "subleveldown": "^5.0.1", + "subleveldown": "^6.0.1", "threads": "^1.6.5", "ts-custom-error": "^3.2.0" - }, - "dependencies": { - "async-mutex": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.2.tgz", - "integrity": "sha512-HuTK7E7MT7jZEh1P9GtRW9+aTWiDWWi9InbZ5hjxrnRa39KS4BW04+xLBhYNS2aXhHUIKZSw3gj4Pn1pj+qGAA==", - "requires": { - "tslib": "^2.3.1" - } - } } }, "@matrixai/id": { @@ -1718,6 +1708,12 @@ "integrity": "sha512-eZxlbI8GZscaGS7kkc/trHTT5xgrjH3/1n2JDwusC9iahPKWMRvRjJSAN5mCXviuTGQ/lHnhvv8Q1YTpnfz9gA==", "dev": true }, + "@types/abstract-leveldown": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/@types/abstract-leveldown/-/abstract-leveldown-7.2.0.tgz", + "integrity": "sha512-q5veSX6zjUy/DlDhR4Y4cU0k2Ar+DT2LUraP00T19WLmTO6Se1djepCCaqU6nQrwcJ5Hyo/CWqxTzrrFg8eqbQ==", + "dev": true + }, "@types/babel__core": { "version": "7.1.16", "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.1.16.tgz", @@ -1768,6 +1764,16 @@ "@types/node": "*" } }, + "@types/encoding-down": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@types/encoding-down/-/encoding-down-5.0.0.tgz", + "integrity": "sha512-G0MlS/+/U2RIQLcSEhhAcoMrXw3hXUCFSKbhbeEljoKMra2kq+NPX6tfOveSWQLX2hJXBo+YrvKgAGe+tFL1Aw==", + "dev": true, + "requires": { + "@types/abstract-leveldown": "*", + "@types/level-codec": "*" + } + }, "@types/google-protobuf": { "version": "3.15.5", "resolved": "https://registry.npmjs.org/@types/google-protobuf/-/google-protobuf-3.15.5.tgz", @@ -1829,6 +1835,40 @@ "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=", "dev": true }, + "@types/level": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@types/level/-/level-6.0.0.tgz", + "integrity": "sha512-NjaUpukKfCvnV4Wk0jUaodFi2/66HxgpYghc2aV8iP+zk2NMt/9ps1eVlifqOU/+eLzMlDIY69NWkbPaAstukQ==", + "dev": true, + "requires": { + "@types/abstract-leveldown": "*", + "@types/encoding-down": "*", + "@types/levelup": "*" + } + }, + "@types/level-codec": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/@types/level-codec/-/level-codec-9.0.1.tgz", + "integrity": "sha512-6z7DSlBsmbax3I/bV1Q6jT1nKquDjFl95LURVThdKtwILkRawLYtXdINW19xM95N5kqN2detWb2iGrbUlPwNyw==", + "dev": true + }, + "@types/level-errors": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/level-errors/-/level-errors-3.0.0.tgz", + "integrity": "sha512-/lMtoq/Cf/2DVOm6zE6ORyOM+3ZVm/BvzEZVxUhf6bgh8ZHglXlBqxbxSlJeVp8FCbD3IVvk/VbsaNmDjrQvqQ==", + "dev": true + }, + "@types/levelup": { + "version": "5.1.0", + 
"resolved": "https://registry.npmjs.org/@types/levelup/-/levelup-5.1.0.tgz", + "integrity": "sha512-XagSD3VJFWjZWeQnG4mL53PFRPmb6E7dKXdJxexVw85ki82BWOp68N+R6M1t9OYsbmlY+2S0GZcZtVH3gGbeDw==", + "dev": true, + "requires": { + "@types/abstract-leveldown": "*", + "@types/level-errors": "*", + "@types/node": "*" + } + }, "@types/nexpect": { "version": "0.4.31", "resolved": "https://registry.npmjs.org/@types/nexpect/-/nexpect-0.4.31.tgz", @@ -2701,12 +2741,9 @@ } }, "catering": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/catering/-/catering-2.1.0.tgz", - "integrity": "sha512-M5imwzQn6y+ODBfgi+cfgZv2hIUI6oYU/0f35Mdb1ujGeqeoI5tOnl9Q13DTH7LW+7er+NYq8stNOKZD/Z3U/A==", - "requires": { - "queue-tick": "^1.0.0" - } + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/catering/-/catering-2.1.1.tgz", + "integrity": "sha512-K7Qy8O9p76sL3/3m7/zLKbRkyOlSZAgzEaLhyj2mXS8PsCud2Eo4hAb8aLtZqHh0QGqLcb9dlJSu6lHRVENm1w==" }, "chalk": { "version": "2.4.2", @@ -4811,11 +4848,6 @@ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==" }, - "immediate": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", - "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==" - }, "import-fresh": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", @@ -7649,11 +7681,6 @@ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==" }, - "queue-tick": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/queue-tick/-/queue-tick-1.0.0.tgz", - "integrity": "sha512-ULWhjjE8BmiICGn3G8+1L9wFpERNxkf8ysxkAer4+TFdRefDaXOCV5m92aMB9FtBVmn/8sETXLXY6BfW7hyaWQ==" - }, "ramda": { "version": "0.27.1", "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.27.1.tgz", @@ -8645,124 +8672,16 @@ "integrity": "sha1-TuRZ72Y6yFvyj8ZJ17eWX9ppEHM=" }, "subleveldown": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/subleveldown/-/subleveldown-5.0.1.tgz", - "integrity": "sha512-cVqd/URpp7si1HWu5YqQ3vqQkjuolAwHypY1B4itPlS71/lsf6TQPZ2Y0ijT22EYVkvH5ove9JFJf4u7VGPuZw==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/subleveldown/-/subleveldown-6.0.1.tgz", + "integrity": "sha512-Cnf+cn2wISXU2xflY1SFIqfX4hG2d6lFk2P5F8RDQLmiqN9Ir4ExNfUFH6xnmizMseM/t+nMsDUKjN9Kw6ShFA==", "requires": { - "abstract-leveldown": "^6.3.0", - "encoding-down": "^6.2.0", + "abstract-leveldown": "^7.2.0", + "encoding-down": "^7.1.0", "inherits": "^2.0.3", "level-option-wrap": "^1.1.0", - "levelup": "^4.4.0", + "levelup": "^5.1.1", "reachdown": "^1.1.0" - }, - "dependencies": { - "abstract-leveldown": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/abstract-leveldown/-/abstract-leveldown-6.3.0.tgz", - "integrity": "sha512-TU5nlYgta8YrBMNpc9FwQzRbiXsj49gsALsXadbGHt9CROPzX5fB0rWDR5mtdpOOKa5XqRFpbj1QroPAoPzVjQ==", - "requires": { - "buffer": "^5.5.0", - "immediate": "^3.2.3", - "level-concat-iterator": "~2.0.0", - "level-supports": "~1.0.0", - "xtend": "~4.0.0" - } - }, - "buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - 
"requires": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "deferred-leveldown": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/deferred-leveldown/-/deferred-leveldown-5.3.0.tgz", - "integrity": "sha512-a59VOT+oDy7vtAbLRCZwWgxu2BaCfd5Hk7wxJd48ei7I+nsg8Orlb9CLG0PMZienk9BSUKgeAqkO2+Lw+1+Ukw==", - "requires": { - "abstract-leveldown": "~6.2.1", - "inherits": "^2.0.3" - }, - "dependencies": { - "abstract-leveldown": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/abstract-leveldown/-/abstract-leveldown-6.2.3.tgz", - "integrity": "sha512-BsLm5vFMRUrrLeCcRc+G0t2qOaTzpoJQLOubq2XM72eNpjF5UdU5o/5NvlNhx95XHcAvcl8OMXr4mlg/fRgUXQ==", - "requires": { - "buffer": "^5.5.0", - "immediate": "^3.2.3", - "level-concat-iterator": "~2.0.0", - "level-supports": "~1.0.0", - "xtend": "~4.0.0" - } - } - } - }, - "encoding-down": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/encoding-down/-/encoding-down-6.3.0.tgz", - "integrity": "sha512-QKrV0iKR6MZVJV08QY0wp1e7vF6QbhnbQhb07bwpEyuz4uZiZgPlEGdkCROuFkUwdxlFaiPIhjyarH1ee/3vhw==", - "requires": { - "abstract-leveldown": "^6.2.1", - "inherits": "^2.0.3", - "level-codec": "^9.0.0", - "level-errors": "^2.0.0" - } - }, - "level-codec": { - "version": "9.0.2", - "resolved": "https://registry.npmjs.org/level-codec/-/level-codec-9.0.2.tgz", - "integrity": "sha512-UyIwNb1lJBChJnGfjmO0OR+ezh2iVu1Kas3nvBS/BzGnx79dv6g7unpKIDNPMhfdTEGoc7mC8uAu51XEtX+FHQ==", - "requires": { - "buffer": "^5.6.0" - } - }, - "level-concat-iterator": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/level-concat-iterator/-/level-concat-iterator-2.0.1.tgz", - "integrity": "sha512-OTKKOqeav2QWcERMJR7IS9CUo1sHnke2C0gkSmcR7QuEtFNLLzHQAvnMw8ykvEcv0Qtkg0p7FOwP1v9e5Smdcw==" - }, - "level-errors": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/level-errors/-/level-errors-2.0.1.tgz", - "integrity": "sha512-UVprBJXite4gPS+3VznfgDSU8PTRuVX0NXwoWW50KLxd2yw4Y1t2JUR5In1itQnudZqRMT9DlAM3Q//9NCjCFw==", - "requires": { - "errno": "~0.1.1" - } - }, - "level-iterator-stream": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/level-iterator-stream/-/level-iterator-stream-4.0.2.tgz", - "integrity": "sha512-ZSthfEqzGSOMWoUGhTXdX9jv26d32XJuHz/5YnuHZzH6wldfWMOVwI9TBtKcya4BKTyTt3XVA0A3cF3q5CY30Q==", - "requires": { - "inherits": "^2.0.4", - "readable-stream": "^3.4.0", - "xtend": "^4.0.2" - } - }, - "level-supports": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/level-supports/-/level-supports-1.0.1.tgz", - "integrity": "sha512-rXM7GYnW8gsl1vedTJIbzOrRv85c/2uCMpiiCzO2fndd06U/kUXEEU9evYn4zFggBOg36IsBW8LzqIpETwwQzg==", - "requires": { - "xtend": "^4.0.2" - } - }, - "levelup": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/levelup/-/levelup-4.4.0.tgz", - "integrity": "sha512-94++VFO3qN95cM/d6eBXvd894oJE0w3cInq9USsyQzzoJxmiYzPAocNcuGCPGGjoXqDVJcr3C1jzt1TSjyaiLQ==", - "requires": { - "deferred-leveldown": "~5.3.0", - "level-errors": "~2.0.0", - "level-iterator-stream": "~4.0.0", - "level-supports": "~1.0.0", - "xtend": "~4.0.0" - } - } } }, "supports-color": { @@ -9615,11 +9534,6 @@ "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", "dev": true }, - "xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==" - }, "y18n": { "version": "5.0.8", "resolved": 
"https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", diff --git a/package.json b/package.json index cf62e90429..0153b70cd6 100644 --- a/package.json +++ b/package.json @@ -73,7 +73,7 @@ "dependencies": { "@grpc/grpc-js": "1.3.7", "@matrixai/async-init": "^1.6.0", - "@matrixai/db": "^1.1.5", + "@matrixai/db": "^1.2.1", "@matrixai/id": "^3.3.2", "@matrixai/logger": "^2.1.0", "@matrixai/workers": "^1.2.5", @@ -109,6 +109,7 @@ "@types/cross-spawn": "^6.0.2", "@types/google-protobuf": "^3.7.4", "@types/jest": "^26.0.20", + "@types/level": "^6.0.0", "@types/nexpect": "^0.4.31", "@types/node": "^14.14.35", "@types/node-forge": "^0.9.7", diff --git a/src/client/service/nodesGetAll.ts b/src/client/service/nodesGetAll.ts index 09c354ff2a..ede1315e1e 100644 --- a/src/client/service/nodesGetAll.ts +++ b/src/client/service/nodesGetAll.ts @@ -29,7 +29,9 @@ function nodesGetAll({ const response = new nodesPB.NodeBuckets(); const metadata = await authenticate(call.metadata); call.sendMetadata(metadata); - const buckets = await nodeGraph.getAllBuckets(); + // FIXME: + // const buckets = await nodeGraph.getAllBuckets(); + const buckets: any = [] for (const b of buckets) { let index; for (const id of Object.keys(b)) { @@ -39,7 +41,7 @@ function nodesGetAll({ .setPort(b[id].address.port); // For every node in every bucket, add it to our message if (!index) { - index = nodesUtils.calculateBucketIndex( + index = nodesUtils.bucketIndex( keyManager.getNodeId(), IdInternal.fromString(id) ); diff --git a/src/discovery/Discovery.ts b/src/discovery/Discovery.ts index 900b6b63fa..bf6cbd63a3 100644 --- a/src/discovery/Discovery.ts +++ b/src/discovery/Discovery.ts @@ -148,7 +148,7 @@ class Discovery { reverse: true, }); for await (const o of keyStream) { - latestId = IdInternal.fromBuffer(o); + latestId = IdInternal.fromBuffer(o as Buffer); } this.discoveryQueueIdGenerator = discoveryUtils.createDiscoveryQueueIdGenerator(latestId); @@ -208,8 +208,9 @@ class Discovery { while (true) { if (!(await this.queueIsEmpty())) { for await (const o of this.discoveryQueueDb.createReadStream()) { - const vertexId = IdInternal.fromBuffer(o.key) as DiscoveryQueueId; - const data = o.value as Buffer; + const kv = (o as any) + const vertexId = IdInternal.fromBuffer(kv.key) as DiscoveryQueueId; + const data = kv.value as Buffer; const vertex = await this.db.deserializeDecrypt( data, false, @@ -438,7 +439,7 @@ class Discovery { limit: 1, }); for await (const o of keyStream) { - nextDiscoveryQueueId = IdInternal.fromBuffer(o); + nextDiscoveryQueueId = IdInternal.fromBuffer(o as Buffer); } if (nextDiscoveryQueueId == null) { return true; diff --git a/src/network/utils.ts b/src/network/utils.ts index def14c3374..1882d927d1 100644 --- a/src/network/utils.ts +++ b/src/network/utils.ts @@ -41,10 +41,12 @@ function isHostname(hostname: any): hostname is Hostname { /** * Ports must be numbers between 0 and 65535 inclusive + * If connect is true, then port must be a number between 1 and 65535 inclusive */ -function isPort(port: any): port is Port { +function isPort(port: any, connect: boolean = false): port is Port { if (typeof port !== 'number') return false; if (port < 0 || port > 65535) return false; + if (connect && port === 0) return false; return true; } diff --git a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index 567a09930e..eb56c154e1 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -9,7 +9,7 @@ import type { NodeAddress, NodeData, SeedNodes, - NodeIdString, + 
NodeIdString, NodeEntry, } from './types'; import Logger from '@matrixai/logger'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; @@ -411,7 +411,7 @@ class NodeConnectionManager { public async findNode(targetNodeId: NodeId): Promise { // First check if we already have an existing ID -> address record - let address = await this.nodeGraph.getNode(targetNodeId); + let address = (await this.nodeGraph.getNode(targetNodeId))?.address; // Otherwise, attempt to locate it by contacting network if (address == null) { address = await this.getClosestGlobalNodes(targetNodeId); @@ -503,7 +503,7 @@ class NodeConnectionManager { // getClosestGlobalNodes()? const contacted: { [nodeId: string]: boolean } = {}; // Iterate until we've found found and contacted k nodes - while (Object.keys(contacted).length <= this.nodeGraph.maxNodesPerBucket) { + while (Object.keys(contacted).length <= this.nodeGraph.nodeBucketLimit) { // While (!foundTarget) { // Remove the node from the front of the array const nextNode = shortlist.shift(); @@ -534,27 +534,30 @@ class NodeConnectionManager { ); // Check to see if any of these are the target node. At the same time, add // them to the shortlist - for (const nodeData of foundClosest) { + for (const [nodeId, nodeData] of foundClosest) { // Ignore any nodes that have been contacted - if (contacted[nodeData.id]) { + if (contacted[nodeId]) { continue; } - if (nodeData.id.equals(targetNodeId)) { - await this.nodeGraph.setNode(nodeData.id, nodeData.address); + if (nodeId.equals(targetNodeId)) { + await this.nodeGraph.setNode(nodeId, nodeData.address); foundAddress = nodeData.address; // We have found the target node, so we can stop trying to look for it // in the shortlist break; } - shortlist.push(nodeData); + shortlist.push([nodeId, nodeData]); } // To make the number of jumps relatively short, should connect to the nodes // closest to the target first, and ask if they know of any closer nodes // Then we can simply unshift the first (closest) element from the shortlist - shortlist.sort(function (a: NodeData, b: NodeData) { - if (a.distance > b.distance) { + const distance = (nodeId: NodeId) => nodesUtils.nodeDistance(targetNodeId, nodeId); + shortlist.sort(function ([nodeIdA, ], [nodeIdB, ]) { + const distanceA = distance(nodeIdA); + const distanceB = distance(nodeIdB); + if (distanceA > distanceB) { return 1; - } else if (a.distance < b.distance) { + } else if (distanceA < distanceB) { return -1; } else { return 0; @@ -575,7 +578,7 @@ class NodeConnectionManager { public async getRemoteNodeClosestNodes( nodeId: NodeId, targetNodeId: NodeId, - ): Promise> { + ): Promise> { // Construct the message const nodeIdMessage = new nodesPB.Node(); nodeIdMessage.setNodeId(nodesUtils.encodeNodeId(targetNodeId)); @@ -583,20 +586,21 @@ class NodeConnectionManager { return this.withConnF(nodeId, async (connection) => { const client = await connection.getClient(); const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); - const nodes: Array = []; + const nodes: Array<[NodeId, NodeData]> = []; // Loop over each map element (from the returned response) and populate nodes response.getNodeTableMap().forEach((address, nodeIdString: string) => { const nodeId = nodesUtils.decodeNodeId(nodeIdString); // If the nodeId is not valid we don't add it to the list of nodes if (nodeId != null) { - nodes.push({ - id: nodeId, + nodes.push([ + nodeId, + { address: { host: address.getHost() as Host | Hostname, port: address.getPort() as Port, }, - distance: 
nodesUtils.calculateDistance(targetNodeId, nodeId), - }); + lastUpdated: 0, // FIXME? + }]); } }); return nodes; @@ -630,8 +634,9 @@ class NodeConnectionManager { seedNodeId, this.keyManager.getNodeId(), ); - for (const n of nodes) { - await this.nodeGraph.setNode(n.id, n.address); + for (const [nodeId, nodeData] of nodes) { + // FIXME: this should be the `nodeManager.setNode` + await this.nodeGraph.setNode(nodeId, nodeData.address); } } } diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index 4237b55291..069c781177 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -1,9 +1,16 @@ -import type { DB, DBLevel, DBOp } from '@matrixai/db'; -import type { NodeId, NodeAddress, NodeBucket } from './types'; +import type { DB, DBDomain, DBLevel, DBOp, DBTransaction, Transaction } from '@matrixai/db'; +import type { + NodeId, + NodeIdString, + NodeAddress, + NodeBucket, + NodeData, + NodeBucketMeta, + NodeBucketIndex, + NodeGraphSpace, +} from './types'; import type KeyManager from '../keys/KeyManager'; -import type { Host, Hostname, Port } from '../network/types'; -import { Mutex } from 'async-mutex'; -import lexi from 'lexicographic-integer'; +import type { ResourceAcquire, ResourceRelease } from '../utils'; import Logger from '@matrixai/logger'; import { CreateDestroyStartStop, @@ -12,10 +19,12 @@ import { import { IdInternal } from '@matrixai/id'; import * as nodesUtils from './utils'; import * as nodesErrors from './errors'; +import { RWLock, withF, withG, getUnixtime } from '../utils'; + /** * NodeGraph is an implementation of Kademlia for maintaining peer to peer information - * We maintain a map of buckets. Where each bucket has k number of node infos + * It is a database of fixed-size buckets, where each bucket contains NodeId -> NodeData */ interface NodeGraph extends CreateDestroyStartStop {} @CreateDestroyStartStop( @@ -23,29 +32,16 @@ interface NodeGraph extends CreateDestroyStartStop {} new nodesErrors.ErrorNodeGraphDestroyed(), ) class NodeGraph { - // Max number of nodes in each k-bucket (a.k.a. 
k) - public readonly maxNodesPerBucket: number = 20; - - protected logger: Logger; - protected db: DB; - protected keyManager: KeyManager; - protected nodeGraphDbDomain: string = this.constructor.name; - protected nodeGraphBucketsDbDomain: Array = [ - this.nodeGraphDbDomain, - 'buckets', - ]; - protected nodeGraphDb: DBLevel; - protected nodeGraphBucketsDb: DBLevel; - protected lock: Mutex = new Mutex(); - public static async createNodeGraph({ db, keyManager, + nodeIdBits = 256, logger = new Logger(this.name), fresh = false, }: { db: DB; keyManager: KeyManager; + nodeIdBits?: number; logger?: Logger; fresh?: boolean; }): Promise { @@ -53,6 +49,7 @@ class NodeGraph { const nodeGraph = new NodeGraph({ db, keyManager, + nodeIdBits, logger, }); await nodeGraph.start({ fresh }); @@ -60,375 +57,621 @@ class NodeGraph { return nodeGraph; } + /** + * Bit size of the NodeIds + * This equals the number of buckets + */ + public readonly nodeIdBits: number; + /** + * Max number of nodes in each k-bucket + */ + public readonly nodeBucketLimit: number = 20; + + protected logger: Logger; + protected db: DB; + protected keyManager: KeyManager; + protected space: NodeGraphSpace; + protected nodeGraphDbDomain: DBDomain = [this.constructor.name]; + protected nodeGraphMetaDbDomain: DBDomain; + protected nodeGraphBucketsDbDomain: DBDomain; + protected nodeGraphLastUpdatedDbDomain: DBDomain; + protected nodeGraphDb: DBLevel; + protected nodeGraphMetaDb: DBLevel; + protected nodeGraphBucketsDb: DBLevel; + protected nodeGraphLastUpdatedDb: DBLevel; + + // WORK out a way to do re-entrancy properly + // Otherwise we have restrictions on the way we are developing stuff + protected lock: RWLock = new RWLock(); + constructor({ db, keyManager, + nodeIdBits, logger, }: { db: DB; keyManager: KeyManager; + nodeIdBits: number; logger: Logger; }) { this.logger = logger; this.db = db; this.keyManager = keyManager; + this.nodeIdBits = nodeIdBits; } get locked(): boolean { return this.lock.isLocked(); } - public async start({ - fresh = false, - }: { - fresh?: boolean; - } = {}) { + public acquireLockRead(lazy: boolean = false): ResourceAcquire { + return async () => { + let release: ResourceRelease; + if (lazy && this.lock.isLocked()) { + release = async () => {}; + } else { + const r = await this.lock.acquireRead(); + release = async () => r(); + } + return [release, this.lock]; + }; + } + + public acquireLockWrite(lazy: boolean = false): ResourceAcquire { + return async () => { + let release: ResourceRelease; + if (lazy && this.lock.isLocked()) { + release = async () => {}; + } else { + const r = await this.lock.acquireWrite(); + release = async () => r(); + } + return [release, this.lock]; + }; + } + + public async start({ fresh = false }: { fresh?: boolean } = {}): Promise { this.logger.info(`Starting ${this.constructor.name}`); - const nodeGraphDb = await this.db.level(this.nodeGraphDbDomain); - // Buckets stores NodeBucketIndex -> NodeBucket - const nodeGraphBucketsDb = await this.db.level( - this.nodeGraphBucketsDbDomain[1], - nodeGraphDb, - ); + const nodeGraphDb = await this.db.level(this.nodeGraphDbDomain[0]); if (fresh) { await nodeGraphDb.clear(); } + // Space key is used to create a swappable sublevel + // when remapping the buckets during `this.refreshBuckets` + const space = await this.setupSpace(); + const nodeGraphMetaDbDomain = [ + this.nodeGraphDbDomain[0], + 'meta' + space + ]; + const nodeGraphBucketsDbDomain = [ + this.nodeGraphDbDomain[0], + 'buckets' + space + ]; + const nodeGraphLastUpdatedDbDomain = [ 
+ this.nodeGraphDbDomain[0], + 'lastUpdated' + space + ]; + // Bucket metadata sublevel: `!meta!! -> value` + const nodeGraphMetaDb = await this.db.level( + nodeGraphMetaDbDomain[1], + nodeGraphDb, + ); + // Bucket sublevel: `!buckets!! -> NodeData` + // The BucketIndex can range from 0 to NodeId bitsize minus 1 + // So 256 bits means 256 buckets of 0 to 255 + const nodeGraphBucketsDb = await this.db.level( + nodeGraphBucketsDbDomain[1], + nodeGraphDb, + ); + // Last updated sublevel: `!lastUpdated!!- -> NodeId` + // This is used as a sorted index of the NodeId by `lastUpdated` timestamp + // The `NodeId` must be appended in the key in order to disambiguate `NodeId` with same `lastUpdated` timestamp + const nodeGraphLastUpdatedDb = await this.db.level( + nodeGraphLastUpdatedDbDomain[1], + nodeGraphDb + ); + this.space = space; + this.nodeGraphMetaDbDomain = nodeGraphMetaDbDomain; + this.nodeGraphBucketsDbDomain = nodeGraphBucketsDbDomain; + this.nodeGraphLastUpdatedDbDomain = nodeGraphLastUpdatedDbDomain; this.nodeGraphDb = nodeGraphDb; + this.nodeGraphMetaDb = nodeGraphMetaDb; this.nodeGraphBucketsDb = nodeGraphBucketsDb; + this.nodeGraphLastUpdatedDb = nodeGraphLastUpdatedDb; this.logger.info(`Started ${this.constructor.name}`); } - public async stop() { + public async stop(): Promise { this.logger.info(`Stopping ${this.constructor.name}`); this.logger.info(`Stopped ${this.constructor.name}`); } - public async destroy() { + public async destroy(): Promise { this.logger.info(`Destroying ${this.constructor.name}`); - const nodeGraphDb = await this.db.level(this.nodeGraphDbDomain); + // If the DB was stopped, the existing sublevel `this.nodeGraphDb` will not be valid + // Therefore we recreate the sublevel here + const nodeGraphDb = await this.db.level(this.nodeGraphDbDomain[0]); await nodeGraphDb.clear(); this.logger.info(`Destroyed ${this.constructor.name}`); } /** - * Run several operations within the same lock - * This does not ensure atomicity of the underlying database - * Database atomicity still depends on the underlying operation + * Sets up the space key + * The space string is suffixed to the `buckets` and `meta` sublevels + * This is used to allow swapping of sublevels when remapping buckets + * during `this.refreshBuckets` */ - public async transaction( - f: (nodeGraph: NodeGraph) => Promise, - ): Promise { - const release = await this.lock.acquire(); - try { - return await f(this); - } finally { - release(); + protected async setupSpace(): Promise { + let space = await this.db.get(this.nodeGraphDbDomain, 'space'); + if (space != null) { + return space; } + space = '0'; + await this.db.put(this.nodeGraphDbDomain, 'space', space); + return space; + } + + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getNode(nodeId: NodeId): Promise { + const [bucketIndex] = this.bucketIndex(nodeId); + const bucketDomain = [ + ...this.nodeGraphBucketsDbDomain, + nodesUtils.bucketKey(bucketIndex) + ]; + return await this.db.get( + bucketDomain, + nodesUtils.bucketDbKey(nodeId), + ); } /** - * Transaction wrapper that will not lock if the operation was executed - * within a transaction context + * Get all nodes + * Nodes are always sorted by `NodeBucketIndex` first + * Then secondly by the node IDs + * The `order` parameter applies to both, for example possible sorts: + * NodeBucketIndex asc, NodeID asc + * NodeBucketIndex desc, NodeId desc */ - public async _transaction(f: () => Promise): Promise { - if (this.lock.isLocked()) { - return await f(); - } else { - return await 
this.transaction(f); + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async *getNodes(order: 'asc' | 'desc' = 'asc'): AsyncGenerator<[NodeId, NodeData]> { + for await (const o of this.nodeGraphBucketsDb.createReadStream({ + reverse: (order === 'asc') ? false : true + })) { + const { nodeId, bucketIndex } = nodesUtils.parseBucketsDbKey((o as any).key as Buffer); + const data = (o as any).value as Buffer; + const nodeData = await this.db.deserializeDecrypt(data, false) + yield [nodeId, nodeData]; } } - /** - * Retrieves the node Address - * @param nodeId node ID of the target node - * @returns Node Address of the target node - */ + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getNode(nodeId: NodeId): Promise { - return await this._transaction(async () => { - const bucketIndex = this.getBucketIndex(nodeId); - const bucket = await this.db.get( - this.nodeGraphBucketsDbDomain, - bucketIndex, + public async setNode( + nodeId: NodeId, + nodeAddress: NodeAddress, + ): Promise { + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); + const bucketDomain = [...this.nodeGraphBucketsDbDomain, bucketKey]; + const lastUpdatedDomain = [...this.nodeGraphLastUpdatedDbDomain, bucketKey]; + const nodeData = await this.db.get( + bucketDomain, + nodesUtils.bucketDbKey(nodeId), + ); + // If this is a new entry, check the bucket limit + if (nodeData == null) { + const count = await this.getBucketMetaProp(bucketIndex, 'count'); + if (count < this.nodeBucketLimit) { + // Increment the bucket count + this.setBucketMetaProp(bucketIndex, 'count', count + 1); + } else { + // Remove the oldest entry in the bucket + const lastUpdatedBucketDb = await this.db.level( + bucketKey, + this.nodeGraphLastUpdatedDb + ); + let oldestLastUpdatedKey: Buffer; + let oldestNodeId: NodeId; + for await (const key of lastUpdatedBucketDb.createKeyStream({ limit: 1 })) { + oldestLastUpdatedKey = key as Buffer; + ({ nodeId: oldestNodeId } = nodesUtils.parseLastUpdatedBucketDbKey(key as Buffer)); + } + await this.db.del(bucketDomain, oldestNodeId!.toBuffer()); + await this.db.del(lastUpdatedDomain, oldestLastUpdatedKey!); + } + } else { + // This is an existing entry, so the index entry must be reset + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey(nodeData.lastUpdated, nodeId) + await this.db.del( + lastUpdatedDomain, + lastUpdatedKey ); - if (bucket != null && nodeId in bucket) { - return bucket[nodeId].address; + } + const lastUpdated = getUnixtime(); + await this.db.put( + bucketDomain, + nodesUtils.bucketDbKey(nodeId), + { + address: nodeAddress, + lastUpdated, } - return; - }); + ); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey(lastUpdated, nodeId) + await this.db.put( + lastUpdatedDomain, + lastUpdatedKey, + nodesUtils.bucketDbKey(nodeId), + true + ); } - /** - * Determines whether a node ID -> node address mapping exists in this node's - * node table. 
- * @param targetNodeId the node ID of the node to find - * @returns true if the node exists in the table, false otherwise - */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async knowsNode(targetNodeId: NodeId): Promise { - return !!(await this.getNode(targetNodeId)); + public async unsetNode(nodeId: NodeId): Promise { + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); + const bucketDomain = [...this.nodeGraphBucketsDbDomain, bucketKey]; + const lastUpdatedDomain = [...this.nodeGraphLastUpdatedDbDomain, bucketKey]; + const nodeData = await this.db.get( + bucketDomain, + nodesUtils.bucketDbKey(nodeId) + ); + if (nodeData != null) { + const count = await this.getBucketMetaProp(bucketIndex, 'count'); + this.setBucketMetaProp(bucketIndex, 'count', count - 1); + await this.db.del( + bucketDomain, + nodesUtils.bucketDbKey(nodeId) + ); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey(nodeData.lastUpdated, nodeId) + await this.db.del(lastUpdatedDomain, lastUpdatedKey); + } } /** - * Returns the specified bucket if it exists - * @param bucketIndex + * Gets a bucket + * The bucket's node IDs is sorted lexicographically by default + * Alternatively you can acquire them sorted by lastUpdated timestamp + * or by distance to the own NodeId */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getBucket(bucketIndex: number): Promise { - return await this._transaction(async () => { - const bucket = await this.db.get( - this.nodeGraphBucketsDbDomain, - lexi.pack(bucketIndex, 'hex'), + public async getBucket( + bucketIndex: NodeBucketIndex, + sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', + order: 'asc' | 'desc' = 'asc', + ): Promise { + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex(`bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`); + } + const bucketKey = nodesUtils.bucketKey(bucketIndex); + let bucket: NodeBucket = []; + if (sort === 'nodeId' || sort ==='distance') { + const bucketDb = await this.db.level( + bucketKey, + this.nodeGraphBucketsDb ); - // Cast the non-primitive types correctly (ensures type safety when using them) - for (const nodeId in bucket) { - bucket[nodeId].address.host = bucket[nodeId].address.host as - | Host - | Hostname; - bucket[nodeId].address.port = bucket[nodeId].address.port as Port; - bucket[nodeId].lastUpdated = new Date(bucket[nodeId].lastUpdated); + for await (const o of bucketDb.createReadStream({ + reverse: (order === 'asc') ? false : true + })) { + const nodeId = nodesUtils.parseBucketDbKey((o as any).key as Buffer); + const data = (o as any).value as Buffer; + const nodeData = await this.db.deserializeDecrypt(data, false) + bucket.push([nodeId, nodeData]); } - return bucket; - }); + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order + ); + } + } else if (sort === 'lastUpdated') { + const bucketDb = await this.db.level( + bucketKey, + this.nodeGraphBucketsDb + ); + const lastUpdatedBucketDb = await this.db.level( + bucketKey, + this.nodeGraphLastUpdatedDb + ); + const bucketDbIterator = bucketDb.iterator(); + try { + for await (const indexData of lastUpdatedBucketDb.createValueStream({ + reverse: (order === 'asc') ? 
false : true + })) { + const nodeIdBuffer = await this.db.deserializeDecrypt(indexData as Buffer, true); + const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + bucketDbIterator.seek(nodeIdBuffer); + // @ts-ignore + const [, bucketData] = await bucketDbIterator.next(); + const nodeData = await this.db.deserializeDecrypt(bucketData, false); + bucket.push([nodeId, nodeData]); + } + } finally { + // @ts-ignore + await bucketDbIterator.end(); + } + } + return bucket; } /** - * Sets a node to the bucket database - * This may delete an existing node if the bucket is filled up + * Gets all buckets + * Buckets are always sorted by `NodeBucketIndex` first + * Then secondly by the `sort` parameter + * The `order` parameter applies to both, for example possible sorts: + * NodeBucketIndex asc, NodeID asc + * NodeBucketIndex desc, NodeId desc + * NodeBucketIndex asc, distance asc + * NodeBucketIndex desc, distance desc + * NodeBucketIndex asc, lastUpdated asc + * NodeBucketIndex desc, lastUpdated desc */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async setNode( - nodeId: NodeId, - nodeAddress: NodeAddress, - ): Promise { - return await this._transaction(async () => { - const ops = await this.setNodeOps(nodeId, nodeAddress); - await this.db.batch(ops); - }); - } - - protected async setNodeOps( - nodeId: NodeId, - nodeAddress: NodeAddress, - ): Promise> { - const bucketIndex = this.getBucketIndex(nodeId); - let bucket = await this.db.get( - this.nodeGraphBucketsDbDomain, - bucketIndex, - ); - if (bucket == null) { - bucket = {}; - } - bucket[nodeId] = { - address: nodeAddress, - lastUpdated: new Date(), - }; - // Perform the check on size after we add/update the node. If it's an update, - // then we don't need to perform the deletion - let bucketEntries = Object.entries(bucket); - if (bucketEntries.length > this.maxNodesPerBucket) { - const leastActive = bucketEntries.reduce((prev, curr) => { - return new Date(prev[1].lastUpdated) < new Date(curr[1].lastUpdated) - ? prev - : curr; - }); - delete bucket[leastActive[0]]; - bucketEntries = Object.entries(bucket); - // For safety, make sure that the bucket is actually at maxNodesPerBucket - if (bucketEntries.length !== this.maxNodesPerBucket) { - throw new nodesErrors.ErrorNodeGraphOversizedBucket(); + public async *getBuckets( + sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', + order: 'asc' | 'desc' = 'asc' + ): AsyncGenerator<[NodeBucketIndex, NodeBucket]> { + let bucketIndex: NodeBucketIndex | undefined; + let bucket: NodeBucket = []; + if (sort === 'nodeId' || sort === 'distance') { + for await (const o of this.nodeGraphBucketsDb.createReadStream({ + reverse: (order === 'asc') ? 
false : true + })) { + const { + bucketIndex: bucketIndex_, + nodeId + } = nodesUtils.parseBucketsDbKey((o as any).key); + const data = (o as any).value; + const nodeData = await this.db.deserializeDecrypt(data, false); + if (bucketIndex == null) { + // First entry of the first bucket + bucketIndex = bucketIndex_ + bucket.push([nodeId, nodeData]); + } else if (bucketIndex === bucketIndex_) { + // Subsequent entries of the same bucket + bucket.push([nodeId, nodeData]); + } else if (bucketIndex !== bucketIndex_) { + // New bucket + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order + ); + } + yield [bucketIndex, bucket]; + bucketIndex = bucketIndex_; + bucket = [[nodeId, nodeData]]; + } + } + // Yield the last bucket if it exists + if (bucketIndex != null) { + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order + ); + } + yield [bucketIndex, bucket]; + } + } else if (sort === 'lastUpdated') { + const bucketsDbIterator = this.nodeGraphBucketsDb.iterator(); + try { + for await (const key of this.nodeGraphLastUpdatedDb.createKeyStream({ + reverse: (order === 'asc') ? false : true + })) { + const { + bucketIndex: bucketIndex_, + nodeId + } = nodesUtils.parseLastUpdatedBucketsDbKey(key as Buffer); + bucketsDbIterator.seek( + nodesUtils.bucketsDbKey( + bucketIndex_, + nodeId + ) + ); + // @ts-ignore + const [, bucketData] = await bucketsDbIterator.next(); + const nodeData = await this.db.deserializeDecrypt(bucketData, false); + if (bucketIndex == null) { + // First entry of the first bucket + bucketIndex = bucketIndex_ + bucket.push([nodeId, nodeData]); + } else if (bucketIndex === bucketIndex_) { + // Subsequent entries of the same bucket + bucket.push([nodeId, nodeData]); + } else if (bucketIndex !== bucketIndex_) { + // New bucket + yield [bucketIndex, bucket]; + bucketIndex = bucketIndex_; + bucket = [[nodeId, nodeData]]; + } + } + // Yield the last bucket if it exists + if (bucketIndex != null) { + yield [bucketIndex, bucket]; + } + } finally { + // @ts-ignore + await bucketsDbIterator.end(); } } - return [ - { - type: 'put', - domain: this.nodeGraphBucketsDbDomain, - key: bucketIndex, - value: bucket, - }, - ]; } - /** - * Updates an existing node - * It will update the lastUpdated time - * Optionally it can replace the NodeAddress - */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async updateNode( - nodeId: NodeId, - nodeAddress?: NodeAddress, - ): Promise { - return await this._transaction(async () => { - const ops = await this.updateNodeOps(nodeId, nodeAddress); - await this.db.batch(ops); - }); - } - - protected async updateNodeOps( - nodeId: NodeId, - nodeAddress?: NodeAddress, - ): Promise> { - const bucketIndex = this.getBucketIndex(nodeId); - const bucket = await this.db.get( - this.nodeGraphBucketsDbDomain, - bucketIndex, + public async resetBuckets(nodeIdOwn: NodeId): Promise { + // Setup new space + const spaceNew = (this.space === '0') ? 
'1' : '0'; + const nodeGraphMetaDbDomainNew = [ + this.nodeGraphDbDomain[0], + 'meta' + spaceNew + ]; + const nodeGraphBucketsDbDomainNew = [ + this.nodeGraphDbDomain[0], + 'buckets' + spaceNew + ]; + const nodeGraphLastUpdatedDbDomainNew = [ + this.nodeGraphDbDomain[0], + 'index' + spaceNew + ]; + // Clear the new space (in case it wasn't cleaned properly last time) + const nodeGraphMetaDbNew = await this.db.level( + nodeGraphMetaDbDomainNew[1], + this.nodeGraphDb + ); + const nodeGraphBucketsDbNew = await this.db.level( + nodeGraphBucketsDbDomainNew[1], + this.nodeGraphDb, ); - const ops: Array = []; - if (bucket != null && nodeId in bucket) { - bucket[nodeId].lastUpdated = new Date(); - if (nodeAddress != null) { - bucket[nodeId].address = nodeAddress; + const nodeGraphLastUpdatedDbNew = await this.db.level( + nodeGraphLastUpdatedDbDomainNew[1], + this.nodeGraphDb, + ); + await nodeGraphMetaDbNew.clear(); + await nodeGraphBucketsDbNew.clear(); + await nodeGraphLastUpdatedDbNew.clear(); + // Iterating over all entries across all buckets + for await (const o of this.nodeGraphBucketsDb.createReadStream()) { + // The key is a combined bucket key and node ID + const { nodeId } = nodesUtils.parseBucketsDbKey((o as any).key as Buffer); + // If the new own node ID is one of the existing node IDs, it is just dropped + // We only map to the new bucket if it isn't one of the existing node IDs + if (nodeId.equals(nodeIdOwn)) { + continue; } - ops.push({ - type: 'put', - domain: this.nodeGraphBucketsDbDomain, - key: bucketIndex, - value: bucket, - }); - } else { - throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); + const bucketIndexNew = nodesUtils.bucketIndex(nodeIdOwn, nodeId); + const bucketKeyNew = nodesUtils.bucketKey(bucketIndexNew); + const metaDomainNew = [ + ...nodeGraphMetaDbDomainNew, + bucketKeyNew + ]; + const bucketDomainNew = [ + ...nodeGraphBucketsDbDomainNew, + bucketKeyNew + ]; + const indexDomainNew = [ + ...nodeGraphLastUpdatedDbDomainNew, + bucketKeyNew + ]; + const countNew = await this.db.get(metaDomainNew, 'count') ?? 
0; + if (countNew < this.nodeBucketLimit) { + await this.db.put(metaDomainNew, 'count', countNew + 1); + } else { + const lastUpdatedBucketDbNew = await this.db.level( + bucketKeyNew, + nodeGraphLastUpdatedDbNew + ); + let oldestIndexKey: Buffer; + let oldestNodeId: NodeId; + for await (const key of lastUpdatedBucketDbNew.createKeyStream({ limit: 1 })) { + oldestIndexKey = key as Buffer; + ({ nodeId: oldestNodeId } = nodesUtils.parseLastUpdatedBucketDbKey(key as Buffer)); + } + await this.db.del(bucketDomainNew, nodesUtils.bucketDbKey(oldestNodeId!)); + await this.db.del(indexDomainNew, oldestIndexKey!); + } + const data = (o as any).value as Buffer; + const nodeData = await this.db.deserializeDecrypt(data, false) + await this.db.put(bucketDomainNew, nodesUtils.bucketDbKey(nodeId), nodeData); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + nodeData.lastUpdated, + nodeId + ); + await this.db.put(indexDomainNew, lastUpdatedKey, nodesUtils.bucketDbKey(nodeId), true); } - return ops; + // Swap to the new space + await this.db.put(this.nodeGraphDbDomain, 'space', spaceNew); + // Clear old space + this.nodeGraphMetaDb.clear(); + this.nodeGraphBucketsDb.clear(); + this.nodeGraphLastUpdatedDb.clear(); + // Swap the spaces + this.space = spaceNew; + this.nodeGraphMetaDbDomain = nodeGraphMetaDbDomainNew; + this.nodeGraphBucketsDbDomain = nodeGraphBucketsDbDomainNew; + this.nodeGraphLastUpdatedDbDomain = nodeGraphLastUpdatedDbDomainNew; + this.nodeGraphMetaDb = nodeGraphMetaDbNew; + this.nodeGraphBucketsDb = nodeGraphBucketsDbNew; + this.nodeGraphLastUpdatedDb = nodeGraphLastUpdatedDbNew; } - /** - * Removes a node from the bucket database - * @param nodeId - */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async unsetNode(nodeId: NodeId): Promise { - return await this._transaction(async () => { - const ops = await this.unsetNodeOps(nodeId); - await this.db.batch(ops); - }); + public async getBucketMeta(bucketIndex: NodeBucketIndex): Promise { + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex(`bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`); + } + const metaDomain = [ + ...this.nodeGraphMetaDbDomain, + nodesUtils.bucketKey(bucketIndex) + ]; + const props = await Promise.all([ + this.db.get(metaDomain, 'count') + ]); + const [count] = props; + // Bucket meta properties have defaults + return { + count: count ?? 
0, + }; } - protected async unsetNodeOps(nodeId: NodeId): Promise> { - const bucketIndex = this.getBucketIndex(nodeId); - const bucket = await this.db.get( - this.nodeGraphBucketsDbDomain, - bucketIndex, - ); - const ops: Array = []; - if (bucket == null) { - return ops; + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getBucketMetaProp( + bucketIndex: NodeBucketIndex, + key: Key + ): Promise { + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex(`bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`); } - delete bucket[nodeId]; - if (Object.keys(bucket).length === 0) { - ops.push({ - type: 'del', - domain: this.nodeGraphBucketsDbDomain, - key: bucketIndex, - }); - } else { - ops.push({ - type: 'put', - domain: this.nodeGraphBucketsDbDomain, - key: bucketIndex, - value: bucket, - }); + const metaDomain = [ + ...this.nodeGraphMetaDbDomain, + nodesUtils.bucketKey(bucketIndex) + ]; + // Bucket meta properties have defaults + let value; + switch (key) { + case 'count': + value = await this.db.get( + metaDomain, + key + ) ?? 0; + break; } - return ops; + return value; } /** - * Find the correct index of the k-bucket to add a new node to (for this node's - * bucket database). Packs it as a lexicographic integer, such that the order - * of buckets in leveldb is numerical order. + * Sets a bucket meta property + * This is protected because users cannot directly manipulate bucket meta */ - protected getBucketIndex(nodeId: NodeId): string { - const index = nodesUtils.calculateBucketIndex( - this.keyManager.getNodeId(), - nodeId, - ); - return lexi.pack(index, 'hex') as string; - } - - /** - * Returns all of the buckets in an array - */ - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getAllBuckets(): Promise> { - return await this._transaction(async () => { - const buckets: Array = []; - for await (const o of this.nodeGraphBucketsDb.createReadStream()) { - const data = (o as any).value as Buffer; - const bucket = await this.db.deserializeDecrypt( - data, - false, - ); - buckets.push(bucket); - } - return buckets; - }); + protected async setBucketMetaProp( + bucketIndex: NodeBucketIndex, + key: Key, + value: NodeBucketMeta[Key], + ): Promise { + const metaDomain = [ + ...this.nodeGraphMetaDbDomain, + nodesUtils.bucketKey(bucketIndex) + ]; + await this.db.put(metaDomain, key, value); + return; } /** - * To be called on key renewal. Re-orders all nodes in all buckets with respect - * to the new node ID. - * NOTE: original nodes may be lost in this process. If they're redistributed - * to a newly full bucket, the least active nodes in the newly full bucket - * will be removed. + * Derive the bucket index of the k-buckets from the new `NodeId` + * The bucket key is the string encoded version of bucket index + * that preserves lexicographic order */ - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async refreshBuckets(): Promise { - return await this._transaction(async () => { - const ops: Array = []; - // Get a local copy of all the buckets - const buckets = await this.getAllBuckets(); - // Wrap as a batch operation. We want to rollback if we encounter any - // errors (such that we don't clear the DB without re-adding the nodes) - // 1. 
Delete every bucket - for await (const k of this.nodeGraphBucketsDb.createKeyStream()) { - const hexBucketIndex = k as string; - ops.push({ - type: 'del', - domain: this.nodeGraphBucketsDbDomain, - key: hexBucketIndex, - }); - } - const tempBuckets: Record = {}; - // 2. Re-add all the nodes from all buckets - for (const b of buckets) { - for (const n of Object.keys(b)) { - const nodeId = IdInternal.fromString(n); - const newIndex = this.getBucketIndex(nodeId); - let expectedBucket = tempBuckets[newIndex]; - // The following is more or less copied from setNodeOps - if (expectedBucket == null) { - expectedBucket = {}; - } - const bucketEntries = Object.entries(expectedBucket); - // Add the old node - expectedBucket[nodeId] = { - address: b[nodeId].address, - lastUpdated: b[nodeId].lastUpdated, - }; - // If, with the old node added, we exceed the limit - if (bucketEntries.length > this.maxNodesPerBucket) { - // Then, with the old node added, find the least active and remove - const leastActive = bucketEntries.reduce((prev, curr) => { - return prev[1].lastUpdated < curr[1].lastUpdated ? prev : curr; - }); - delete expectedBucket[leastActive[0]]; - } - // Add this reconstructed bucket (with old node) into the temp storage - tempBuckets[newIndex] = expectedBucket; - } - } - // Now that we've reconstructed all the buckets, perform batch operations - // on a bucket level (i.e. per bucket, instead of per node) - for (const bucketIndex in tempBuckets) { - ops.push({ - type: 'put', - domain: this.nodeGraphBucketsDbDomain, - key: bucketIndex, - value: tempBuckets[bucketIndex], - }); - } - await this.db.batch(ops); - }); + protected bucketIndex(nodeId: NodeId): [NodeBucketIndex, string] { + const nodeIdOwn = this.keyManager.getNodeId(); + if (nodeId.equals(nodeIdOwn)) { + throw new nodesErrors.ErrorNodeGraphSameNodeId(); + } + const bucketIndex = nodesUtils.bucketIndex( + nodeIdOwn, + nodeId, + ); + const bucketKey = nodesUtils.bucketKey(bucketIndex) + return [bucketIndex, bucketKey]; } } diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index b283436676..a45a0bcdb0 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -306,7 +306,7 @@ class NodeManager { public async getNodeAddress( nodeId: NodeId, ): Promise { - return await this.nodeGraph.getNode(nodeId); + return (await this.nodeGraph.getNode(nodeId))?.address; } /** @@ -315,7 +315,7 @@ class NodeManager { * @returns true if the node exists in the table, false otherwise */ public async knowsNode(targetNodeId: NodeId): Promise { - return await this.nodeGraph.knowsNode(targetNodeId); + return await this.nodeGraph.getNode(targetNodeId) != null; } /** @@ -335,15 +335,16 @@ class NodeManager { return await this.nodeGraph.setNode(nodeId, nodeAddress); } - /** - * Updates the node in the NodeGraph - */ - public async updateNode( - nodeId: NodeId, - nodeAddress?: NodeAddress, - ): Promise { - return await this.nodeGraph.updateNode(nodeId, nodeAddress); - } + // FIXME + // /** + // * Updates the node in the NodeGraph + // */ + // public async updateNode( + // nodeId: NodeId, + // nodeAddress?: NodeAddress, + // ): Promise { + // return await this.nodeGraph.updateNode(nodeId, nodeAddress); + // } /** * Removes a node from the NodeGraph @@ -352,19 +353,22 @@ class NodeManager { return await this.nodeGraph.unsetNode(nodeId); } - /** - * Gets all buckets from the NodeGraph - */ - public async getAllBuckets(): Promise> { - return await this.nodeGraph.getAllBuckets(); - } + // FIXME + // /** + // * Gets all buckets from the 
NodeGraph + // */ + // public async getAllBuckets(): Promise> { + // return await this.nodeGraph.getBuckets(); + // } + // FIXME /** * To be called on key renewal. Re-orders all nodes in all buckets with respect * to the new node ID. */ public async refreshBuckets(): Promise { - return await this.nodeGraph.refreshBuckets(); + throw Error('fixme') + // return await this.nodeGraph.refreshBuckets(); } } diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index d45c83474e..873a441100 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -37,6 +37,11 @@ class ErrorNodeGraphSameNodeId extends ErrorNodes { exitCode = sysexits.USAGE; } +class ErrorNodeGraphBucketIndex extends ErrorNodes { + description: 'Bucket index is out of range'; + exitCode = sysexits.USAGE; +} + class ErrorNodeConnectionDestroyed extends ErrorNodes { description = 'NodeConnection is destroyed'; exitCode = sysexits.USAGE; @@ -71,6 +76,7 @@ export { ErrorNodeGraphEmptyDatabase, ErrorNodeGraphOversizedBucket, ErrorNodeGraphSameNodeId, + ErrorNodeGraphBucketIndex, ErrorNodeConnectionDestroyed, ErrorNodeConnectionTimeout, ErrorNodeConnectionInfoNotExist, diff --git a/src/nodes/types.ts b/src/nodes/types.ts index ffb9168511..1460e25143 100644 --- a/src/nodes/types.ts +++ b/src/nodes/types.ts @@ -1,9 +1,13 @@ import type { Id } from '@matrixai/id'; -import type { Opaque } from '../types'; +import type { Opaque, NonFunctionProperties } from '../types'; import type { Host, Hostname, Port } from '../network/types'; import type { Claim, ClaimId } from '../claims/types'; import type { ChainData } from '../sigchain/types'; +// this should be a string +// actually cause it is a domain +type NodeGraphSpace = '0' | '1'; + type NodeId = Opaque<'NodeId', Id>; type NodeIdString = Opaque<'NodeIdString', string>; type NodeIdEncoded = Opaque<'NodeIdEncoded', string>; @@ -13,9 +17,47 @@ type NodeAddress = { port: Port; }; -type SeedNodes = Record; +type NodeBucketIndex = number; +// type NodeBucket = Record; + +// TODO: +// No longer need to use NodeIdString +// It's an array, if you want to lookup +// It's ordered by the last updated date +// On the other hand, does this matter +// Not really? 
+// USE THIS TYPE INSTEAD +type NodeBucket = Array<[NodeId, NodeData]>; + +type NodeBucketMeta = { + count: number; +}; + +type NodeBucketMetaProps = NonFunctionProperties; + +// just make the bucket entries also +// bucketIndex anot as a key +// but as the domain +// !!NodeGraph!!meta!!ff!!count + + type NodeData = { + address: NodeAddress; + lastUpdated: number; +}; + + + +// type NodeBucketEntry = { +// address: NodeAddress; +// lastUpdated: Date; +// }; + +type SeedNodes = Record; + +// FIXME: should have a proper name +type NodeEntry = { id: NodeId; address: NodeAddress; distance: BigInt; @@ -41,16 +83,6 @@ type NodeInfo = { chain: ChainData; }; -type NodeBucketIndex = number; - -// The data type to be stored in each leveldb entry for the node table -type NodeBucket = { - [key: string]: { - address: NodeAddress; - lastUpdated: Date; - }; -}; - // Only 1 domain, so don't need a 'domain' value (like /gestalts/types.ts) type NodeGraphOp_ = { // Bucket index @@ -72,10 +104,17 @@ export type { NodeIdEncoded, NodeAddress, SeedNodes, - NodeData, NodeClaim, NodeInfo, + NodeBucketIndex, + NodeBucketMeta, NodeBucket, + + NodeData, + NodeEntry, + // NodeBucketEntry, + NodeGraphOp, + NodeGraphSpace, }; diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index 696e31d43b..38c5e5b028 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -1,29 +1,70 @@ -import type { NodeData, NodeId, NodeIdEncoded } from './types'; +import type { NodeData, NodeId, NodeIdEncoded, NodeBucket, NodeIdString, NodeBucketIndex } from './types'; +import { utils as dbUtils } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; -import { bytes2BigInt } from '../utils'; +import lexi from 'lexicographic-integer'; +import { bytes2BigInt, bufferSplit } from '../utils'; + +// FIXME: +const prefixBuffer = Buffer.from([33]); +// const prefixBuffer = Buffer.from(dbUtils.prefix); /** - * Compute the distance between two nodes. - * distance = nodeId1 ^ nodeId2 - * where ^ = bitwise XOR operator + * Encodes the NodeId as a `base32hex` string */ -function calculateDistance(nodeId1: NodeId, nodeId2: NodeId): bigint { - const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); - return bytes2BigInt(distance); +function encodeNodeId(nodeId: NodeId): NodeIdEncoded { + return nodeId.toMultibase('base32hex') as NodeIdEncoded; } /** - * Find the correct index of the k-bucket to add a new node to. + * Decodes an encoded NodeId string into a NodeId + */ +function decodeNodeId(nodeIdEncoded: any): NodeId | undefined { + if (typeof nodeIdEncoded !== 'string') { + return; + } + const nodeId = IdInternal.fromMultibase(nodeIdEncoded); + if (nodeId == null) { + return; + } + // All NodeIds are 32 bytes long + // The NodeGraph requires a fixed size for Node Ids + if (nodeId.length !== 32) { + return; + } + return nodeId; +} + +/** + * Calculate the bucket index that the target node should be located in * A node's k-buckets are organised such that for the ith k-bucket where * 0 <= i < nodeIdBits, the contacts in this ith bucket are known to adhere to * the following inequality: * 2^i <= distance (from current node) < 2^(i+1) + * This means lower buckets will have less nodes then the upper buckets. + * The highest bucket will contain half of all possible nodes. + * The lowest bucket will only contain 1 node. * * NOTE: because XOR is a commutative operation (i.e. a XOR b = b XOR a), the * order of the passed parameters is actually irrelevant. These variables are * purely named for communicating function purpose. 
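+ *
+ * For example, if two NodeIds differ only in their lowest bit, the XOR
+ * distance is 1, and since 2^0 <= 1 < 2^1 the target falls into bucket 0
+ * (the closest and smallest bucket); a distance whose highest set bit is
+ * bit 255 falls into bucket 255 (the farthest and largest bucket).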
+ * + * NOTE: Kademlia literature generally talks about buckets with 1-based indexing + * and that the buckets are ordered from largest to smallest. This means the first + * 1th-bucket is far & large bucket, and the last 255th-bucket is the close bucket. + * This is reversed in our `NodeBucketIndex` encoding. This is so that lexicographic + * sort orders our buckets from closest bucket to farthest bucket. + * + * To convert from `NodeBucketIndex` to nth-bucket in Kademlia literature: + * + * | NodeBucketIndex | Nth-Bucket | + * | --------------- | ---------- | + * | 255 | 1 | farthest & largest + * | 254 | 2 | + * | ... | ... | + * | 1 | 254 | + * | 0 | 256 | closest & smallest */ -function calculateBucketIndex(sourceNode: NodeId, targetNode: NodeId): number { +function bucketIndex(sourceNode: NodeId, targetNode: NodeId): NodeBucketIndex { const distance = sourceNode.map((byte, i) => byte ^ targetNode[i]); const MSByteIndex = distance.findIndex((byte) => byte !== 0); if (MSByteIndex === -1) { @@ -37,48 +78,215 @@ function calculateBucketIndex(sourceNode: NodeId, targetNode: NodeId): number { } /** - * A sorting compareFn to sort an array of NodeData by increasing distance. + * Encodes bucket index to bucket sublevel key */ -function sortByDistance(a: NodeData, b: NodeData) { - if (a.distance > b.distance) { - return 1; - } else if (a.distance < b.distance) { - return -1; - } else { - return 0; +function bucketKey(bucketIndex: NodeBucketIndex): string { + return lexi.pack(bucketIndex, 'hex'); +} + +/** + * Creates key for buckets sublevel + */ +function bucketsDbKey(bucketIndex: NodeBucketIndex, nodeId: NodeId): Buffer { + return Buffer.concat([ + prefixBuffer, + Buffer.from(bucketKey(bucketIndex)), + prefixBuffer, + bucketDbKey(nodeId) + ]); +} + +/** + * Creates key for single bucket sublevel + */ +function bucketDbKey(nodeId: NodeId): Buffer { + return nodeId.toBuffer(); +} + +/** + * Creates key for buckets indexed by lastUpdated sublevel + */ +function lastUpdatedBucketsDbKey(bucketIndex: NodeBucketIndex, lastUpdated: number, nodeId: NodeId): Buffer { + return Buffer.concat([ + prefixBuffer, + Buffer.from(bucketKey(bucketIndex)), + prefixBuffer, + lastUpdatedBucketDbKey(lastUpdated, nodeId) + ]); +} + +/** + * Creates key for single bucket indexed by lastUpdated sublevel + */ +function lastUpdatedBucketDbKey(lastUpdated: number, nodeId: NodeId): Buffer { + return Buffer.concat([ + Buffer.from(lexi.pack(lastUpdated, 'hex')), + Buffer.from('-'), + nodeId.toBuffer() + ]); +} + +/** + * Parse the NodeGraph buckets sublevel key + * The keys look like `!!` + * It is assumed that the `!` is the sublevel prefix. 
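+ * Concretely, this parses the layout produced by `bucketsDbKey` above, i.e.
+ * `!` + lexi.pack(bucketIndex, 'hex') + `!` + nodeId.toBuffer()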
+ */ +function parseBucketsDbKey(keyBuffer: Buffer): { + bucketIndex: NodeBucketIndex; + bucketKey: string; + nodeId: NodeId; +} { + const [, bucketKeyBuffer, nodeIdBuffer] = bufferSplit( + keyBuffer, + prefixBuffer, + 3, + true + ); + if (bucketKeyBuffer == null || nodeIdBuffer == null) { + throw new TypeError('Buffer is not an NodeGraph buckets key'); } + const bucketKey = bucketKeyBuffer.toString(); + const bucketIndex = lexi.unpack(bucketKey); + const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + return { + bucketIndex, + bucketKey, + nodeId + }; } /** - * Encodes the NodeId as a `base32hex` string + * Parse the NodeGraph bucket key + * The keys look like `` */ -function encodeNodeId(nodeId: NodeId): NodeIdEncoded { - return nodeId.toMultibase('base32hex') as NodeIdEncoded; +function parseBucketDbKey(keyBuffer: Buffer): NodeId { + const nodeId = IdInternal.fromBuffer(keyBuffer); + return nodeId; } /** - * Decodes an encoded NodeId string into a NodeId + * Parse the NodeGraph index sublevel key + * The keys look like `!!-` + * It is assumed that the `!` is the sublevel prefix. */ -function decodeNodeId(nodeIdEncoded: any): NodeId | undefined { - if (typeof nodeIdEncoded !== 'string') { - return; +function parseLastUpdatedBucketsDbKey(keyBuffer: Buffer): { + bucketIndex: NodeBucketIndex; + bucketKey: string; + lastUpdated: number; + nodeId: NodeId; +} { + const [, bucketKeyBuffer, lastUpdatedBuffer] = bufferSplit( + keyBuffer, + prefixBuffer, + 3, + true + ); + if (bucketKeyBuffer == null || lastUpdatedBuffer == null) { + throw new TypeError('Buffer is not an NodeGraph index key'); } - const nodeId = IdInternal.fromMultibase(nodeIdEncoded); - if (nodeId == null) { - return; + const bucketKey = bucketKeyBuffer.toString(); + const bucketIndex = lexi.unpack(bucketKey); + if (bucketIndex == null) { + throw new TypeError('Buffer is not an NodeGraph index key'); } - // All NodeIds are 32 bytes long - // The NodeGraph requires a fixed size for Node Ids - if (nodeId.length !== 32) { - return; + const { + lastUpdated, + nodeId + } = parseLastUpdatedBucketDbKey(lastUpdatedBuffer); + return { + bucketIndex, + bucketKey, + lastUpdated, + nodeId + }; +} + +/** + * Parse the NodeGraph index bucket sublevel key + * The keys look like `-` + * It is assumed that the `!` is the sublevel prefix. + */ +function parseLastUpdatedBucketDbKey(keyBuffer: Buffer): { + lastUpdated: number; + nodeId: NodeId; +} { + const [lastUpdatedBuffer, nodeIdBuffer] = bufferSplit( + keyBuffer, + Buffer.from('-'), + 2, + true + ); + if (lastUpdatedBuffer == null || nodeIdBuffer == null) { + throw new TypeError('Buffer is not an NodeGraph index bucket key'); + } + const lastUpdated = lexi.unpack(lastUpdatedBuffer.toString()); + if (lastUpdated == null) { + throw new TypeError('Buffer is not an NodeGraph index bucket key'); + } + const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + return { + lastUpdated, + nodeId + }; +} + +/** + * Compute the distance between two nodes. + * distance = nodeId1 ^ nodeId2 + * where ^ = bitwise XOR operator + */ +function nodeDistance(nodeId1: NodeId, nodeId2: NodeId): bigint { + const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); + return bytes2BigInt(distance); +} + +function bucketSortByDistance( + bucket: NodeBucket, + nodeId: NodeId, + order: 'asc' | 'desc' = 'asc' +): void { + const distances = {}; + if (order === 'asc') { + bucket.sort(([nodeId1], [nodeId2]) => { + const d1 = distances[nodeId1] = distances[nodeId1] ?? 
nodeDistance(nodeId, nodeId1); + const d2 = distances[nodeId2] = distances[nodeId2] ?? nodeDistance(nodeId, nodeId2); + if (d1 < d2) { + return -1; + } else if (d1 > d2) { + return 1; + } else { + return 0; + } + }); + } else { + bucket.sort(([nodeId1], [nodeId2]) => { + const d1 = distances[nodeId1] = distances[nodeId1] ?? nodeDistance(nodeId, nodeId1); + const d2 = distances[nodeId2] = distances[nodeId2] ?? nodeDistance(nodeId, nodeId2); + if (d1 > d2) { + return -1; + } else if (d1 < d2) { + return 1; + } else { + return 0; + } + }); } - return nodeId; } export { - calculateDistance, - calculateBucketIndex, - sortByDistance, + prefixBuffer, encodeNodeId, decodeNodeId, + bucketIndex, + bucketKey, + bucketsDbKey, + bucketDbKey, + lastUpdatedBucketsDbKey, + lastUpdatedBucketDbKey, + parseBucketsDbKey, + parseBucketDbKey, + parseLastUpdatedBucketsDbKey, + parseLastUpdatedBucketDbKey, + nodeDistance, + bucketSortByDistance, }; diff --git a/src/types.ts b/src/types.ts index b09954b32f..6762c5fba6 100644 --- a/src/types.ts +++ b/src/types.ts @@ -72,6 +72,24 @@ interface FileSystem { type FileHandle = fs.promises.FileHandle; +type FunctionPropertyNames = { + [K in keyof T]: T[K] extends (...args: any[]) => any ? K : never; +}[keyof T]; + +/** + * Functional properties of an object + */ +type FunctionProperties = Pick>; + +type NonFunctionPropertyNames = { + [K in keyof T]: T[K] extends (...args: any[]) => any ? never : K; +}[keyof T]; + +/** + * Non-functional properties of an object + */ +type NonFunctionProperties = Pick>; + export type { POJO, Opaque, @@ -83,4 +101,6 @@ export type { Timer, FileSystem, FileHandle, + FunctionProperties, + NonFunctionProperties, }; diff --git a/src/utils/context.ts b/src/utils/context.ts index d4102debc0..ad6af69ee9 100644 --- a/src/utils/context.ts +++ b/src/utils/context.ts @@ -2,7 +2,7 @@ type ResourceAcquire = () => Promise< readonly [ResourceRelease, Resource?] >; -type ResourceRelease = () => Promise; +type ResourceRelease = (e?: Error) => Promise; type Resources[]> = { [K in keyof T]: T[K] extends ResourceAcquire ? 
R : never; @@ -22,6 +22,7 @@ async function withF< ): Promise { const releases: Array = []; const resources: Array = []; + let e_: Error | undefined; try { for (const acquire of acquires) { const [release, resource] = await acquire(); @@ -29,10 +30,13 @@ async function withF< resources.push(resource); } return await f(resources as unknown as Resources); + } catch (e) { + e_ = e; + throw e; } finally { releases.reverse(); for (const release of releases) { - await release(); + await release(e_); } } } @@ -55,6 +59,7 @@ async function* withG< ): AsyncGenerator { const releases: Array = []; const resources: Array = []; + let e_: Error | undefined; try { for (const acquire of acquires) { const [release, resource] = await acquire(); @@ -62,10 +67,13 @@ async function* withG< resources.push(resource); } return yield* g(resources as unknown as Resources); + } catch (e) { + e_ = e; + throw e; } finally { releases.reverse(); for (const release of releases) { - await release(); + await release(e_); } } } diff --git a/src/utils/index.ts b/src/utils/index.ts index cbb38a8bef..08bc47f16a 100644 --- a/src/utils/index.ts +++ b/src/utils/index.ts @@ -4,4 +4,5 @@ export * from './context'; export * from './utils'; export * from './matchers'; export * from './binary'; +export * from './random'; export * as errors from './errors'; diff --git a/src/utils/locks.ts b/src/utils/locks.ts index eb6f952450..b097dab16d 100644 --- a/src/utils/locks.ts +++ b/src/utils/locks.ts @@ -73,6 +73,14 @@ class RWLock { return this.readersLock.isLocked() || this.writersLock.isLocked(); } + public isLockedReader(): boolean { + return this.readersLock.isLocked(); + } + + public isLockedWriter(): boolean { + return this.writersLock.isLocked(); + } + public async waitForUnlock(): Promise { await Promise.all([ this.readersLock.waitForUnlock(), diff --git a/src/utils/random.ts b/src/utils/random.ts new file mode 100644 index 0000000000..e8dcb7aea0 --- /dev/null +++ b/src/utils/random.ts @@ -0,0 +1,13 @@ +/** + * Gets a random number between min (inc) and max (exc) + * This is not cryptographically-secure + */ +function getRandomInt(min: number, max: number) { + min = Math.ceil(min); + max = Math.floor(max); + return Math.floor(Math.random() * (max - min + 1)) + min; +} + +export { + getRandomInt +}; diff --git a/src/utils/utils.ts b/src/utils/utils.ts index 6b4ca47596..fb361cb1ca 100644 --- a/src/utils/utils.ts +++ b/src/utils/utils.ts @@ -220,6 +220,70 @@ function arrayZipWithPadding( ]); } +async function asyncIterableArray( + iterable: AsyncIterable +): Promise> { + const arr: Array = []; + for await (const item of iterable) { + arr.push(item); + } + return arr; +} + +function bufferSplit( + input: Buffer, + delimiter?: Buffer, + limit?: number, + remaining: boolean = false +): Array { + const output: Array = []; + let delimiterOffset = 0; + let delimiterIndex = 0; + let i = 0; + if (delimiter != null) { + while (true) { + if (i === limit) break; + delimiterIndex = input.indexOf( + delimiter, + delimiterOffset + ); + if (delimiterIndex > -1) { + output.push(input.subarray(delimiterOffset, delimiterIndex)); + delimiterOffset = delimiterIndex + delimiter.byteLength; + } else { + const chunk = input.subarray(delimiterOffset); + output.push(chunk); + delimiterOffset += chunk.byteLength; + break; + } + i++; + } + } else { + for (;delimiterIndex < input.byteLength;) { + if (i === limit) break; + delimiterIndex++; + const chunk = input.subarray(delimiterOffset, delimiterIndex); + output.push(chunk); + delimiterOffset += chunk.byteLength; 
+ i++; + } + } + // If remaining, then the rest of the input including delimiters is extracted + if ( + remaining + && limit != null + && output.length > 0 + && delimiterIndex > -1 + && delimiterIndex <= input.byteLength + ) { + const inputRemaining = input.subarray( + delimiterIndex - output[output.length - 1].byteLength + ); + output[output.length - 1] = inputRemaining; + } + return output; +} + function debounce
<P extends Array<any>>
( f: (...params: P) => any, timeout: number = 0, @@ -250,5 +314,7 @@ export { arrayUnset, arrayZip, arrayZipWithPadding, + asyncIterableArray, + bufferSplit, debounce, }; diff --git a/src/validation/utils.ts b/src/validation/utils.ts index 3ce13f258a..020c1f51a1 100644 --- a/src/validation/utils.ts +++ b/src/validation/utils.ts @@ -165,7 +165,7 @@ function parseHostOrHostname(data: any): Host | Hostname { * Parses number into a Port * Data can be a string-number */ -function parsePort(data: any): Port { +function parsePort(data: any, connect: boolean = false): Port { if (typeof data === 'string') { try { data = parseInteger(data); @@ -176,10 +176,16 @@ function parsePort(data: any): Port { throw e; } } - if (!networkUtils.isPort(data)) { - throw new validationErrors.ErrorParse( - 'Port must be a number between 0 and 65535 inclusive', - ); + if (!networkUtils.isPort(data, connect)) { + if (!connect) { + throw new validationErrors.ErrorParse( + 'Port must be a number between 0 and 65535 inclusive', + ); + } else { + throw new validationErrors.ErrorParse( + 'Port must be a number between 1 and 65535 inclusive', + ); + } } return data; } diff --git a/test-iterator.ts b/test-iterator.ts new file mode 100644 index 0000000000..82a21762cc --- /dev/null +++ b/test-iterator.ts @@ -0,0 +1,31 @@ + + +function getYouG () { + console.log('ALREADY EXECUTED'); + return abc(); +} + +async function *abc() { + console.log('START'); + yield 1; + yield 2; + yield 3; +} + +async function main () { + + // we would want that you don't iterate it + + const g = getYouG(); + + await g.next(); + + // console.log('SUP'); + + // for await (const r of abc()) { + // console.log(r); + // } + +} + +main(); diff --git a/test-lexi.ts b/test-lexi.ts new file mode 100644 index 0000000000..b48f9cea19 --- /dev/null +++ b/test-lexi.ts @@ -0,0 +1,4 @@ +import lexi from 'lexicographic-integer'; + + +console.log(lexi.pack(1646203779)); diff --git a/test-nodegraph.ts b/test-nodegraph.ts new file mode 100644 index 0000000000..33bd58bb7d --- /dev/null +++ b/test-nodegraph.ts @@ -0,0 +1,107 @@ +import type { NodeId, NodeAddress } from './src/nodes/types'; +import { DB } from '@matrixai/db'; +import { IdInternal } from '@matrixai/id'; +import * as keysUtils from './src/keys/utils'; +import * as nodesUtils from './src/nodes/utils'; +import NodeGraph from './src/nodes/NodeGraph'; +import KeyManager from './src/keys/KeyManager'; + +function generateRandomNodeId(readable: boolean = false): NodeId { + if (readable) { + const random = keysUtils.getRandomBytesSync(16).toString('hex'); + return IdInternal.fromString(random); + } else { + const random = keysUtils.getRandomBytesSync(32); + return IdInternal.fromBuffer(random); + } +} + +async function main () { + + const db = await DB.createDB({ + dbPath: './tmp/db' + }); + + const keyManager = await KeyManager.createKeyManager({ + keysPath: './tmp/keys', + password: 'abc123', + // fresh: true + }); + + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + fresh: true + }); + + for (let i = 0; i < 10; i++) { + await nodeGraph.setNode( + generateRandomNodeId(), + { + host: '127.0.0.1', + port: 55555 + } as NodeAddress + ); + } + + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { + + // the bucket lengths are wrong + console.log( + 'BucketIndex', + bucketIndex, + 'Bucket Count', + bucket.length, + ); + + // console.log(bucket); + for (const [nodeId, nodeData] of bucket) { + // console.log('NODEID', nodeId); + // console.log('NODEDATA', nodeData); + // 
console.log(nodeData.address); + } + } + + for await (const [nodeId, nodeData] of nodeGraph.getNodes()) { + // console.log(nodeId, nodeData); + } + + const bucket = await nodeGraph.getBucket(255, 'lastUpdated'); + console.log(bucket.length); + + // console.log('OLD NODE ID', keyManager.getNodeId()); + // const newNodeId = generateRandomNodeId(); + // console.log('NEW NODE ID', newNodeId); + + // console.log('---------FIRST RESET--------'); + + // await nodeGraph.resetBuckets(newNodeId); + // for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { + // console.log( + // 'BucketIndex', + // bucketIndex, + // 'Bucket Count', + // Object.keys(bucket).length + // ); + // } + + + // console.log('---------SECOND RESET--------'); + // const newNodeId2 = generateRandomNodeId(); + // await nodeGraph.resetBuckets(newNodeId2); + + // for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { + // console.log( + // 'BucketIndex', + // bucketIndex, + // 'Bucket Count', + // Object.keys(bucket).length + // ); + // } + + await nodeGraph.stop(); + await keyManager.stop(); + await db.stop(); +} + +main(); diff --git a/test-nodeidgen.ts b/test-nodeidgen.ts new file mode 100644 index 0000000000..2f79bddda8 --- /dev/null +++ b/test-nodeidgen.ts @@ -0,0 +1,44 @@ +import type { NodeId } from './src/nodes/types'; +import { IdInternal } from '@matrixai/id'; +import * as keysUtils from './src/keys/utils'; +import * as nodesUtils from './src/nodes/utils'; + +function generateRandomNodeId(readable: boolean = false): NodeId { + if (readable) { + const random = keysUtils.getRandomBytesSync(16).toString('hex'); + return IdInternal.fromString(random); + } else { + const random = keysUtils.getRandomBytesSync(32); + return IdInternal.fromBuffer(random); + } +} + +async function main () { + + const firstNodeId = generateRandomNodeId(); + + + let lastBucket = 0; + let penultimateBucket = 0; + let lowerBuckets = 0; + + for (let i = 0; i < 1000; i++) { + const nodeId = generateRandomNodeId(); + const bucketIndex = nodesUtils.bucketIndex(firstNodeId, nodeId); + if (bucketIndex === 255) { + lastBucket++; + } else if (bucketIndex === 254) { + penultimateBucket++; + } else { + lowerBuckets++; + } + } + + console.log(lastBucket); + console.log(penultimateBucket); + console.log(lowerBuckets); + + +} + +main(); diff --git a/test-order.ts b/test-order.ts new file mode 100644 index 0000000000..f6046d6da7 --- /dev/null +++ b/test-order.ts @@ -0,0 +1,98 @@ +import { DB } from '@matrixai/db'; +import lexi from 'lexicographic-integer'; +import { getUnixtime, hex2Bytes } from './src/utils'; + +async function main () { + + const db = await DB.createDB({ + dbPath: './tmp/orderdb', + fresh: true + }); + + await db.put([], 'node1', 'value'); + await db.put([], 'node2', 'value'); + await db.put([], 'node3', 'value'); + await db.put([], 'node4', 'value'); + await db.put([], 'node5', 'value'); + await db.put([], 'node6', 'value'); + await db.put([], 'node7', 'value'); + + const now = new Date; + const t1 = new Date(now.getTime() + 1000 * 1); + const t2 = new Date(now.getTime() + 1000 * 2); + const t3 = new Date(now.getTime() + 1000 * 3); + const t4 = new Date(now.getTime() + 1000 * 4); + const t5 = new Date(now.getTime() + 1000 * 5); + const t6 = new Date(now.getTime() + 1000 * 6); + const t7 = new Date(now.getTime() + 1000 * 7); + + // so unix time is only what we really need to know + // further precision is unlikely + // and hex-packed time is shorter keys + // so it is likely faster + // the only issue is that unpacking 
requires + // converting hex into bytes, then into strings + + // console.log(t1.getTime()); + // console.log(getUnixtime(t1)); + // console.log(lexi.pack(getUnixtime(t1), 'hex')); + // console.log(lexi.pack(t1.getTime(), 'hex')); + // console.log(t1.toISOString()); + + + // buckets0!BUCKETINDEX!NODEID + // buckets0!BUCKETINDEX!date + + // Duplicate times that are put here + // But differentiate by the node1, node2 + await db.put([], lexi.pack(getUnixtime(t6), 'hex') + '-node1', 'value'); + await db.put([], lexi.pack(getUnixtime(t6), 'hex') + '-node2', 'value'); + + await db.put([], lexi.pack(getUnixtime(t1), 'hex') + '-node3', 'value'); + await db.put([], lexi.pack(getUnixtime(t4), 'hex') + '-node4', 'value'); + await db.put([], lexi.pack(getUnixtime(t3), 'hex') + '-node5', 'value'); + await db.put([], lexi.pack(getUnixtime(t2), 'hex') + '-node6', 'value'); + await db.put([], lexi.pack(getUnixtime(t5), 'hex') + '-node7', 'value'); + + // await db.put([], t6.toISOString() + '-node1', 'value'); + // await db.put([], t6.toISOString() + '-node2', 'value'); + + // await db.put([], t1.toISOString() + '-node3', 'value'); + // await db.put([], t4.toISOString() + '-node4', 'value'); + // await db.put([], t3.toISOString() + '-node5', 'value'); + // await db.put([], t2.toISOString() + '-node6', 'value'); + // await db.put([], t5.toISOString() + '-node7', 'value'); + + // Why did this require `-node3` + + // this will awlays get one or the other + + // ok so we if we want to say get a time + // or order it by time + // we are goingto have to create read stream over the bucket right? + // yea so we would have another sublevel, or at least a sublevel formed by the bucket + // one that is the bucket index + // so that would be the correct way to do it + + for await (const o of db.db.createReadStream({ + gte: lexi.pack(getUnixtime(t1), 'hex'), + limit: 1, + // keys: true, + // values: true, + // lte: lexi.pack(getUnixtime(t6)) + })) { + + console.log(o.key.toString()); + + } + + await db.stop(); + + + // so it works + // now if you give it something liek + + +} + +main(); diff --git a/test-sorting.ts b/test-sorting.ts new file mode 100644 index 0000000000..1692fa83f9 --- /dev/null +++ b/test-sorting.ts @@ -0,0 +1,28 @@ +import * as testNodesUtils from './tests/nodes/utils'; + +const arr = [ + { a: 'abc', b: 3}, + { a: 'abc', b: 1}, + { a: 'abc', b: 0}, +]; + +arr.sort((a, b): number => { + if (a.b > b.b) { + return 1; + } else if (a.b < b.b) { + return -1; + } else { + return 0; + } +}); + +console.log(arr); + +const arr2 = [3, 1, 0]; + +arr2.sort(); + +console.log(arr2); + + +console.log(testNodesUtils.generateRandomNodeId()); diff --git a/test-split.ts b/test-split.ts new file mode 100644 index 0000000000..ee06d75d64 --- /dev/null +++ b/test-split.ts @@ -0,0 +1,37 @@ + +function bufferSplit(input: Buffer, delimiter?: Buffer): Array { + const output: Array = []; + let delimiterIndex = 0; + let chunkIndex = 0; + if (delimiter != null) { + while (true) { + const i = input.indexOf( + delimiter, + delimiterIndex + ); + if (i > -1) { + output.push(input.subarray(chunkIndex, i)); + delimiterIndex = i + delimiter.byteLength; + chunkIndex = i + delimiter.byteLength; + } else { + output.push(input.subarray(chunkIndex)); + break; + } + } + } else { + for (let i = 0; i < input.byteLength; i++) { + output.push(input.subarray(i, i + 1)); + } + } + return output; +} + + +const b = Buffer.from('!a!!b!'); + +console.log(bufferSplit(b, Buffer.from('!!'))); +console.log(bufferSplit(b)); + +const s = '!a!!b!'; + 
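// For reference, the expected results here (the two bufferSplit calls above and
// the String.split call below):
//   bufferSplit(b, Buffer.from('!!'))  -> [<Buffer 21 61>, <Buffer 62 21>], i.e. ['!a', 'b!']
//   bufferSplit(b)                     -> six single-byte Buffers, one per byte of '!a!!b!'
//   s.split('!!')                      -> ['!a', 'b!'], matching the delimited Buffer case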
+console.log(s.split('!!')); diff --git a/test-trie.ts b/test-trie.ts new file mode 100644 index 0000000000..a17c4165d3 --- /dev/null +++ b/test-trie.ts @@ -0,0 +1,29 @@ +import * as utils from './src/utils'; +import * as nodesUtils from './src/nodes/utils'; + +// 110 +const ownNodeId = Buffer.from([6]); + +const i = 2; + +const maxDistance = utils.bigInt2Bytes(BigInt(2 ** i)); +const minDistance = utils.bigInt2Bytes(BigInt(2 ** (i - 1))); + +console.log('max distance', maxDistance, utils.bytes2Bits(maxDistance)); +console.log('min distance', minDistance, utils.bytes2Bits(minDistance)); + +// ownNodeId XOR maxdistance = GTE node id +const gte = ownNodeId.map((byte, i) => byte ^ maxDistance[i]); + +// ownNodeId XOR mindistance = LT node id +const lt = ownNodeId.map((byte, i) => byte ^ minDistance[i]); + +console.log('Lowest Distance Node (inc)', gte, utils.bytes2Bits(gte)); +console.log('Greatest Distance Node (exc)', lt, utils.bytes2Bits(lt)); + +// function nodeDistance(nodeId1: Buffer, nodeId2: Buffer): bigint { +// const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); +// return utils.bytes2BigInt(distance); +// } + +// console.log(nodeDistance(ownNodeId, Buffer.from([0]))); diff --git a/tests/acl/ACL.test.ts b/tests/acl/ACL.test.ts index a75819f2f8..14ea88cc81 100644 --- a/tests/acl/ACL.test.ts +++ b/tests/acl/ACL.test.ts @@ -10,7 +10,7 @@ import { DB } from '@matrixai/db'; import { ACL, errors as aclErrors } from '@/acl'; import { utils as keysUtils } from '@/keys'; import { utils as vaultsUtils } from '@/vaults'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe(ACL.name, () => { const logger = new Logger(`${ACL.name} test`, LogLevel.WARN, [ @@ -18,14 +18,14 @@ describe(ACL.name, () => { ]); // Node Ids - const nodeIdX = testUtils.generateRandomNodeId(); - const nodeIdY = testUtils.generateRandomNodeId(); - const nodeIdG1First = testUtils.generateRandomNodeId(); - const nodeIdG1Second = testUtils.generateRandomNodeId(); - const nodeIdG1Third = testUtils.generateRandomNodeId(); - const nodeIdG1Fourth = testUtils.generateRandomNodeId(); - const nodeIdG2First = testUtils.generateRandomNodeId(); - const nodeIdG2Second = testUtils.generateRandomNodeId(); + const nodeIdX = testNodesUtils.generateRandomNodeId(); + const nodeIdY = testNodesUtils.generateRandomNodeId(); + const nodeIdG1First = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Second = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Third = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Fourth = testNodesUtils.generateRandomNodeId(); + const nodeIdG2First = testNodesUtils.generateRandomNodeId(); + const nodeIdG2Second = testNodesUtils.generateRandomNodeId(); let dataDir: string; let db: DB; diff --git a/tests/agent/utils.ts b/tests/agent/utils.ts index 8cf77303e4..0cc40e087c 100644 --- a/tests/agent/utils.ts +++ b/tests/agent/utils.ts @@ -1,5 +1,4 @@ import type { Host, Port, ProxyConfig } from '@/network/types'; - import type { IAgentServiceServer } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; import type { KeyManager } from '@/keys'; import type { VaultManager } from '@/vaults'; @@ -18,7 +17,7 @@ import { GRPCClientAgent, AgentServiceService, } from '@/agent'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; async function openTestAgentServer({ keyManager, @@ -81,7 +80,7 @@ async function openTestAgentClient( new StreamHandler(), ]); const agentClient = await GRPCClientAgent.createGRPCClientAgent({ - 
nodeId: nodeId ?? testUtils.generateRandomNodeId(), + nodeId: nodeId ?? testNodesUtils.generateRandomNodeId(), host: '127.0.0.1' as Host, port: port as Port, logger: logger, diff --git a/tests/bin/nodes/add.test.ts b/tests/bin/nodes/add.test.ts index 062cf6cdf7..85b5987865 100644 --- a/tests/bin/nodes/add.test.ts +++ b/tests/bin/nodes/add.test.ts @@ -11,11 +11,12 @@ import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('add', () => { const logger = new Logger('add test', LogLevel.WARN, [new StreamHandler()]); const password = 'helloworld'; - const validNodeId = testUtils.generateRandomNodeId(); + const validNodeId = testNodesUtils.generateRandomNodeId(); const invalidNodeId = IdInternal.fromString('INVALIDID'); const validHost = '0.0.0.0'; const invalidHost = 'INVALIDHOST'; diff --git a/tests/bin/vaults/vaults.test.ts b/tests/bin/vaults/vaults.test.ts index e1cb0c6915..50bd0a9ac8 100644 --- a/tests/bin/vaults/vaults.test.ts +++ b/tests/bin/vaults/vaults.test.ts @@ -10,7 +10,7 @@ import * as vaultsUtils from '@/vaults/utils'; import sysexits from '@/utils/sysexits'; import NotificationsManager from '@/notifications/NotificationsManager'; import * as testBinUtils from '../utils'; -import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; jest.mock('@/keys/utils', () => ({ ...jest.requireActual('@/keys/utils'), @@ -374,7 +374,7 @@ describe('CLI vaults', () => { mockedSendNotification.mockImplementation(async (_) => {}); const vaultId = await polykeyAgent.vaultManager.createVault(vaultName); const vaultIdEncoded = vaultsUtils.encodeVaultId(vaultId); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), @@ -414,7 +414,7 @@ describe('CLI vaults', () => { ); const vaultIdEncoded1 = vaultsUtils.encodeVaultId(vaultId1); const vaultIdEncoded2 = vaultsUtils.encodeVaultId(vaultId2); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), @@ -485,7 +485,7 @@ describe('CLI vaults', () => { ); const vaultIdEncoded1 = vaultsUtils.encodeVaultId(vaultId1); const vaultIdEncoded2 = vaultsUtils.encodeVaultId(vaultId2); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), diff --git a/tests/claims/utils.test.ts b/tests/claims/utils.test.ts index f7c6e6410c..069a6dcef4 100644 --- a/tests/claims/utils.test.ts +++ b/tests/claims/utils.test.ts @@ -11,12 +11,13 @@ import * as claimsErrors from '@/claims/errors'; import { utils as keysUtils } from '@/keys'; import { utils as nodesUtils } from '@/nodes'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('claims/utils', () => { // Node Ids - const nodeId1 = testUtils.generateRandomNodeId(); + const nodeId1 = testNodesUtils.generateRandomNodeId(); const nodeId1Encoded = 
nodesUtils.encodeNodeId(nodeId1); - const nodeId2 = testUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); const nodeId2Encoded = nodesUtils.encodeNodeId(nodeId2); let publicKey: PublicKeyPem; diff --git a/tests/client/service/gestaltsDiscoveryByNode.test.ts b/tests/client/service/gestaltsDiscoveryByNode.test.ts index d03fe307ab..3603b5e5bf 100644 --- a/tests/client/service/gestaltsDiscoveryByNode.test.ts +++ b/tests/client/service/gestaltsDiscoveryByNode.test.ts @@ -26,6 +26,7 @@ import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('gestaltsDiscoveryByNode', () => { const logger = new Logger('gestaltsDiscoveryByNode test', LogLevel.WARN, [ @@ -35,7 +36,7 @@ describe('gestaltsDiscoveryByNode', () => { const authenticate = async (metaClient, metaServer = new Metadata()) => metaServer; const node: NodeInfo = { - id: nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()), + id: nodesUtils.encodeNodeId(testNodesUtils.generateRandomNodeId()), chain: {}, }; let mockedGenerateKeyPair: jest.SpyInstance; diff --git a/tests/client/service/notificationsRead.test.ts b/tests/client/service/notificationsRead.test.ts index 1b77af1a34..3080c25fb4 100644 --- a/tests/client/service/notificationsRead.test.ts +++ b/tests/client/service/notificationsRead.test.ts @@ -24,12 +24,13 @@ import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as clientUtils from '@/client/utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('notificationsRead', () => { const logger = new Logger('notificationsRead test', LogLevel.WARN, [ new StreamHandler(), ]); - const nodeIdSender = testUtils.generateRandomNodeId(); + const nodeIdSender = testNodesUtils.generateRandomNodeId(); const nodeIdSenderEncoded = nodesUtils.encodeNodeId(nodeIdSender); const password = 'helloworld'; const authenticate = async (metaClient, metaServer = new Metadata()) => diff --git a/tests/discovery/Discovery.test.ts b/tests/discovery/Discovery.test.ts index 70c4641dd9..71fda9e9a3 100644 --- a/tests/discovery/Discovery.test.ts +++ b/tests/discovery/Discovery.test.ts @@ -237,7 +237,7 @@ describe('Discovery', () => { discovery.queueDiscoveryByIdentity('' as ProviderId, '' as IdentityId), ).rejects.toThrow(discoveryErrors.ErrorDiscoveryNotRunning); await expect( - discovery.queueDiscoveryByNode(testUtils.generateRandomNodeId()), + discovery.queueDiscoveryByNode(testNodesUtils.generateRandomNodeId()), ).rejects.toThrow(discoveryErrors.ErrorDiscoveryNotRunning); }); test('discovery by node', async () => { diff --git a/tests/gestalts/GestaltGraph.test.ts b/tests/gestalts/GestaltGraph.test.ts index fa30c86bdc..84a15c2dba 100644 --- a/tests/gestalts/GestaltGraph.test.ts +++ b/tests/gestalts/GestaltGraph.test.ts @@ -20,19 +20,19 @@ import * as gestaltsErrors from '@/gestalts/errors'; import * as gestaltsUtils from '@/gestalts/utils'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('GestaltGraph', () => { const logger = new Logger('GestaltGraph Test', LogLevel.WARN, [ new StreamHandler(), ]); - const nodeIdABC = testUtils.generateRandomNodeId(); + const nodeIdABC = testNodesUtils.generateRandomNodeId(); 
const nodeIdABCEncoded = nodesUtils.encodeNodeId(nodeIdABC); - const nodeIdDEE = testUtils.generateRandomNodeId(); + const nodeIdDEE = testNodesUtils.generateRandomNodeId(); const nodeIdDEEEncoded = nodesUtils.encodeNodeId(nodeIdDEE); - const nodeIdDEF = testUtils.generateRandomNodeId(); + const nodeIdDEF = testNodesUtils.generateRandomNodeId(); const nodeIdDEFEncoded = nodesUtils.encodeNodeId(nodeIdDEF); - const nodeIdZZZ = testUtils.generateRandomNodeId(); + const nodeIdZZZ = testNodesUtils.generateRandomNodeId(); const nodeIdZZZEncoded = nodesUtils.encodeNodeId(nodeIdZZZ); let dataDir: string; diff --git a/tests/grpc/GRPCClient.test.ts b/tests/grpc/GRPCClient.test.ts index a4f83a1e06..716778bf91 100644 --- a/tests/grpc/GRPCClient.test.ts +++ b/tests/grpc/GRPCClient.test.ts @@ -10,13 +10,14 @@ import path from 'path'; import fs from 'fs'; import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; -import { utils as keysUtils } from '@/keys'; -import { Session, SessionManager } from '@/sessions'; -import { errors as grpcErrors } from '@/grpc'; +import Session from '@/sessions/Session'; +import SessionManager from '@/sessions/SessionManager'; +import * as keysUtils from '@/keys/utils'; +import * as grpcErrors from '@/grpc/errors'; import * as clientUtils from '@/client/utils'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as utils from './utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('GRPCClient', () => { const logger = new Logger('GRPCClient Test', LogLevel.WARN, [ @@ -60,7 +61,7 @@ describe('GRPCClient', () => { }, }); const keyManager = { - getNodeId: () => testUtils.generateRandomNodeId(), + getNodeId: () => testNodesUtils.generateRandomNodeId(), } as KeyManager; // Cheeky mocking. 
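// (The stub above only provides getNodeId; presumably that is the only
// KeyManager member exercised on this test path, and the `as KeyManager` cast
// papers over the rest.)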
sessionManager = await SessionManager.createSessionManager({ db, diff --git a/tests/identities/IdentitiesManager.test.ts b/tests/identities/IdentitiesManager.test.ts index b7ca969b0c..23000440b9 100644 --- a/tests/identities/IdentitiesManager.test.ts +++ b/tests/identities/IdentitiesManager.test.ts @@ -17,7 +17,7 @@ import * as identitiesErrors from '@/identities/errors'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import TestProvider from './TestProvider'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('IdentitiesManager', () => { const logger = new Logger('IdentitiesManager Test', LogLevel.WARN, [ @@ -219,7 +219,7 @@ describe('IdentitiesManager', () => { expect(identityDatas).toHaveLength(1); expect(identityDatas).not.toContainEqual(identityData); // Now publish a claim - const nodeIdSome = testUtils.generateRandomNodeId(); + const nodeIdSome = testNodesUtils.generateRandomNodeId(); const nodeIdSomeEncoded = nodesUtils.encodeNodeId(nodeIdSome); const signatures: Record = {}; signatures[nodeIdSome] = { diff --git a/tests/network/Proxy.test.ts b/tests/network/Proxy.test.ts index ec5d17e86a..bbb5074547 100644 --- a/tests/network/Proxy.test.ts +++ b/tests/network/Proxy.test.ts @@ -13,8 +13,9 @@ import { } from '@/network'; import * as keysUtils from '@/keys/utils'; import { promisify, promise, timerStart, timerStop, poll } from '@/utils'; -import { utils as nodesUtils } from '@/nodes'; +import * as nodesUtils from '@/nodes/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; /** * Mock HTTP Connect Request @@ -113,11 +114,11 @@ describe(Proxy.name, () => { const logger = new Logger(`${Proxy.name} test`, LogLevel.WARN, [ new StreamHandler(), ]); - const nodeIdABC = testUtils.generateRandomNodeId(); + const nodeIdABC = testNodesUtils.generateRandomNodeId(); const nodeIdABCEncoded = nodesUtils.encodeNodeId(nodeIdABC); - const nodeIdSome = testUtils.generateRandomNodeId(); + const nodeIdSome = testNodesUtils.generateRandomNodeId(); const nodeIdSomeEncoded = nodesUtils.encodeNodeId(nodeIdSome); - const nodeIdRandom = testUtils.generateRandomNodeId(); + const nodeIdRandom = testNodesUtils.generateRandomNodeId(); const authToken = 'abc123'; let keyPairPem: KeyPairPem; let certPem: string; diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index 558e2852d3..a25a74c12d 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -35,8 +35,9 @@ import * as GRPCErrors from '@/grpc/errors'; import * as nodesUtils from '@/nodes/utils'; import * as agentErrors from '@/agent/errors'; import * as grpcUtils from '@/grpc/utils'; +import * as testNodesUtils from './utils'; import * as testUtils from '../utils'; -import * as grpcTestUtils from '../grpc/utils'; +import * as testGrpcUtils from '../grpc/utils'; const destroyCallback = async () => {}; @@ -73,7 +74,7 @@ describe(`${NodeConnection.name} test`, () => { const password = 'password'; const node: NodeInfo = { - id: nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()), + id: nodesUtils.encodeNodeId(testNodesUtils.generateRandomNodeId()), chain: {}, }; @@ -663,7 +664,7 @@ describe(`${NodeConnection.name} test`, () => { "should call `killSelf and throw if the server %s's during testUnaryFail", async (option) => { let nodeConnection: - | NodeConnection + | NodeConnection | undefined; let testProxy: Proxy | undefined; let testProcess: 
child_process.ChildProcessWithoutNullStreams | undefined; @@ -708,7 +709,7 @@ describe(`${NodeConnection.name} test`, () => { targetHost: testProxy.getProxyHost(), targetPort: testProxy.getProxyPort(), clientFactory: (args) => - grpcTestUtils.GRPCClientTest.createGRPCClientTest(args), + testGrpcUtils.GRPCClientTest.createGRPCClientTest(args), }); const client = nodeConnection.getClient(); @@ -732,7 +733,7 @@ describe(`${NodeConnection.name} test`, () => { "should call `killSelf and throw if the server %s's during testStreamFail", async (option) => { let nodeConnection: - | NodeConnection + | NodeConnection | undefined; let testProxy: Proxy | undefined; let testProcess: child_process.ChildProcessWithoutNullStreams | undefined; @@ -777,7 +778,7 @@ describe(`${NodeConnection.name} test`, () => { targetHost: testProxy.getProxyHost(), targetPort: testProxy.getProxyPort(), clientFactory: (args) => - grpcTestUtils.GRPCClientTest.createGRPCClientTest(args), + testGrpcUtils.GRPCClientTest.createGRPCClientTest(args), }); const client = nodeConnection.getClient(); diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index 5fac8e30ad..ebb4d08003 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -19,8 +19,7 @@ import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; -import * as nodesTestUtils from './utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from './utils'; describe(`${NodeConnectionManager.name} general test`, () => { const logger = new Logger( @@ -407,11 +406,11 @@ describe(`${NodeConnectionManager.name} general test`, () => { try { // Generate the node ID to find the closest nodes to (in bucket 100) const nodeId = keyManager.getNodeId(); - const nodeIdToFind = nodesTestUtils.generateNodeIdForBucket(nodeId, 100); + const nodeIdToFind = testNodesUtils.generateNodeIdForBucket(nodeId, 100); // Now generate and add 20 nodes that will be close to this node ID const addedClosestNodes: NodeData[] = []; for (let i = 1; i < 101; i += 5) { - const closeNodeId = nodesTestUtils.generateNodeIdForBucket( + const closeNodeId = testNodesUtils.generateNodeIdForBucket( nodeIdToFind, i, ); @@ -474,7 +473,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { // Now generate and add 20 nodes that will be close to this node ID const addedClosestNodes: NodeData[] = []; for (let i = 1; i < 101; i += 5) { - const closeNodeId = nodesTestUtils.generateNodeIdForBucket( + const closeNodeId = testNodesUtils.generateNodeIdForBucket( targetNodeId, i, ); @@ -536,8 +535,8 @@ describe(`${NodeConnectionManager.name} general test`, () => { // To test this we need to... // 2. call relayHolePunchMessage // 3. check that the relevant call was made. - const sourceNodeId = testUtils.generateRandomNodeId(); - const targetNodeId = testUtils.generateRandomNodeId(); + const sourceNodeId = testNodesUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); await nodeConnectionManager.sendHolePunchMessage( remoteNodeId1, sourceNodeId, @@ -573,7 +572,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { // To test this we need to... // 2. call relayHolePunchMessage // 3. check that the relevant call was made. 
- const sourceNodeId = testUtils.generateRandomNodeId(); + const sourceNodeId = testNodesUtils.generateRandomNodeId(); const relayMessage = new nodesPB.Relay(); relayMessage.setSrcId(nodesUtils.encodeNodeId(sourceNodeId)); relayMessage.setTargetId(nodesUtils.encodeNodeId(remoteNodeId1)); diff --git a/tests/nodes/NodeGraph.test.ts b/tests/nodes/NodeGraph.test.ts index 6b9eec7008..1da3747ce4 100644 --- a/tests/nodes/NodeGraph.test.ts +++ b/tests/nodes/NodeGraph.test.ts @@ -1,59 +1,40 @@ -import type { Host, Port } from '@/network/types'; -import type { NodeAddress, NodeData, NodeId } from '@/nodes/types'; +import type { NodeId, NodeData, NodeAddress, NodeBucket, NodeBucketIndex } from '@/nodes/types'; import os from 'os'; import path from 'path'; import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; -import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; -import * as nodesErrors from '@/nodes/errors'; import KeyManager from '@/keys/KeyManager'; import * as keysUtils from '@/keys/utils'; -import Proxy from '@/network/Proxy'; import * as nodesUtils from '@/nodes/utils'; -import Sigchain from '@/sigchain/Sigchain'; -import * as nodesTestUtils from './utils'; +import * as nodesErrors from '@/nodes/errors'; +import * as utils from '@/utils'; +import * as testUtils from '../utils'; +import * as testNodesUtils from './utils'; describe(`${NodeGraph.name} test`, () => { - const localHost = '127.0.0.1' as Host; - const port = 0 as Port; const password = 'password'; - let nodeGraph: NodeGraph; - let nodeId: NodeId; - - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, - ]); - const dummyNode = nodesUtils.decodeNodeId( - 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', - )!; - - const logger = new Logger(`${NodeGraph.name} test`, LogLevel.ERROR, [ + const logger = new Logger(`${NodeGraph.name} test`, LogLevel.WARN, [ new StreamHandler(), ]); - let proxy: Proxy; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; let dataDir: string; let keyManager: KeyManager; + let dbKey: Buffer; + let dbPath: string; let db: DB; - let nodeConnectionManager: NodeConnectionManager; - let sigchain: Sigchain; - - const hostGen = (i: number) => `${i}.${i}.${i}.${i}` as Host; - - const mockedGenerateDeterministicKeyPair = jest.spyOn( - keysUtils, - 'generateDeterministicKeyPair', - ); - - beforeEach(async () => { - mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { - return keysUtils.generateKeyPair(bits); - }); - + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValue(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValue(globalKeyPair); dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -63,559 +44,672 @@ describe(`${NodeGraph.name} test`, () => { keysPath, logger, }); - proxy = new Proxy({ - authToken: 'auth', - logger: logger, - }); - await proxy.start({ - serverHost: localHost, - serverPort: port, - tlsConfig: { - keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, - certChainPem: await keyManager.getRootCertChainPem(), - }, + dbKey = await 
keysUtils.generateKey(); + dbPath = `${dataDir}/db`; + }); + afterAll(async () => { + await keyManager.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, }); - const dbPath = `${dataDir}/db`; + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + beforeEach(async () => { db = await DB.createDB({ dbPath, logger, crypto: { - key: keyManager.dbKey, + key: dbKey, ops: { encrypt: keysUtils.encryptWithKey, decrypt: keysUtils.decryptWithKey, }, }, }); - sigchain = await Sigchain.createSigchain({ - keyManager: keyManager, - db: db, - logger: logger, - }); - nodeGraph = await NodeGraph.createNodeGraph({ - db, - keyManager, - logger, - }); - nodeConnectionManager = new NodeConnectionManager({ - keyManager: keyManager, - nodeGraph: nodeGraph, - proxy: proxy, - logger: logger, - }); - await nodeConnectionManager.start(); - // Retrieve the NodeGraph reference from NodeManager - nodeId = keyManager.getNodeId(); }); - afterEach(async () => { await db.stop(); - await sigchain.stop(); - await nodeConnectionManager.stop(); - await nodeGraph.stop(); - await keyManager.stop(); - await proxy.stop(); - await fs.promises.rm(dataDir, { - force: true, - recursive: true, - }); + await db.destroy(); }); - - test('NodeGraph readiness', async () => { - const nodeGraph2 = await NodeGraph.createNodeGraph({ + test('get, set and unset node IDs', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, - logger, + logger }); - // @ts-ignore - await expect(nodeGraph2.destroy()).rejects.toThrow( - nodesErrors.ErrorNodeGraphRunning, + let nodeId1: NodeId; + do { + nodeId1 = testNodesUtils.generateRandomNodeId(); + } while (nodeId1.equals(keyManager.getNodeId())); + let nodeId2: NodeId; + do { + nodeId2 = testNodesUtils.generateRandomNodeId(); + } while (nodeId2.equals(keyManager.getNodeId())); + + await nodeGraph.setNode( + nodeId1, + { + host: '10.0.0.1', + port: 1234 + } as NodeAddress ); - // Should be a noop - await nodeGraph2.start(); - await nodeGraph2.stop(); - await nodeGraph2.destroy(); - await expect(async () => { - await nodeGraph2.start(); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphDestroyed); - await expect(async () => { - await nodeGraph2.getBucket(0); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - await expect(async () => { - await nodeGraph2.getBucket(0); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - }); - test('knows node (true and false case)', async () => { - // Known node - const nodeAddress1: NodeAddress = { - host: '127.0.0.1' as Host, - port: 11111 as Port, - }; - await nodeGraph.setNode(nodeId1, nodeAddress1); - expect(await nodeGraph.knowsNode(nodeId1)).toBeTruthy(); - - // Unknown node - expect(await nodeGraph.knowsNode(dummyNode)).toBeFalsy(); - }); - test('finds correct node address', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Get node address - const foundAddress = await nodeGraph.getNode(newNode2Id); - expect(foundAddress).toEqual({ host: '227.1.1.1', port: 4567 }); - }); - test('unable to find node address', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Get node address (of non-existent node) - const foundAddress = await nodeGraph.getNode(dummyNode); - 
expect(foundAddress).toBeUndefined(); - }); - test('adds a single node into a bucket', async () => { - // New node added - const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Check new node is in retrieved bucket from database - // bucketIndex = 1 as "NODEID1" XOR "NODEID2" = 3 - const bucket = await nodeGraph.getBucket(1); - expect(bucket).toBeDefined(); - expect(bucket![newNode2Id]).toEqual({ - address: { host: '227.1.1.1', port: 4567 }, - lastUpdated: expect.any(Date), + const nodeData1 = await nodeGraph.getNode(nodeId1); + expect(nodeData1).toStrictEqual({ + address: { + host: '10.0.0.1', + port: 1234 + }, + lastUpdated: expect.any(Number) + }); + await utils.sleep(1000); + await nodeGraph.setNode( + nodeId2, + { + host: 'abc.com', + port: 8978 + } as NodeAddress + ); + const nodeData2 = await nodeGraph.getNode(nodeId2); + expect(nodeData2).toStrictEqual({ + address: { + host: 'abc.com', + port: 8978 + }, + lastUpdated: expect.any(Number) }); + expect(nodeData2!.lastUpdated > nodeData1!.lastUpdated).toBe(true); + const nodes = await utils.asyncIterableArray( + nodeGraph.getNodes() + ); + expect(nodes).toHaveLength(2); + expect(nodes).toContainEqual([ + nodeId1, + { + address: { + host: '10.0.0.1', + port: 1234 + }, + lastUpdated: expect.any(Number) + } + ]); + expect(nodes).toContainEqual([ + nodeId2, + { + address: { + host: 'abc.com', + port: 8978 + }, + lastUpdated: expect.any(Number) + } + ]); + await nodeGraph.unsetNode(nodeId1); + expect(await nodeGraph.getNode(nodeId1)).toBeUndefined(); + expect(await utils.asyncIterableArray(nodeGraph.getNodes())).toStrictEqual([ + [ + nodeId2, + { + address: { + host: 'abc.com', + port: 8978 + }, + lastUpdated: expect.any(Number) + } + ] + ]); + await nodeGraph.unsetNode(nodeId2); + await nodeGraph.stop(); }); - test('adds multiple nodes into the same bucket', async () => { - // Add 3 new nodes into bucket 4 - const bucketIndex = 4; - const newNode1Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 0, + test('get all nodes', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger + }); + let nodeIds = Array.from( + { length: 25 }, + () => { + return testNodesUtils.generateRandomNodeId(); + } ); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - const newNode2Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 1, + nodeIds = nodeIds.filter( + (nodeId) => !nodeId.equals(keyManager.getNodeId()) ); - const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const newNode3Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 2, + let bucketIndexes: Array; + let nodes: Array<[NodeId, NodeData]>; + nodes = await utils.asyncIterableArray( + nodeGraph.getNodes() ); - const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; - await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4 - const bucket = await nodeGraph.getBucket(4); - expect(bucket).toBeDefined(); - if (!bucket) fail('bucket should be defined, letting TS know'); - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - 
expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); - }); - test('adds a single node into different buckets', async () => { - // New node for bucket 3 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 3); - const newNode1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - // New node for bucket 255 (the highest possible bucket) - const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); - const newNode2Address = { host: '2.2.2.2', port: 2222 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const bucket3 = await nodeGraph.getBucket(3); - const bucket351 = await nodeGraph.getBucket(255); - if (bucket3 && bucket351) { - expect(bucket3[newNode1Id]).toEqual({ - address: { host: '1.1.1.1', port: 1111 }, - lastUpdated: expect.any(Date), - }); - expect(bucket351[newNode2Id]).toEqual({ - address: { host: '2.2.2.2', port: 2222 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - }); - test('deletes a single node (and removes bucket)', async () => { - // New node for bucket 2 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - // Check the bucket is there first - const bucket = await nodeGraph.getBucket(2); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); + expect(nodes).toHaveLength(0); + for (const nodeId of nodeIds) { + await utils.sleep(100); + await nodeGraph.setNode( + nodeId, + { + host: '127.0.0.1', + port: 55555 + } as NodeAddress + ); } - - // Delete the node - await nodeGraph.unsetNode(newNode1Id); - // Check bucket no longer exists - const newBucket = await nodeGraph.getBucket(2); - expect(newBucket).toBeUndefined(); - }); - test('deletes a single node (and retains remainder of bucket)', async () => { - // Add 3 new nodes into bucket 4 - const bucketIndex = 4; - const newNode1Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 0, + nodes = await utils.asyncIterableArray( + nodeGraph.getNodes() ); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - const newNode2Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 1, + expect(nodes).toHaveLength(25); + // Sorted by bucket indexes ascending + bucketIndexes = nodes.map(([nodeId]) => nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)); + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] <= bucketIndex; + }) + ).toBe(true); + // Sorted by bucket indexes ascending explicitly + nodes = await utils.asyncIterableArray( + nodeGraph.getNodes('asc') ); - const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const newNode3Id = nodesTestUtils.generateNodeIdForBucket( + bucketIndexes = nodes.map(([nodeId]) => nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)); + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return 
bucketIndexes[i] <= bucketIndex; + }) + ).toBe(true); + nodes = await utils.asyncIterableArray( + nodeGraph.getNodes('desc') + ); + expect(nodes).toHaveLength(25); + // Sorted by bucket indexes descending + bucketIndexes = nodes.map(([nodeId]) => nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)); + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] >= bucketIndex; + }) + ).toBe(true); + await nodeGraph.stop(); + }); + test('setting same node ID throws error', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger + }); + await expect( + nodeGraph.setNode(keyManager.getNodeId(), { + host: '127.0.0.1', + port: 55555 + } as NodeAddress) + ).rejects.toThrow(nodesErrors.ErrorNodeGraphSameNodeId); + await nodeGraph.stop(); + }); + test('get bucket with 1 node', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger + }); + let nodeId: NodeId; + do { + nodeId = testNodesUtils.generateRandomNodeId(); + } while (nodeId.equals(keyManager.getNodeId())); + // Set one node + await nodeGraph.setNode( nodeId, - bucketIndex, - 2, + { + host: '127.0.0.1', + port: 55555 + } as NodeAddress ); - const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; - await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4 + const bucketIndex = nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId); const bucket = await nodeGraph.getBucket(bucketIndex); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - - // Delete the node - await nodeGraph.unsetNode(newNode1Id); - // Check node no longer exists in the bucket - const newBucket = await nodeGraph.getBucket(bucketIndex); - if (newBucket) { - expect(newBucket[newNode1Id]).toBeUndefined(); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); + expect(bucket).toHaveLength(1); + expect(bucket[0]).toStrictEqual([ + nodeId, + { + address: { + host: '127.0.0.1', + port: 55555 + }, + lastUpdated: expect.any(Number) + } + ]); + expect(await nodeGraph.getBucketMeta(bucketIndex)).toStrictEqual({ + count: 1 + }); + // Adjacent bucket should be empty + let bucketIndex_: number; + if (bucketIndex >= nodeId.length * 8 - 1) { + bucketIndex_ = bucketIndex - 1; + } else if (bucketIndex === 0) { + bucketIndex_ = bucketIndex + 1; } else { - // Should be unreachable - fail('New bucket undefined'); + bucketIndex_ = bucketIndex + 1 } + expect(await nodeGraph.getBucket(bucketIndex_)).toHaveLength(0); + expect(await nodeGraph.getBucketMeta(bucketIndex_)).toStrictEqual({ + count: 0 + }); + await nodeGraph.stop(); }); - test('enforces k-bucket size, removing least active node when a new node is discovered', async () => { - // Add k nodes to the database (importantly, they all go into the same bucket) - const bucketIndex = 59; - // Keep a record of the first node ID that we added - const firstNodeId = 
nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, + test('get bucket with multiple nodes', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger + }); + // Contiguous node IDs starting from 0 + let nodeIds = Array.from( + { length: 25 }, + (_, i) => IdInternal.create( + utils.bigInt2Bytes(BigInt(i), keyManager.getNodeId().byteLength) + ) ); - for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { - // Add the current node ID - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; + nodeIds = nodeIds.filter((nodeId) => !nodeId.equals(keyManager.getNodeId())); + for (const nodeId of nodeIds) { + await utils.sleep(100); await nodeGraph.setNode( - nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, i), - nodeAddress, + nodeId, + { + host: '127.0.0.1', + port: 55555 + } as NodeAddress ); - // Increment the current node ID } - // All of these nodes are in bucket 59 - const originalBucket = await nodeGraph.getBucket(bucketIndex); - if (originalBucket) { - expect(Object.keys(originalBucket).length).toBe( - nodeGraph.maxNodesPerBucket, - ); + // Use first and last buckets because node IDs may be split between buckets + const bucketIndexFirst = nodesUtils.bucketIndex(keyManager.getNodeId(), nodeIds[0]); + const bucketIndexLast = nodesUtils.bucketIndex(keyManager.getNodeId(), nodeIds[nodeIds.length - 1]); + const bucketFirst = await nodeGraph.getBucket(bucketIndexFirst); + const bucketLast = await nodeGraph.getBucket(bucketIndexLast); + let bucket: NodeBucket; + let bucketIndex: NodeBucketIndex; + if (bucketFirst.length >= bucketLast.length) { + bucket = bucketFirst; + bucketIndex = bucketIndexFirst; } else { - // Should be unreachable - fail('Bucket undefined'); + bucket = bucketLast; + bucketIndex = bucketIndexLast; } - - // Attempt to add a new node into this full bucket (increment the last node - // ID that was added) - const newNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - nodeGraph.maxNodesPerBucket + 1, + expect(bucket.length > 1).toBe(true); + let bucketNodeIds = bucket.map(([nodeId]) => nodeId); + // The node IDs must be sorted lexicographically + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare( + bucketNodeIds[i], + nodeId + ) < 1; + }) + ).toBe(true); + // Sort by node ID asc + bucket = await nodeGraph.getBucket(bucketIndex, 'nodeId', 'asc'); + bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare( + bucketNodeIds[i], + nodeId + ) < 0; + }) + ).toBe(true); + // Sort by node ID desc + bucket = await nodeGraph.getBucket(bucketIndex, 'nodeId', 'desc'); + bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare( + bucketNodeIds[i], + nodeId + ) > 0; + }) + ).toBe(true); + // Sort by distance asc + bucket = await nodeGraph.getBucket(bucketIndex, 'distance', 'asc'); + let bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId) ); - const newNodeAddress = { host: '0.0.0.1' as Host, port: 1234 as Port }; - await nodeGraph.setNode(newNodeId, newNodeAddress); - - const finalBucket = await nodeGraph.getBucket(bucketIndex); - if (finalBucket) { - // We should still have a full bucket (but no more) - expect(Object.keys(finalBucket).length).toEqual( - nodeGraph.maxNodesPerBucket, - ); - // Ensure that this new node is in the bucket - 
expect(finalBucket[newNodeId]).toEqual({ - address: newNodeAddress, - lastUpdated: expect.any(Date), - }); - // NODEID1 should have been removed from this bucket (as this was the least active) - // The first node added should have been removed from this bucket (as this - // was the least active, purely because it was inserted first) - expect(finalBucket[firstNodeId]).toBeUndefined(); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - }); - test('enforces k-bucket size, retaining all nodes if adding a pre-existing node', async () => { - // Add k nodes to the database (importantly, they all go into the same bucket) - const bucketIndex = 59; - const currNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] <= distance; + }) + ).toBe(true); + // Sort by distance desc + bucket = await nodeGraph.getBucket(bucketIndex, 'distance', 'desc'); + bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId) ); - // Keep a record of the first node ID that we added - // const firstNodeId = currNodeId; - let increment = 1; - for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { - // Add the current node ID - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] >= distance; + }) + ).toBe(true); + // Sort by lastUpdated asc + bucket = await nodeGraph.getBucket(bucketIndex, 'lastUpdated', 'asc'); + let bucketLastUpdateds = bucket.map(([, nodeData]) => nodeData.lastUpdated); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] <= lastUpdated; + }) + ).toBe(true); + bucket = await nodeGraph.getBucket(bucketIndex, 'lastUpdated', 'desc'); + bucketLastUpdateds = bucket.map(([, nodeData]) => nodeData.lastUpdated); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] >= lastUpdated; + }) + ).toBe(true); + await nodeGraph.stop(); + }); + test('get all buckets', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 50; i++) { + await utils.sleep(50); await nodeGraph.setNode( - nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, increment), - nodeAddress, + testNodesUtils.generateRandomNodeId(), + { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2**16) + } as NodeAddress ); - // Increment the current node ID - skip for the last one to keep currNodeId - // as the last added node ID - if (i !== nodeGraph.maxNodesPerBucket) { - increment++; + } + let bucketIndex_ = -1; + // Ascending order + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets('nodeId', 'asc')) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); } + const bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + 
bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare( + bucketNodeIds[i], + nodeId + ) < 0; + }) + ).toBe(true); } - // All of these nodes are in bucket 59 - const originalBucket = await nodeGraph.getBucket(bucketIndex); - if (originalBucket) { - expect(Object.keys(originalBucket).length).toBe( - nodeGraph.maxNodesPerBucket, + // There must have been at least 1 bucket + expect(bucketIndex_).not.toBe(-1); + // Descending order + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets('nodeId', 'desc')) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare( + bucketNodeIds[i], + nodeId + ) > 0; + }) + ).toBe(true); + } + expect(bucketIndex_).not.toBe(keyManager.getNodeId().length * 8); + // Distance ascending order + // Lower distance buckets first + bucketIndex_ = -1; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets('distance', 'asc')) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId) ); - } else { - // Should be unreachable - fail('Bucket undefined'); + // it's the LAST bucket that fails this + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] <= distance; + }) + ).toBe(true); } - - // If we tried to re-add the first node, it would simply remove the original - // first node, as this is the "least active" - // We instead want to check that we don't mistakenly delete a node if we're - // updating an existing one - // So, re-add the last node - const newLastAddress: NodeAddress = { - host: '30.30.30.30' as Host, - port: 30 as Port, - }; - await nodeGraph.setNode(currNodeId, newLastAddress); - - const finalBucket = await nodeGraph.getBucket(bucketIndex); - if (finalBucket) { - // We should still have a full bucket - expect(Object.keys(finalBucket).length).toEqual( - nodeGraph.maxNodesPerBucket, + // Distance descending order + // Higher distance buckets first + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets('distance', 'desc')) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= 
nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId) ); - // Ensure that this new node is in the bucket - expect(finalBucket[currNodeId]).toEqual({ - address: newLastAddress, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] >= distance; + }) + ).toBe(true); } + // Last updated ascending order + // Bucket index is ascending + bucketIndex_ = -1; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets('lastUpdated', 'asc')) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketLastUpdateds = bucket.map(([, nodeData]) => nodeData.lastUpdated); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] <= lastUpdated; + }) + ).toBe(true); + } + // Last updated descending order + // Bucket index is descending + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets('lastUpdated', 'desc')) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + expect(bucket.length <= nodeGraph.nodeBucketLimit).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketLastUpdateds = bucket.map(([, nodeData]) => nodeData.lastUpdated); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] >= lastUpdated; + }) + ).toBe(true); + } + await nodeGraph.stop(); }); - test('retrieves all buckets (in expected lexicographic order)', async () => { - // Bucket 0 is expected to never have any nodes (as nodeId XOR 0 = nodeId) - // Bucket 1 (minimum): - - const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); - const node1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; - await nodeGraph.setNode(node1Id, node1Address); - - // Bucket 4 (multiple nodes in 1 bucket): - const node41Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4); - const node41Address = { host: '41.41.41.41', port: 4141 } as NodeAddress; - await nodeGraph.setNode(node41Id, node41Address); - 
const node42Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4, 1); - const node42Address = { host: '42.42.42.42', port: 4242 } as NodeAddress; - await nodeGraph.setNode(node42Id, node42Address); - - // Bucket 10 (lexicographic ordering - should appear after 2): - const node10Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 10); - const node10Address = { host: '10.10.10.10', port: 1010 } as NodeAddress; - await nodeGraph.setNode(node10Id, node10Address); - - // Bucket 255 (maximum): - const node255Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); - const node255Address = { - host: '255.255.255.255', - port: 255, - } as NodeAddress; - await nodeGraph.setNode(node255Id, node255Address); - - const buckets = await nodeGraph.getAllBuckets(); - expect(buckets.length).toBe(4); - // Buckets should be returned in lexicographic ordering (using hex keys to - // ensure the bucket indexes are in numberical order) - expect(buckets).toEqual([ - { - [node1Id]: { - address: { host: '1.1.1.1', port: 1111 }, - lastUpdated: expect.any(String), - }, - }, - { - [node41Id]: { - address: { host: '41.41.41.41', port: 4141 }, - lastUpdated: expect.any(String), - }, - [node42Id]: { - address: { host: '42.42.42.42', port: 4242 }, - lastUpdated: expect.any(String), - }, - }, - { - [node10Id]: { - address: { host: '10.10.10.10', port: 1010 }, - lastUpdated: expect.any(String), - }, - }, - { - [node255Id]: { - address: { host: '255.255.255.255', port: 255 }, - lastUpdated: expect.any(String), - }, - }, - ]); - }); - test( - 'refreshes buckets', - async () => { - const initialNodes: Record = {}; - // Generate and add some nodes - for (let i = 1; i < 255; i += 20) { - const newNodeId = nodesTestUtils.generateNodeIdForBucket( - keyManager.getNodeId(), - i, - ); - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode(newNodeId, nodeAddress); - initialNodes[newNodeId] = { - id: newNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance( - keyManager.getNodeId(), - newNodeId, - ), - }; + test('reset buckets', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 100; i++) { + await nodeGraph.setNode( + testNodesUtils.generateRandomNodeId(), + { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2**16) + } as NodeAddress + ); + } + const buckets0 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + // Reset the buckets according to the new node ID + // Note that this should normally be only executed when the key manager NodeID changes + // This means methods that use the KeyManager's node ID cannot be used here in this test + const nodeIdNew1 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew1); + const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets1.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets1) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); } - - // Renew the keypair - await keyManager.renewRootKeyPair('newPassword'); - // Reset the test's node ID state - nodeId = 
keyManager.getNodeId(); - // Refresh the buckets - await nodeGraph.refreshBuckets(); - - // Get all the new buckets, and expect that each node is in the correct bucket - const newBuckets = await nodeGraph.getAllBuckets(); - let nodeCount = 0; - for (const b of newBuckets) { - for (const n of Object.keys(b)) { - const nodeId = IdInternal.fromString(n); - // Check that it was a node in the original DB - expect(initialNodes[nodeId]).toBeDefined(); - // Check it's in the correct bucket - const expectedIndex = nodesUtils.calculateBucketIndex( - keyManager.getNodeId(), - nodeId, - ); - const expectedBucket = await nodeGraph.getBucket(expectedIndex); - expect(expectedBucket).toBeDefined(); - expect(expectedBucket![nodeId]).toBeDefined(); - // Check it has the correct address - expect(b[nodeId].address).toEqual(initialNodes[nodeId].address); - nodeCount++; + } + expect(buckets1).not.toStrictEqual(buckets0); + // Resetting again should change the space + const nodeIdNew2 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew2); + const buckets2 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets2.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets2) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew2, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + expect(buckets2).not.toStrictEqual(buckets1); + // Resetting to the same NodeId results in the same bucket structure + await nodeGraph.resetBuckets(nodeIdNew2); + const buckets3 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets3).toStrictEqual(buckets2); + // Resetting to an existing NodeId + const nodeIdExisting = buckets3[0][1][0][0]; + let nodeIdExistingFound = false; + await nodeGraph.resetBuckets(nodeIdExisting); + const buckets4 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets4.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets4) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + if (nodeId.equals(nodeIdExisting)) { + nodeIdExistingFound = true; } + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdExisting, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); } - // We had less than k (20) nodes, so we expect that all nodes will be re-added - // If we had more than k nodes, we may lose some of them (because the nodes - // may be re-added to newly full buckets) - expect(Object.keys(initialNodes).length).toEqual(nodeCount); - }, - global.defaultTimeout * 4, - ); - test('updates node', async () => { - // New node added - const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); - const node1Address = { host: '1.1.1.1', port: 1 } as NodeAddress; - await nodeGraph.setNode(node1Id, node1Address); - - // Check new node is in retrieved bucket from database - const bucket = await nodeGraph.getBucket(2); - const time1 = bucket![node1Id].lastUpdated; - - // Update node and check that time is later - const newNode1Address 
= { host: '2.2.2.2', port: 2 } as NodeAddress; - await nodeGraph.updateNode(node1Id, newNode1Address); - - const bucket2 = await nodeGraph.getBucket(2); - const time2 = bucket2![node1Id].lastUpdated; - expect(bucket2![node1Id].address).toEqual(newNode1Address); - expect(time1 < time2).toBeTruthy(); + } + expect(buckets4).not.toStrictEqual(buckets3); + // The existing node ID should not be put into the NodeGraph + expect(nodeIdExistingFound).toBe(false); + await nodeGraph.stop(); + }); + test('reset buckets is persistent', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 100; i++) { + await nodeGraph.setNode( + testNodesUtils.generateRandomNodeId(), + { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2**16) + } as NodeAddress + ); + } + const nodeIdNew1 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew1); + await nodeGraph.stop(); + await nodeGraph.start(); + const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets1.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets1) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + const nodeIdNew2 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew2); + await nodeGraph.stop(); + await nodeGraph.start(); + const buckets2 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets2.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets2) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew2, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2**16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + expect(buckets2).not.toStrictEqual(buckets1); + await nodeGraph.stop(); }); }); diff --git a/tests/nodes/NodeGraph.test.ts.old b/tests/nodes/NodeGraph.test.ts.old new file mode 100644 index 0000000000..1960c02d36 --- /dev/null +++ b/tests/nodes/NodeGraph.test.ts.old @@ -0,0 +1,624 @@ +import type { Host, Port } from '@/network/types'; +import type { NodeAddress, NodeData, NodeId } from '@/nodes/types'; +import os from 'os'; +import path from 'path'; +import fs from 'fs'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import { DB } from '@matrixai/db'; +import { IdInternal } from '@matrixai/id'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import * as nodesErrors from '@/nodes/errors'; +import KeyManager from '@/keys/KeyManager'; +import * as keysUtils from '@/keys/utils'; +import ForwardProxy from '@/network/ForwardProxy'; +import ReverseProxy from '@/network/ReverseProxy'; +import * as nodesUtils from '@/nodes/utils'; +import Sigchain from '@/sigchain/Sigchain'; +import * as nodesTestUtils from './utils'; + +describe(`${NodeGraph.name} test`, () => { + const password = 
'password'; + let nodeGraph: NodeGraph; + let nodeId: NodeId; + + const nodeId1 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, + ]); + const dummyNode = nodesUtils.decodeNodeId( + 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', + )!; + + const logger = new Logger(`${NodeGraph.name} test`, LogLevel.ERROR, [ + new StreamHandler(), + ]); + let fwdProxy: ForwardProxy; + let revProxy: ReverseProxy; + let dataDir: string; + let keyManager: KeyManager; + let db: DB; + let nodeConnectionManager: NodeConnectionManager; + let sigchain: Sigchain; + + const hostGen = (i: number) => `${i}.${i}.${i}.${i}` as Host; + + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + + beforeEach(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const keysPath = `${dataDir}/keys`; + keyManager = await KeyManager.createKeyManager({ + password, + keysPath, + logger, + }); + fwdProxy = new ForwardProxy({ + authToken: 'auth', + logger: logger, + }); + + revProxy = new ReverseProxy({ + logger: logger, + }); + + await fwdProxy.start({ + tlsConfig: { + keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, + certChainPem: await keyManager.getRootCertChainPem(), + }, + }); + const dbPath = `${dataDir}/db`; + db = await DB.createDB({ + dbPath, + logger, + crypto: { + key: keyManager.dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); + sigchain = await Sigchain.createSigchain({ + keyManager: keyManager, + db: db, + logger: logger, + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager: keyManager, + nodeGraph: nodeGraph, + fwdProxy: fwdProxy, + revProxy: revProxy, + logger: logger, + }); + await nodeConnectionManager.start(); + // Retrieve the NodeGraph reference from NodeManager + nodeId = keyManager.getNodeId(); + }); + + afterEach(async () => { + await db.stop(); + await sigchain.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); + await keyManager.stop(); + await fwdProxy.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + }); + + test('NodeGraph readiness', async () => { + const nodeGraph2 = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + // @ts-ignore + await expect(nodeGraph2.destroy()).rejects.toThrow( + nodesErrors.ErrorNodeGraphRunning, + ); + // Should be a noop + await nodeGraph2.start(); + await nodeGraph2.stop(); + await nodeGraph2.destroy(); + await expect(async () => { + await nodeGraph2.start(); + }).rejects.toThrow(nodesErrors.ErrorNodeGraphDestroyed); + await expect(async () => { + await nodeGraph2.getBucket(0); + }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); + await expect(async () => { + await nodeGraph2.getBucket(0); + }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); + }); + test('knows node (true and false case)', async () => { + // Known node + const nodeAddress1: NodeAddress = { + host: '127.0.0.1' as Host, + port: 11111 as Port, + }; + await nodeGraph.setNode(nodeId1, nodeAddress1); + expect(await nodeGraph.knowsNode(nodeId1)).toBeTruthy(); + + // Unknown node + expect(await nodeGraph.knowsNode(dummyNode)).toBeFalsy(); + }); + test('finds 
correct node address', async () => { + // New node added + const newNode2Id = nodeId1; + const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + // Get node address + const foundAddress = await nodeGraph.getNode(newNode2Id); + expect(foundAddress).toEqual({ host: '227.1.1.1', port: 4567 }); + }); + test('unable to find node address', async () => { + // New node added + const newNode2Id = nodeId1; + const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + // Get node address (of non-existent node) + const foundAddress = await nodeGraph.getNode(dummyNode); + expect(foundAddress).toBeUndefined(); + }); + test('adds a single node into a bucket', async () => { + // New node added + const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); + const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + // Check new node is in retrieved bucket from database + // bucketIndex = 1 as "NODEID1" XOR "NODEID2" = 3 + const bucket = await nodeGraph.getBucket(1); + expect(bucket).toBeDefined(); + expect(bucket![newNode2Id]).toEqual({ + address: { host: '227.1.1.1', port: 4567 }, + lastUpdated: expect.any(Date), + }); + }); + test('adds multiple nodes into the same bucket', async () => { + // Add 3 new nodes into bucket 4 + const bucketIndex = 4; + const newNode1Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 0, + ); + const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + + const newNode2Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 1, + ); + const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + const newNode3Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 2, + ); + const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; + await nodeGraph.setNode(newNode3Id, newNode3Address); + // Based on XOR values, all 3 nodes should appear in bucket 4 + const bucket = await nodeGraph.getBucket(4); + expect(bucket).toBeDefined(); + if (!bucket) fail('bucket should be defined, letting TS know'); + expect(bucket[newNode1Id]).toEqual({ + address: { host: '4.4.4.4', port: 4444 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode2Id]).toEqual({ + address: { host: '5.5.5.5', port: 5555 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode3Id]).toEqual({ + address: { host: '6.6.6.6', port: 6666 }, + lastUpdated: expect.any(Date), + }); + }); + test('adds a single node into different buckets', async () => { + // New node for bucket 3 + const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 3); + const newNode1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + // New node for bucket 255 (the highest possible bucket) + const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); + const newNode2Address = { host: '2.2.2.2', port: 2222 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + const bucket3 = await nodeGraph.getBucket(3); + const bucket351 = await nodeGraph.getBucket(255); + if (bucket3 && bucket351) { + expect(bucket3[newNode1Id]).toEqual({ + address: { host: '1.1.1.1', port: 1111 }, + lastUpdated: 
expect.any(Date), + }); + expect(bucket351[newNode2Id]).toEqual({ + address: { host: '2.2.2.2', port: 2222 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + }); + test('deletes a single node (and removes bucket)', async () => { + // New node for bucket 2 + const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); + const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + + // Check the bucket is there first + const bucket = await nodeGraph.getBucket(2); + if (bucket) { + expect(bucket[newNode1Id]).toEqual({ + address: { host: '4.4.4.4', port: 4444 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // Delete the node + await nodeGraph.unsetNode(newNode1Id); + // Check bucket no longer exists + const newBucket = await nodeGraph.getBucket(2); + expect(newBucket).toBeUndefined(); + }); + test('deletes a single node (and retains remainder of bucket)', async () => { + // Add 3 new nodes into bucket 4 + const bucketIndex = 4; + const newNode1Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 0, + ); + const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + + const newNode2Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 1, + ); + const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + const newNode3Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 2, + ); + const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; + await nodeGraph.setNode(newNode3Id, newNode3Address); + // Based on XOR values, all 3 nodes should appear in bucket 4 + const bucket = await nodeGraph.getBucket(bucketIndex); + if (bucket) { + expect(bucket[newNode1Id]).toEqual({ + address: { host: '4.4.4.4', port: 4444 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode2Id]).toEqual({ + address: { host: '5.5.5.5', port: 5555 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode3Id]).toEqual({ + address: { host: '6.6.6.6', port: 6666 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // Delete the node + await nodeGraph.unsetNode(newNode1Id); + // Check node no longer exists in the bucket + const newBucket = await nodeGraph.getBucket(bucketIndex); + if (newBucket) { + expect(newBucket[newNode1Id]).toBeUndefined(); + expect(bucket[newNode2Id]).toEqual({ + address: { host: '5.5.5.5', port: 5555 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode3Id]).toEqual({ + address: { host: '6.6.6.6', port: 6666 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('New bucket undefined'); + } + }); + test('enforces k-bucket size, removing least active node when a new node is discovered', async () => { + // Add k nodes to the database (importantly, they all go into the same bucket) + const bucketIndex = 59; + // Keep a record of the first node ID that we added + const firstNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + ); + for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { + // Add the current node ID + const nodeAddress = { + host: hostGen(i), + port: i as Port, + }; + await nodeGraph.setNode( + nodesTestUtils.generateNodeIdForBucket(nodeId, 
bucketIndex, i), + nodeAddress, + ); + // Increment the current node ID + } + // All of these nodes are in bucket 59 + const originalBucket = await nodeGraph.getBucket(bucketIndex); + if (originalBucket) { + expect(Object.keys(originalBucket).length).toBe( + nodeGraph.maxNodesPerBucket, + ); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // Attempt to add a new node into this full bucket (increment the last node + // ID that was added) + const newNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + nodeGraph.maxNodesPerBucket + 1, + ); + const newNodeAddress = { host: '0.0.0.1' as Host, port: 1234 as Port }; + await nodeGraph.setNode(newNodeId, newNodeAddress); + + const finalBucket = await nodeGraph.getBucket(bucketIndex); + if (finalBucket) { + // We should still have a full bucket (but no more) + expect(Object.keys(finalBucket).length).toEqual( + nodeGraph.maxNodesPerBucket, + ); + // Ensure that this new node is in the bucket + expect(finalBucket[newNodeId]).toEqual({ + address: newNodeAddress, + lastUpdated: expect.any(Date), + }); + // NODEID1 should have been removed from this bucket (as this was the least active) + // The first node added should have been removed from this bucket (as this + // was the least active, purely because it was inserted first) + expect(finalBucket[firstNodeId]).toBeUndefined(); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + }); + test('enforces k-bucket size, retaining all nodes if adding a pre-existing node', async () => { + // Add k nodes to the database (importantly, they all go into the same bucket) + const bucketIndex = 59; + const currNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + ); + // Keep a record of the first node ID that we added + // const firstNodeId = currNodeId; + let increment = 1; + for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { + // Add the current node ID + const nodeAddress = { + host: hostGen(i), + port: i as Port, + }; + await nodeGraph.setNode( + nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, increment), + nodeAddress, + ); + // Increment the current node ID - skip for the last one to keep currNodeId + // as the last added node ID + if (i !== nodeGraph.maxNodesPerBucket) { + increment++; + } + } + // All of these nodes are in bucket 59 + const originalBucket = await nodeGraph.getBucket(bucketIndex); + if (originalBucket) { + expect(Object.keys(originalBucket).length).toBe( + nodeGraph.maxNodesPerBucket, + ); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // If we tried to re-add the first node, it would simply remove the original + // first node, as this is the "least active" + // We instead want to check that we don't mistakenly delete a node if we're + // updating an existing one + // So, re-add the last node + const newLastAddress: NodeAddress = { + host: '30.30.30.30' as Host, + port: 30 as Port, + }; + await nodeGraph.setNode(currNodeId, newLastAddress); + + const finalBucket = await nodeGraph.getBucket(bucketIndex); + if (finalBucket) { + // We should still have a full bucket + expect(Object.keys(finalBucket).length).toEqual( + nodeGraph.maxNodesPerBucket, + ); + // Ensure that this new node is in the bucket + expect(finalBucket[currNodeId]).toEqual({ + address: newLastAddress, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + }); + test('retrieves all buckets (in expected lexicographic order)', async () => { + // 
Bucket 0 is expected to never have any nodes (as nodeId XOR 0 = nodeId) + // Bucket 1 (minimum): + + const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); + const node1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; + await nodeGraph.setNode(node1Id, node1Address); + + // Bucket 4 (multiple nodes in 1 bucket): + const node41Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4); + const node41Address = { host: '41.41.41.41', port: 4141 } as NodeAddress; + await nodeGraph.setNode(node41Id, node41Address); + const node42Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4, 1); + const node42Address = { host: '42.42.42.42', port: 4242 } as NodeAddress; + await nodeGraph.setNode(node42Id, node42Address); + + // Bucket 10 (lexicographic ordering - should appear after 2): + const node10Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 10); + const node10Address = { host: '10.10.10.10', port: 1010 } as NodeAddress; + await nodeGraph.setNode(node10Id, node10Address); + + // Bucket 255 (maximum): + const node255Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); + const node255Address = { + host: '255.255.255.255', + port: 255, + } as NodeAddress; + await nodeGraph.setNode(node255Id, node255Address); + + const buckets = await nodeGraph.getAllBuckets(); + expect(buckets.length).toBe(4); + // Buckets should be returned in lexicographic ordering (using hex keys to + // ensure the bucket indexes are in numberical order) + expect(buckets).toEqual([ + { + [node1Id]: { + address: { host: '1.1.1.1', port: 1111 }, + lastUpdated: expect.any(String), + }, + }, + { + [node41Id]: { + address: { host: '41.41.41.41', port: 4141 }, + lastUpdated: expect.any(String), + }, + [node42Id]: { + address: { host: '42.42.42.42', port: 4242 }, + lastUpdated: expect.any(String), + }, + }, + { + [node10Id]: { + address: { host: '10.10.10.10', port: 1010 }, + lastUpdated: expect.any(String), + }, + }, + { + [node255Id]: { + address: { host: '255.255.255.255', port: 255 }, + lastUpdated: expect.any(String), + }, + }, + ]); + }); + test( + 'refreshes buckets', + async () => { + const initialNodes: Record = {}; + // Generate and add some nodes + for (let i = 1; i < 255; i += 20) { + const newNodeId = nodesTestUtils.generateNodeIdForBucket( + keyManager.getNodeId(), + i, + ); + const nodeAddress = { + host: hostGen(i), + port: i as Port, + }; + await nodeGraph.setNode(newNodeId, nodeAddress); + initialNodes[newNodeId] = { + id: newNodeId, + address: nodeAddress, + distance: nodesUtils.calculateDistance( + keyManager.getNodeId(), + newNodeId, + ), + }; + } + + // Renew the keypair + await keyManager.renewRootKeyPair('newPassword'); + // Reset the test's node ID state + nodeId = keyManager.getNodeId(); + // Refresh the buckets + await nodeGraph.refreshBuckets(); + + // Get all the new buckets, and expect that each node is in the correct bucket + const newBuckets = await nodeGraph.getAllBuckets(); + let nodeCount = 0; + for (const b of newBuckets) { + for (const n of Object.keys(b)) { + const nodeId = IdInternal.fromString(n); + // Check that it was a node in the original DB + expect(initialNodes[nodeId]).toBeDefined(); + // Check it's in the correct bucket + const expectedIndex = nodesUtils.calculateBucketIndex( + keyManager.getNodeId(), + nodeId, + ); + const expectedBucket = await nodeGraph.getBucket(expectedIndex); + expect(expectedBucket).toBeDefined(); + expect(expectedBucket![nodeId]).toBeDefined(); + // Check it has the correct address + 
expect(b[nodeId].address).toEqual(initialNodes[nodeId].address); + nodeCount++; + } + } + // We had less than k (20) nodes, so we expect that all nodes will be re-added + // If we had more than k nodes, we may lose some of them (because the nodes + // may be re-added to newly full buckets) + expect(Object.keys(initialNodes).length).toEqual(nodeCount); + }, + global.defaultTimeout * 4, + ); + test('updates node', async () => { + // New node added + const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); + const node1Address = { host: '1.1.1.1', port: 1 } as NodeAddress; + await nodeGraph.setNode(node1Id, node1Address); + + // Check new node is in retrieved bucket from database + const bucket = await nodeGraph.getBucket(2); + const time1 = bucket![node1Id].lastUpdated; + + // Update node and check that time is later + const newNode1Address = { host: '2.2.2.2', port: 2 } as NodeAddress; + await nodeGraph.updateNode(node1Id, newNode1Address); + + const bucket2 = await nodeGraph.getBucket(2); + const time2 = bucket2![node1Id].lastUpdated; + expect(bucket2![node1Id].address).toEqual(newNode1Address); + expect(time1 < time2).toBeTruthy(); + }); +}); diff --git a/tests/nodes/utils.test.ts b/tests/nodes/utils.test.ts index ee1aeadc46..5d08cf8012 100644 --- a/tests/nodes/utils.test.ts +++ b/tests/nodes/utils.test.ts @@ -1,48 +1,69 @@ import type { NodeId } from '@/nodes/types'; +import os from 'os'; +import path from 'path'; +import fs from 'fs'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import lexi from 'lexicographic-integer'; import { IdInternal } from '@matrixai/id'; +import { DB } from '@matrixai/db'; import * as nodesUtils from '@/nodes/utils'; +import * as keysUtils from '@/keys/utils'; +import * as utils from '@/utils'; +import * as testNodesUtils from './utils'; -describe('Nodes utils', () => { - test('basic distance calculation', async () => { - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 23, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - ]); - - const distance = nodesUtils.calculateDistance(nodeId1, nodeId2); - expect(distance).toEqual(316912758671486456376015716356n); +describe('nodes/utils', () => { + const logger = new Logger(`nodes/utils test`, LogLevel.WARN, [ + new StreamHandler(), + ]); + let dataDir: string; + let db: DB; + beforeEach(async () => { + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const dbKey = await keysUtils.generateKey(); + const dbPath = `${dataDir}/db`; + db = await DB.createDB({ + dbPath, + logger, + crypto: { + key: dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); }); - test('calculates correct first bucket (bucket 0)', async () => { - // "1" XOR "0" = distance of 1 - // Therefore, bucket 0 - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); - expect(bucketIndex).toBe(0); + afterEach(async () => { + await db.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); }); - test('calculates correct arbitrary bucket 
(bucket 63)', async () => { - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 255, 0, 0, 0, 0, 0, 0, 0, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); - expect(bucketIndex).toBe(63); + test('calculating bucket index from the same node ID', () => { + const nodeId1 = IdInternal.create([0]); + const nodeId2 = IdInternal.create([0]); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + expect(distance).toBe(0n); + expect(() => nodesUtils.bucketIndex(nodeId1, nodeId2)).toThrow(RangeError); + }); + test('calculating bucket index 0', () => { + // Distance is calculated based on XOR operation + // 1 ^ 0 == 1 + // Distance of 1 is bucket 0 + const nodeId1 = IdInternal.create([1]); + const nodeId2 = IdInternal.create([0]); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); + expect(distance).toBe(1n) + expect(bucketIndex).toBe(0); + // Triangle inequality 2^i <= distance < 2^(i + 1) + expect(2**bucketIndex <= distance).toBe(true); + expect(distance < 2**(bucketIndex + 1)).toBe(true); }); - test('calculates correct last bucket (bucket 255)', async () => { + test('calculating bucket index 255', () => { const nodeId1 = IdInternal.create([ 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -51,7 +72,120 @@ describe('Nodes utils', () => { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); expect(bucketIndex).toBe(255); + // Triangle inequality 2^i <= distance < 2^(i + 1) + expect(2**bucketIndex <= distance).toBe(true); + expect(distance < 2**(bucketIndex + 1)).toBe(true); + }); + test('calculating bucket index randomly', () => { + for (let i = 0; i < 1000; i++) { + const nodeId1 = testNodesUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); + if (nodeId1.equals(nodeId2)) { + continue; + } + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); + // Triangle inequality 2^i <= distance < 2^(i + 1) + expect(2**bucketIndex <= distance).toBe(true); + expect(distance < 2**(bucketIndex + 1)).toBe(true); + } + }); + test('parse NodeGraph buckets db key', async () => { + const bucketsDb = await db.level('buckets'); + const data: Array<{ + bucketIndex: number; + bucketKey: string; + nodeId: NodeId; + key: Buffer; + }> = []; + for (let i = 0; i < 1000; i++) { + const bucketIndex = Math.floor(Math.random() * (255 + 1)); + const bucketKey = nodesUtils.bucketKey(bucketIndex); + const nodeId = testNodesUtils.generateRandomNodeId(); + data.push({ + bucketIndex, + bucketKey, + nodeId, + key: Buffer.concat([ + Buffer.from(bucketKey), + nodeId + ]) + }); + const bucketDomain = ['buckets', bucketKey]; + await db.put( + bucketDomain, + nodesUtils.bucketDbKey(nodeId), + null + ); + } + // LevelDB will store keys in lexicographic order + // Use the key property as a concatenated buffer of the bucket key and node ID + data.sort((a, b) => Buffer.compare(a.key, b.key)); + let i = 0; + for await (const 
key of bucketsDb.createKeyStream()) { + const { + bucketIndex, + bucketKey, + nodeId + } = nodesUtils.parseBucketsDbKey(key as Buffer); + expect(bucketIndex).toBe(data[i].bucketIndex); + expect(bucketKey).toBe(data[i].bucketKey); + expect(nodeId.equals(data[i].nodeId)).toBe(true); + i++; + } + }); + test('parse NodeGraph lastUpdated buckets db key', async () => { + const lastUpdatedDb = await db.level('lastUpdated'); + const data: Array<{ + bucketIndex: number; + bucketKey: string; + lastUpdated: number; + nodeId: NodeId; + key: Buffer; + }> = []; + for (let i = 0; i < 1000; i++) { + const bucketIndex = Math.floor(Math.random() * (255 + 1)); + const bucketKey = lexi.pack(bucketIndex, 'hex'); + const lastUpdated = utils.getUnixtime(); + const nodeId = testNodesUtils.generateRandomNodeId(); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey(lastUpdated, nodeId); + data.push({ + bucketIndex, + bucketKey, + lastUpdated, + nodeId, + key: Buffer.concat([ + Buffer.from(bucketKey), + lastUpdatedKey + ]) + }); + const lastUpdatedDomain = ['lastUpdated', bucketKey]; + await db.put( + lastUpdatedDomain, + lastUpdatedKey, + null + ); + } + // LevelDB will store keys in lexicographic order + // Use the key property as a concatenated buffer of + // the bucket key and last updated and node ID + data.sort((a, b) => Buffer.compare(a.key, b.key)); + let i = 0; + for await (const key of lastUpdatedDb.createKeyStream()) { + const { + bucketIndex, + bucketKey, + lastUpdated, + nodeId + } = nodesUtils.parseLastUpdatedBucketsDbKey(key as Buffer); + expect(bucketIndex).toBe(data[i].bucketIndex); + expect(bucketKey).toBe(data[i].bucketKey); + expect(lastUpdated).toBe(data[i].lastUpdated); + expect(nodeId.equals(data[i].nodeId)).toBe(true); + i++; + } }); }); diff --git a/tests/nodes/utils.ts b/tests/nodes/utils.ts index fca9ad53bf..e6c603e14f 100644 --- a/tests/nodes/utils.ts +++ b/tests/nodes/utils.ts @@ -1,9 +1,27 @@ import type { NodeId, NodeAddress } from '@/nodes/types'; - import type PolykeyAgent from '@/PolykeyAgent'; import { IdInternal } from '@matrixai/id'; +import * as keysUtils from '@/keys/utils'; import { bigInt2Bytes } from '@/utils'; +/** + * Generate random `NodeId` + * If `readable` is `true`, then it will generate a `NodeId` where + * its binary string form will only contain hex characters + * However the `NodeId` will not be uniformly random as it will not cover + * the full space of possible node IDs + * Prefer to keep `readable` `false` if possible to ensure tests are robust + */ +function generateRandomNodeId(readable: boolean = false): NodeId { + if (readable) { + const random = keysUtils.getRandomBytesSync(16).toString('hex'); + return IdInternal.fromString(random); + } else { + const random = keysUtils.getRandomBytesSync(32); + return IdInternal.fromBuffer(random); + } +} + /** * Generate a deterministic NodeId for a specific bucket given an existing NodeId * This requires solving the bucket index (`i`) and distance equation: @@ -61,4 +79,4 @@ async function nodesConnect(localNode: PolykeyAgent, remoteNode: PolykeyAgent) { } as NodeAddress); } -export { generateNodeIdForBucket, nodesConnect }; +export { generateRandomNodeId, generateNodeIdForBucket, nodesConnect }; diff --git a/tests/notifications/utils.test.ts b/tests/notifications/utils.test.ts index 5a3b8a617b..fa6373e380 100644 --- a/tests/notifications/utils.test.ts +++ b/tests/notifications/utils.test.ts @@ -2,16 +2,15 @@ import type { Notification, NotificationData } from '@/notifications/types'; import type { VaultActions, 
VaultName } from '@/vaults/types'; import { createPublicKey } from 'crypto'; import { EmbeddedJWK, jwtVerify, exportJWK } from 'jose'; - import * as keysUtils from '@/keys/utils'; import * as notificationsUtils from '@/notifications/utils'; import * as notificationsErrors from '@/notifications/errors'; import * as vaultsUtils from '@/vaults/utils'; import * as nodesUtils from '@/nodes/utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Notifications utils', () => { - const nodeId = testUtils.generateRandomNodeId(); + const nodeId = testNodesUtils.generateRandomNodeId(); const nodeIdEncoded = nodesUtils.encodeNodeId(nodeId); const vaultId = vaultsUtils.generateVaultId(); const vaultIdEncoded = vaultsUtils.encodeVaultId(vaultId); @@ -206,7 +205,7 @@ describe('Notifications utils', () => { }); test('validates correct notifications', async () => { - const nodeIdOther = testUtils.generateRandomNodeId(); + const nodeIdOther = testNodesUtils.generateRandomNodeId(); const nodeIdOtherEncoded = nodesUtils.encodeNodeId(nodeIdOther); const generalNotification: Notification = { data: { diff --git a/tests/sigchain/Sigchain.test.ts b/tests/sigchain/Sigchain.test.ts index b6ff170ef5..c138e0a94d 100644 --- a/tests/sigchain/Sigchain.test.ts +++ b/tests/sigchain/Sigchain.test.ts @@ -10,8 +10,9 @@ import { KeyManager, utils as keysUtils } from '@/keys'; import { Sigchain } from '@/sigchain'; import * as claimsUtils from '@/claims/utils'; import * as sigchainErrors from '@/sigchain/errors'; -import { utils as nodesUtils } from '@/nodes'; +import * as nodesUtils from '@/nodes/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Sigchain', () => { const logger = new Logger('Sigchain Test', LogLevel.WARN, [ @@ -19,25 +20,25 @@ describe('Sigchain', () => { ]); const password = 'password'; const srcNodeIdEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeId2Encoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeId3Encoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdAEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdBEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdCEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdDEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); let mockedGenerateKeyPair: jest.SpyInstance; @@ -325,7 +326,7 @@ describe('Sigchain', () => { // Add 10 claims for (let i = 1; i <= 5; i++) { - const node2 = nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()); + const node2 = nodesUtils.encodeNodeId(testNodesUtils.generateRandomNodeId()); node2s.push(node2); const nodeLink: ClaimData = { type: 'node', @@ -374,7 +375,7 @@ describe('Sigchain', () => { for (let i = 1; i <= 30; i++) { // If even, add a node link if (i % 2 === 0) { - const node2 = nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()); + const node2 = nodesUtils.encodeNodeId(testNodesUtils.generateRandomNodeId()); nodes[i] = node2; const nodeLink: ClaimData = { type: 'node', diff --git a/tests/status/Status.test.ts 
b/tests/status/Status.test.ts index 311f89a11c..0b0744002d 100644 --- a/tests/status/Status.test.ts +++ b/tests/status/Status.test.ts @@ -6,15 +6,15 @@ import path from 'path'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import config from '@/config'; import { Status, errors as statusErrors } from '@/status'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Status', () => { const logger = new Logger(`${Status.name} Test`, LogLevel.WARN, [ new StreamHandler(), ]); - const nodeId1 = testUtils.generateRandomNodeId(); - const nodeId2 = testUtils.generateRandomNodeId(); - const nodeId3 = testUtils.generateRandomNodeId(); + const nodeId1 = testNodesUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); + const nodeId3 = testNodesUtils.generateRandomNodeId(); let dataDir: string; beforeEach(async () => { dataDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'status-test-')); diff --git a/tests/utils.test.ts b/tests/utils.test.ts index 5f6ee891e0..ca3f01a325 100644 --- a/tests/utils.test.ts +++ b/tests/utils.test.ts @@ -258,4 +258,116 @@ describe('utils', () => { expect(acquireOrder).toStrictEqual([lock1, lock2]); expect(releaseOrder).toStrictEqual([lock2, lock1]); }); + test.only('splitting buffers', () => { + const s1 = ''; + expect(s1.split('')).toStrictEqual([]); + const b1 = Buffer.from(s1); + expect(utils.bufferSplit(b1)).toStrictEqual([]); + + const s2 = '!'; + expect(s2.split('!')).toStrictEqual(['', '']); + const b2 = Buffer.from(s2); + expect(utils.bufferSplit( + b2, + Buffer.from('!') + )).toStrictEqual([Buffer.from(''), Buffer.from('')]); + + const s3 = '!a'; + expect(s3.split('!')).toStrictEqual(['', 'a']); + const b3 = Buffer.from(s3); + expect(utils.bufferSplit( + b3, + Buffer.from('!') + )).toStrictEqual([Buffer.from(''), Buffer.from('a')]); + + const s4 = 'a!'; + expect(s4.split('!')).toStrictEqual(['a', '']); + const b4 = Buffer.from(s4); + expect(utils.bufferSplit( + b4, + Buffer.from('!') + )).toStrictEqual([Buffer.from('a'), Buffer.from('')]); + + const s5 = 'a!b'; + expect(s5.split('!')).toStrictEqual(['a', 'b']); + const b5 = Buffer.from(s5); + expect(utils.bufferSplit( + b5, + Buffer.from('!') + )).toStrictEqual([Buffer.from('a'), Buffer.from('b')]); + + const s6 = '!a!b'; + expect(s6.split('!')).toStrictEqual(['', 'a', 'b']); + const b6 = Buffer.from(s6); + expect(utils.bufferSplit( + b6, + Buffer.from('!') + )).toStrictEqual([Buffer.from(''), Buffer.from('a'), Buffer.from('b')]); + + const s7 = 'a!b!'; + expect(s7.split('!')).toStrictEqual(['a', 'b', '']); + const b7 = Buffer.from(s7); + expect(utils.bufferSplit( + b7, + Buffer.from('!') + )).toStrictEqual([Buffer.from('a'), Buffer.from('b'), Buffer.from('')]); + + const s8 = '!a!b!'; + expect(s8.split('!')).toStrictEqual(['', 'a', 'b', '']); + const b8 = Buffer.from(s8); + expect(utils.bufferSplit( + b8, + Buffer.from('!') + )).toStrictEqual([Buffer.from(''), Buffer.from('a'), Buffer.from('b'), Buffer.from('')]); + + const s9 = '!a!b!'; + expect(s8.split('!', 2)).toStrictEqual(['', 'a']); + expect(s8.split('!', 3)).toStrictEqual(['', 'a', 'b']); + expect(s8.split('!', 4)).toStrictEqual(['', 'a', 'b', '']); + const b9 = Buffer.from(s9); + expect(utils.bufferSplit(b9, Buffer.from('!'), 2)).toStrictEqual([ + Buffer.from(''), + Buffer.from('a') + ]); + expect(utils.bufferSplit(b9, Buffer.from('!'), 3)).toStrictEqual([ + Buffer.from(''), + Buffer.from('a'), + Buffer.from('b') + ]); + expect(utils.bufferSplit(b9, 
Buffer.from('!'), 4)).toStrictEqual([ + Buffer.from(''), + Buffer.from('a'), + Buffer.from('b'), + Buffer.from('') + ]); + + const s10 = 'abcd'; + expect(s10.split('')).toStrictEqual(['a', 'b', 'c', 'd']); + const b10 = Buffer.from(s10); + expect(utils.bufferSplit(b10)).toStrictEqual([ + Buffer.from('a'), + Buffer.from('b'), + Buffer.from('c'), + Buffer.from('d'), + ]); + + // Splitting while concatenating the remaining chunk + const b11 = Buffer.from('!a!b!'); + expect(utils.bufferSplit(b11, Buffer.from('!'), 3, true)).toStrictEqual([ + Buffer.from(''), + Buffer.from('a'), + Buffer.from('b!') + ]); + const b12 = Buffer.from('!ab!cd!e!!!!'); + expect(utils.bufferSplit( + b12, + Buffer.from('!'), + 3, + true + )).toStrictEqual([ + Buffer.from(''), + Buffer.from('ab'), + Buffer.from('cd!e!!!!') + ]); + }); }); diff --git a/tests/utils.ts b/tests/utils.ts index 3f446b4656..f86a7084df 100644 --- a/tests/utils.ts +++ b/tests/utils.ts @@ -1,11 +1,9 @@ import type { StatusLive } from '@/status/types'; -import type { NodeId } from '@/nodes/types'; import type { Host } from '@/network/types'; import path from 'path'; import fs from 'fs'; import lock from 'fd-lock'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; -import { IdInternal } from '@matrixai/id'; import PolykeyAgent from '@/PolykeyAgent'; import Status from '@/status/Status'; import GRPCClientClient from '@/client/GRPCClientClient'; @@ -70,118 +68,118 @@ async function setupGlobalKeypair() { } } -/** - * Setup the global agent - * Use this in beforeAll, and use the closeGlobalAgent in afterAll - * This is expected to be executed by multiple worker processes - * Uses a references directory as a reference count - * Uses fd-lock to serialise access - * This means all test modules using this will be serialised - * Any beforeAll must use globalThis.maxTimeout - * Tips for usage: - * * Do not restart this global agent - * * Ensure client-side side-effects are removed at the end of each test - * * Ensure server-side side-effects are removed at the end of each test - */ +// FIXME: what is going on here? is this getting removed? 
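The splitting-buffers test above exercises a utils.bufferSplit(input, delimiter?, limit?, remaining?) helper whose implementation is not part of this diff. A minimal sketch that satisfies those expectations follows; the real src/utils.ts version may differ in details.

// Sketch only: Buffer splitting with String.prototype.split-like semantics.
// With no delimiter the input is split into single bytes; limit caps the
// number of chunks; remaining = true makes the final permitted chunk carry
// the rest of the input, delimiters included.
function bufferSplit(
  input: Buffer,
  delimiter?: Buffer,
  limit?: number,
  remaining: boolean = false,
): Array<Buffer> {
  const output: Array<Buffer> = [];
  let cursor = 0;
  if (delimiter == null || delimiter.byteLength === 0) {
    // Mirrors 'abcd'.split(''): one chunk per byte, and an empty input gives []
    while (cursor < input.byteLength && output.length !== limit) {
      output.push(input.subarray(cursor, cursor + 1));
      cursor += 1;
    }
    return output;
  }
  while (limit == null || output.length < limit) {
    // On the last permitted chunk, keep the unsplit remainder if requested
    if (remaining && limit != null && output.length === limit - 1) {
      output.push(input.subarray(cursor));
      return output;
    }
    const index = input.indexOf(delimiter, cursor);
    if (index === -1) {
      // No delimiter left; the final chunk may be empty, as in 'a!'.split('!')
      output.push(input.subarray(cursor));
      return output;
    }
    output.push(input.subarray(cursor, index));
    cursor = index + delimiter.byteLength;
  }
  return output;
}

Using subarray keeps each chunk as a view over the original buffer rather than a copy; jest's toStrictEqual compares Buffers by content, so views and fresh Buffer.from copies compare equal in the assertions above.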
+// /** +// * Setup the global agent +// * Use this in beforeAll, and use the closeGlobalAgent in afterAll +// * This is expected to be executed by multiple worker processes +// * Uses a references directory as a reference count +// * Uses fd-lock to serialise access +// * This means all test modules using this will be serialised +// * Any beforeAll must use globalThis.maxTimeout +// * Tips for usage: +// * * Do not restart this global agent +// * * Ensure client-side side-effects are removed at the end of each test +// * * Ensure server-side side-effects are removed at the end of each test +// */ async function setupGlobalAgent( logger: Logger = new Logger(setupGlobalAgent.name, LogLevel.WARN, [ new StreamHandler(), ]), -) { - const globalAgentPassword = 'password'; - const globalAgentDir = path.join(globalThis.dataDir, 'agent'); - // The references directory will act like our reference count - await fs.promises.mkdir(path.join(globalAgentDir, 'references'), { - recursive: true, - }); - const pid = process.pid.toString(); - // Plus 1 to the reference count - await fs.promises.writeFile(path.join(globalAgentDir, 'references', pid), ''); - const globalAgentLock = await fs.promises.open( - path.join(globalThis.dataDir, 'agent.lock'), - fs.constants.O_WRONLY | fs.constants.O_CREAT, - ); - while (!lock(globalAgentLock.fd)) { - await sleep(1000); - } - const status = new Status({ - statusPath: path.join(globalAgentDir, config.defaults.statusBase), - statusLockPath: path.join(globalAgentDir, config.defaults.statusLockBase), - fs, - }); - let statusInfo = await status.readStatus(); - if (statusInfo == null || statusInfo.status === 'DEAD') { - await PolykeyAgent.createPolykeyAgent({ - password: globalAgentPassword, - nodePath: globalAgentDir, - networkConfig: { - proxyHost: '127.0.0.1' as Host, - forwardHost: '127.0.0.1' as Host, - agentHost: '127.0.0.1' as Host, - clientHost: '127.0.0.1' as Host, - }, - keysConfig: { - rootKeyPairBits: 2048, - }, - seedNodes: {}, // Explicitly no seed nodes on startup - logger, - }); - statusInfo = await status.readStatus(); - } - return { - globalAgentDir, - globalAgentPassword, - globalAgentStatus: statusInfo as StatusLive, - globalAgentClose: async () => { - // Closing the global agent cannot be done in the globalTeardown - // This is due to a sequence of reasons: - // 1. The global agent is not started as a separate process - // 2. Because we need to be able to mock dependencies - // 3. This means it is part of a jest worker process - // 4. Which will block termination of the jest worker process - // 5. Therefore globalTeardown will never get to execute - // 6. The global agent is not part of globalSetup - // 7. Because not all tests need the global agent - // 8. 
Therefore setupGlobalAgent is lazy and executed by jest worker processes - try { - await fs.promises.rm(path.join(globalAgentDir, 'references', pid)); - // If the references directory is not empty - // there are other processes still using the global agent - try { - await fs.promises.rmdir(path.join(globalAgentDir, 'references')); - } catch (e) { - if (e.code === 'ENOTEMPTY') { - return; - } - throw e; - } - // Stopping may occur in a different jest worker process - // therefore we cannot rely on pkAgent, but instead use GRPC - const statusInfo = (await status.readStatus()) as StatusLive; - const grpcClient = await GRPCClientClient.createGRPCClientClient({ - nodeId: statusInfo.data.nodeId, - host: statusInfo.data.clientHost, - port: statusInfo.data.clientPort, - tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, - logger, - }); - const emptyMessage = new utilsPB.EmptyMessage(); - const meta = clientUtils.encodeAuthFromPassword(globalAgentPassword); - // This is asynchronous - await grpcClient.agentStop(emptyMessage, meta); - await grpcClient.destroy(); - await status.waitFor('DEAD'); - } finally { - lock.unlock(globalAgentLock.fd); - await globalAgentLock.close(); - } - }, - }; -} - -function generateRandomNodeId(): NodeId { - const random = keysUtils.getRandomBytesSync(16).toString('hex'); - return IdInternal.fromString(random); +): Promise { + throw Error('not implemented'); +// const globalAgentPassword = 'password'; +// const globalAgentDir = path.join(globalThis.dataDir, 'agent'); +// // The references directory will act like our reference count +// await fs.promises.mkdir(path.join(globalAgentDir, 'references'), { +// recursive: true, +// }); +// const pid = process.pid.toString(); +// // Plus 1 to the reference count +// await fs.promises.writeFile(path.join(globalAgentDir, 'references', pid), ''); +// const globalAgentLock = await fs.promises.open( +// path.join(globalThis.dataDir, 'agent.lock'), +// fs.constants.O_WRONLY | fs.constants.O_CREAT, +// ); +// while (!lock(globalAgentLock.fd)) { +// await sleep(1000); +// } +// const status = new Status({ +// statusPath: path.join(globalAgentDir, config.defaults.statusBase), +// statusLockPath: path.join(globalAgentDir, config.defaults.statusLockBase), +// fs, +// }); +// let statusInfo = await status.readStatus(); +// if (statusInfo == null || statusInfo.status === 'DEAD') { +// await PolykeyAgent.createPolykeyAgent({ +// password: globalAgentPassword, +// nodePath: globalAgentDir, +// networkConfig: { +// proxyHost: '127.0.0.1' as Host, +// forwardHost: '127.0.0.1' as Host, +// agentHost: '127.0.0.1' as Host, +// clientHost: '127.0.0.1' as Host, +// }, +// keysConfig: { +// rootKeyPairBits: 2048, +// }, +// seedNodes: {}, // Explicitly no seed nodes on startup +// logger, +// }); +// statusInfo = await status.readStatus(); +// } +// return { +// globalAgentDir, +// globalAgentPassword, +// globalAgentStatus: statusInfo as StatusLive, +// globalAgentClose: async () => { +// // Closing the global agent cannot be done in the globalTeardown +// // This is due to a sequence of reasons: +// // 1. The global agent is not started as a separate process +// // 2. Because we need to be able to mock dependencies +// // 3. This means it is part of a jest worker process +// // 4. Which will block termination of the jest worker process +// // 5. Therefore globalTeardown will never get to execute +// // 6. The global agent is not part of globalSetup +// // 7. Because not all tests need the global agent +// // 8. 
Therefore setupGlobalAgent is lazy and executed by jest worker processes +// try { +// await fs.promises.rm(path.join(globalAgentDir, 'references', pid)); +// // If the references directory is not empty +// // there are other processes still using the global agent +// try { +// await fs.promises.rmdir(path.join(globalAgentDir, 'references')); +// } catch (e) { +// if (e.code === 'ENOTEMPTY') { +// return; +// } +// throw e; +// } +// // Stopping may occur in a different jest worker process +// // therefore we cannot rely on pkAgent, but instead use GRPC +// const statusInfo = (await status.readStatus()) as StatusLive; +// const grpcClient = await GRPCClientClient.createGRPCClientClient({ +// nodeId: statusInfo.data.nodeId, +// host: statusInfo.data.clientHost, +// port: statusInfo.data.clientPort, +// tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, +// logger, +// }); +// const emptyMessage = new utilsPB.EmptyMessage(); +// const meta = clientUtils.encodeAuthFromPassword(globalAgentPassword); +// // This is asynchronous +// await grpcClient.agentStop(emptyMessage, meta); +// await grpcClient.destroy(); +// await status.waitFor('DEAD'); +// } finally { +// lock.unlock(globalAgentLock.fd); +// await globalAgentLock.close(); +// } +// }, +// }; } -export { setupGlobalKeypair, setupGlobalAgent, generateRandomNodeId }; +export { + setupGlobalKeypair, + setupGlobalAgent +}; diff --git a/tests/vaults/VaultOps.test.ts b/tests/vaults/VaultOps.test.ts index e376eb306d..b7bf5a753c 100644 --- a/tests/vaults/VaultOps.test.ts +++ b/tests/vaults/VaultOps.test.ts @@ -14,6 +14,7 @@ import * as vaultOps from '@/vaults/VaultOps'; import * as vaultsUtils from '@/vaults/utils'; import * as keysUtils from '@/keys/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('VaultOps', () => { const logger = new Logger('VaultOps', LogLevel.WARN, [new StreamHandler()]); @@ -28,7 +29,7 @@ describe('VaultOps', () => { let vaultsDbDomain: DBDomain; const dummyKeyManager = { getNodeId: () => { - return testUtils.generateRandomNodeId(); + return testNodesUtils.generateRandomNodeId(); }, } as KeyManager;
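Several of the test files above now import generateRandomNodeId from tests/nodes/utils.ts, which is not shown in this diff. A minimal sketch of that module, assuming it simply re-homes the helper that this diff deletes from tests/utils.ts:

// tests/nodes/utils.ts (assumed): the relocated helper, kept with the same
// body that was removed from tests/utils.ts above.
import type { NodeId } from '@/nodes/types';
import { IdInternal } from '@matrixai/id';
import * as keysUtils from '@/keys/utils';

// Generates a random NodeId for tests that only need a syntactically valid
// identifier rather than an identity derived from a real key pair.
function generateRandomNodeId(): NodeId {
  const random = keysUtils.getRandomBytesSync(16).toString('hex');
  return IdInternal.fromString(random);
}

export { generateRandomNodeId };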
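The dummyKeyManager in VaultOps.test.ts uses that helper through a partial stub: only the member the vaults code actually calls is provided, and the object literal is asserted to the full class type. A sketch of the pattern as it appears in the hunk above:

import type { KeyManager } from '@/keys';
import * as testNodesUtils from '../nodes/utils';

// Only getNodeId is implemented; any other KeyManager member the code under
// test touches will be undefined and fail loudly, keeping the test honest
// about its real dependencies.
const dummyKeyManager = {
  getNodeId: () => testNodesUtils.generateRandomNodeId(),
} as KeyManager;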