diff --git a/.eslintrc b/.eslintrc index f66c592e8..a26102885 100644 --- a/.eslintrc +++ b/.eslintrc @@ -113,6 +113,7 @@ "@typescript-eslint/no-misused-promises": ["error", { "checksVoidReturn": false }], + "@typescript-eslint/await-thenable": ["error"], "@typescript-eslint/naming-convention": [ "error", { diff --git a/package-lock.json b/package-lock.json index eca4f5a4d..94b1c92a1 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1593,31 +1593,21 @@ } }, "@matrixai/db": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/@matrixai/db/-/db-1.1.5.tgz", - "integrity": "sha512-zPpP/J1A3TLRaQKaGa5smualzjW4Rin4K48cpU5/9ThyXfpVBBp/mrkbDfjL/O5z6YTcuGVf2+yLck8tF8kVUw==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@matrixai/db/-/db-1.2.1.tgz", + "integrity": "sha512-1W8TORmRX3q3NugZFn0FTgI0mo/n0nWBTXHKXwwPfxtdyNfi18JCj3HVCwWdToOo87ypnS/mqLDIUTSHbF7F3Q==", "requires": { "@matrixai/async-init": "^1.6.0", - "@matrixai/logger": "^2.0.1", - "@matrixai/workers": "^1.2.3", - "abstract-leveldown": "^7.0.0", + "@matrixai/logger": "^2.1.0", + "@matrixai/workers": "^1.2.5", + "abstract-leveldown": "^7.2.0", "async-mutex": "^0.3.1", "level": "7.0.1", - "levelup": "^5.0.1", + "levelup": "^5.1.1", "sublevel-prefixer": "^1.0.0", - "subleveldown": "^5.0.1", + "subleveldown": "^6.0.1", "threads": "^1.6.5", "ts-custom-error": "^3.2.0" - }, - "dependencies": { - "async-mutex": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.3.2.tgz", - "integrity": "sha512-HuTK7E7MT7jZEh1P9GtRW9+aTWiDWWi9InbZ5hjxrnRa39KS4BW04+xLBhYNS2aXhHUIKZSw3gj4Pn1pj+qGAA==", - "requires": { - "tslib": "^2.3.1" - } - } } }, "@matrixai/id": { @@ -1718,6 +1708,12 @@ "integrity": "sha512-eZxlbI8GZscaGS7kkc/trHTT5xgrjH3/1n2JDwusC9iahPKWMRvRjJSAN5mCXviuTGQ/lHnhvv8Q1YTpnfz9gA==", "dev": true }, + "@types/abstract-leveldown": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/@types/abstract-leveldown/-/abstract-leveldown-7.2.0.tgz", + "integrity": "sha512-q5veSX6zjUy/DlDhR4Y4cU0k2Ar+DT2LUraP00T19WLmTO6Se1djepCCaqU6nQrwcJ5Hyo/CWqxTzrrFg8eqbQ==", + "dev": true + }, "@types/babel__core": { "version": "7.1.16", "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.1.16.tgz", @@ -1768,6 +1764,16 @@ "@types/node": "*" } }, + "@types/encoding-down": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/@types/encoding-down/-/encoding-down-5.0.0.tgz", + "integrity": "sha512-G0MlS/+/U2RIQLcSEhhAcoMrXw3hXUCFSKbhbeEljoKMra2kq+NPX6tfOveSWQLX2hJXBo+YrvKgAGe+tFL1Aw==", + "dev": true, + "requires": { + "@types/abstract-leveldown": "*", + "@types/level-codec": "*" + } + }, "@types/google-protobuf": { "version": "3.15.5", "resolved": "https://registry.npmjs.org/@types/google-protobuf/-/google-protobuf-3.15.5.tgz", @@ -1829,6 +1835,40 @@ "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=", "dev": true }, + "@types/level": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/@types/level/-/level-6.0.0.tgz", + "integrity": "sha512-NjaUpukKfCvnV4Wk0jUaodFi2/66HxgpYghc2aV8iP+zk2NMt/9ps1eVlifqOU/+eLzMlDIY69NWkbPaAstukQ==", + "dev": true, + "requires": { + "@types/abstract-leveldown": "*", + "@types/encoding-down": "*", + "@types/levelup": "*" + } + }, + "@types/level-codec": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/@types/level-codec/-/level-codec-9.0.1.tgz", + "integrity": "sha512-6z7DSlBsmbax3I/bV1Q6jT1nKquDjFl95LURVThdKtwILkRawLYtXdINW19xM95N5kqN2detWb2iGrbUlPwNyw==", + "dev": true + }, + 
"@types/level-errors": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/level-errors/-/level-errors-3.0.0.tgz", + "integrity": "sha512-/lMtoq/Cf/2DVOm6zE6ORyOM+3ZVm/BvzEZVxUhf6bgh8ZHglXlBqxbxSlJeVp8FCbD3IVvk/VbsaNmDjrQvqQ==", + "dev": true + }, + "@types/levelup": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@types/levelup/-/levelup-5.1.0.tgz", + "integrity": "sha512-XagSD3VJFWjZWeQnG4mL53PFRPmb6E7dKXdJxexVw85ki82BWOp68N+R6M1t9OYsbmlY+2S0GZcZtVH3gGbeDw==", + "dev": true, + "requires": { + "@types/abstract-leveldown": "*", + "@types/level-errors": "*", + "@types/node": "*" + } + }, "@types/nexpect": { "version": "0.4.31", "resolved": "https://registry.npmjs.org/@types/nexpect/-/nexpect-0.4.31.tgz", @@ -1844,9 +1884,9 @@ "integrity": "sha512-94+Ahf9IcaDuJTle/2b+wzvjmutxXAEXU6O81JHblYXUg2BDG+dnBy7VxIPHKAyEEDHzCMQydTJuWvrE+Aanzw==" }, "@types/node-forge": { - "version": "0.9.10", - "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-0.9.10.tgz", - "integrity": "sha512-+BbPlhZeYs/WETWftQi2LeRx9VviWSwawNo+Pid5qNrSZHb60loYjpph3OrbwXMMseadu9rE9NeK34r4BHT+QQ==", + "version": "0.10.10", + "resolved": "https://registry.npmjs.org/@types/node-forge/-/node-forge-0.10.10.tgz", + "integrity": "sha512-iixn5bedlE9fm/5mN7fPpXraXlxCVrnNWHZekys8c5fknridLVWGnNRqlaWpenwaijIuB3bNI0lEOm+JD6hZUA==", "dev": true, "requires": { "@types/node": "*" @@ -2681,9 +2721,9 @@ "dev": true }, "caniuse-lite": { - "version": "1.0.30001269", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001269.tgz", - "integrity": "sha512-UOy8okEVs48MyHYgV+RdW1Oiudl1H6KolybD6ZquD0VcrPSgj25omXO1S7rDydjpqaISCwA8Pyx+jUQKZwWO5w==", + "version": "1.0.30001332", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001332.tgz", + "integrity": "sha512-10T30NYOEQtN6C11YGg411yebhvpnC6Z102+B95eAsN0oB6KUs01ivE8u+G6FMIRtIrVlYXhL+LUwQ3/hXwDWw==", "dev": true }, "canonicalize": { @@ -2701,12 +2741,9 @@ } }, "catering": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/catering/-/catering-2.1.0.tgz", - "integrity": "sha512-M5imwzQn6y+ODBfgi+cfgZv2hIUI6oYU/0f35Mdb1ujGeqeoI5tOnl9Q13DTH7LW+7er+NYq8stNOKZD/Z3U/A==", - "requires": { - "queue-tick": "^1.0.0" - } + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/catering/-/catering-2.1.1.tgz", + "integrity": "sha512-K7Qy8O9p76sL3/3m7/zLKbRkyOlSZAgzEaLhyj2mXS8PsCud2Eo4hAb8aLtZqHh0QGqLcb9dlJSu6lHRVENm1w==" }, "chalk": { "version": "2.4.2", @@ -4811,11 +4848,6 @@ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.1.8.tgz", "integrity": "sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw==" }, - "immediate": { - "version": "3.3.0", - "resolved": "https://registry.npmjs.org/immediate/-/immediate-3.3.0.tgz", - "integrity": "sha512-HR7EVodfFUdQCTIeySw+WDRFJlPcLOJbXfwwZ7Oom6tjsvZ3bOkCDJHehQC3nxJrv7+f9XecwazynjU8e4Vw3Q==" - }, "import-fresh": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", @@ -6936,6 +6968,11 @@ } } }, + "node-abort-controller": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.0.1.tgz", + "integrity": "sha512-/ujIVxthRs+7q6hsdjHMaj8hRG9NuWmwrz+JdRwZ14jdFoKSkm+vDsCbF9PLpnSqjaWQJuTmVtcWHNLr+vrOFw==" + }, "node-fetch": { "version": "2.6.1", "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.1.tgz", @@ -7649,11 +7686,6 @@ "resolved": 
"https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==" }, - "queue-tick": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/queue-tick/-/queue-tick-1.0.0.tgz", - "integrity": "sha512-ULWhjjE8BmiICGn3G8+1L9wFpERNxkf8ysxkAer4+TFdRefDaXOCV5m92aMB9FtBVmn/8sETXLXY6BfW7hyaWQ==" - }, "ramda": { "version": "0.27.1", "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.27.1.tgz", @@ -8645,124 +8677,16 @@ "integrity": "sha1-TuRZ72Y6yFvyj8ZJ17eWX9ppEHM=" }, "subleveldown": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/subleveldown/-/subleveldown-5.0.1.tgz", - "integrity": "sha512-cVqd/URpp7si1HWu5YqQ3vqQkjuolAwHypY1B4itPlS71/lsf6TQPZ2Y0ijT22EYVkvH5ove9JFJf4u7VGPuZw==", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/subleveldown/-/subleveldown-6.0.1.tgz", + "integrity": "sha512-Cnf+cn2wISXU2xflY1SFIqfX4hG2d6lFk2P5F8RDQLmiqN9Ir4ExNfUFH6xnmizMseM/t+nMsDUKjN9Kw6ShFA==", "requires": { - "abstract-leveldown": "^6.3.0", - "encoding-down": "^6.2.0", + "abstract-leveldown": "^7.2.0", + "encoding-down": "^7.1.0", "inherits": "^2.0.3", "level-option-wrap": "^1.1.0", - "levelup": "^4.4.0", + "levelup": "^5.1.1", "reachdown": "^1.1.0" - }, - "dependencies": { - "abstract-leveldown": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/abstract-leveldown/-/abstract-leveldown-6.3.0.tgz", - "integrity": "sha512-TU5nlYgta8YrBMNpc9FwQzRbiXsj49gsALsXadbGHt9CROPzX5fB0rWDR5mtdpOOKa5XqRFpbj1QroPAoPzVjQ==", - "requires": { - "buffer": "^5.5.0", - "immediate": "^3.2.3", - "level-concat-iterator": "~2.0.0", - "level-supports": "~1.0.0", - "xtend": "~4.0.0" - } - }, - "buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "requires": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "deferred-leveldown": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/deferred-leveldown/-/deferred-leveldown-5.3.0.tgz", - "integrity": "sha512-a59VOT+oDy7vtAbLRCZwWgxu2BaCfd5Hk7wxJd48ei7I+nsg8Orlb9CLG0PMZienk9BSUKgeAqkO2+Lw+1+Ukw==", - "requires": { - "abstract-leveldown": "~6.2.1", - "inherits": "^2.0.3" - }, - "dependencies": { - "abstract-leveldown": { - "version": "6.2.3", - "resolved": "https://registry.npmjs.org/abstract-leveldown/-/abstract-leveldown-6.2.3.tgz", - "integrity": "sha512-BsLm5vFMRUrrLeCcRc+G0t2qOaTzpoJQLOubq2XM72eNpjF5UdU5o/5NvlNhx95XHcAvcl8OMXr4mlg/fRgUXQ==", - "requires": { - "buffer": "^5.5.0", - "immediate": "^3.2.3", - "level-concat-iterator": "~2.0.0", - "level-supports": "~1.0.0", - "xtend": "~4.0.0" - } - } - } - }, - "encoding-down": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/encoding-down/-/encoding-down-6.3.0.tgz", - "integrity": "sha512-QKrV0iKR6MZVJV08QY0wp1e7vF6QbhnbQhb07bwpEyuz4uZiZgPlEGdkCROuFkUwdxlFaiPIhjyarH1ee/3vhw==", - "requires": { - "abstract-leveldown": "^6.2.1", - "inherits": "^2.0.3", - "level-codec": "^9.0.0", - "level-errors": "^2.0.0" - } - }, - "level-codec": { - "version": "9.0.2", - "resolved": "https://registry.npmjs.org/level-codec/-/level-codec-9.0.2.tgz", - "integrity": "sha512-UyIwNb1lJBChJnGfjmO0OR+ezh2iVu1Kas3nvBS/BzGnx79dv6g7unpKIDNPMhfdTEGoc7mC8uAu51XEtX+FHQ==", - "requires": { - "buffer": "^5.6.0" - } - }, - "level-concat-iterator": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/level-concat-iterator/-/level-concat-iterator-2.0.1.tgz", - "integrity": "sha512-OTKKOqeav2QWcERMJR7IS9CUo1sHnke2C0gkSmcR7QuEtFNLLzHQAvnMw8ykvEcv0Qtkg0p7FOwP1v9e5Smdcw==" - }, - "level-errors": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/level-errors/-/level-errors-2.0.1.tgz", - "integrity": "sha512-UVprBJXite4gPS+3VznfgDSU8PTRuVX0NXwoWW50KLxd2yw4Y1t2JUR5In1itQnudZqRMT9DlAM3Q//9NCjCFw==", - "requires": { - "errno": "~0.1.1" - } - }, - "level-iterator-stream": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/level-iterator-stream/-/level-iterator-stream-4.0.2.tgz", - "integrity": "sha512-ZSthfEqzGSOMWoUGhTXdX9jv26d32XJuHz/5YnuHZzH6wldfWMOVwI9TBtKcya4BKTyTt3XVA0A3cF3q5CY30Q==", - "requires": { - "inherits": "^2.0.4", - "readable-stream": "^3.4.0", - "xtend": "^4.0.2" - } - }, - "level-supports": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/level-supports/-/level-supports-1.0.1.tgz", - "integrity": "sha512-rXM7GYnW8gsl1vedTJIbzOrRv85c/2uCMpiiCzO2fndd06U/kUXEEU9evYn4zFggBOg36IsBW8LzqIpETwwQzg==", - "requires": { - "xtend": "^4.0.2" - } - }, - "levelup": { - "version": "4.4.0", - "resolved": "https://registry.npmjs.org/levelup/-/levelup-4.4.0.tgz", - "integrity": "sha512-94++VFO3qN95cM/d6eBXvd894oJE0w3cInq9USsyQzzoJxmiYzPAocNcuGCPGGjoXqDVJcr3C1jzt1TSjyaiLQ==", - "requires": { - "deferred-leveldown": "~5.3.0", - "level-errors": "~2.0.0", - "level-iterator-stream": "~4.0.0", - "level-supports": "~1.0.0", - "xtend": "~4.0.0" - } - } } }, "supports-color": { @@ -9615,11 +9539,6 @@ "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", "dev": true }, - "xtend": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", - "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==" - }, "y18n": { "version": "5.0.8", "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", diff --git a/package.json b/package.json index cf62e9042..5e5dcaacf 100644 --- a/package.json +++ b/package.json @@ -73,7 +73,7 @@ "dependencies": { "@grpc/grpc-js": "1.3.7", "@matrixai/async-init": "^1.6.0", - "@matrixai/db": "^1.1.5", + "@matrixai/db": "^1.2.1", "@matrixai/id": "^3.3.2", "@matrixai/logger": "^2.1.0", "@matrixai/workers": "^1.2.5", @@ -94,6 +94,7 @@ "jose": "^4.3.6", "lexicographic-integer": "^1.1.0", "multiformats": "^9.4.8", + "node-abort-controller": "^3.0.1", "node-forge": "^0.10.0", "pako": "^1.0.11", "prompts": "^2.4.1", @@ -109,9 +110,10 @@ "@types/cross-spawn": "^6.0.2", "@types/google-protobuf": "^3.7.4", "@types/jest": "^26.0.20", + "@types/level": "^6.0.0", "@types/nexpect": "^0.4.31", "@types/node": "^14.14.35", - "@types/node-forge": "^0.9.7", + "@types/node-forge": "^0.10.4", "@types/pako": "^1.0.2", "@types/prompts": "^2.0.13", "@types/readable-stream": "^2.3.11", diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index ba9cafc37..1bae63c22 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -1,6 +1,6 @@ import type { FileSystem } from './types'; import type { PolykeyWorkerManagerInterface } from './workers/types'; -import type { Host, Port } from './network/types'; +import type { ConnectionData, Host, Port } from './network/types'; import type { SeedNodes } from './nodes/types'; import type { KeyManagerChangeData } from './keys/types'; import path from 'path'; @@ -8,6 +8,8 @@ import process from 'process'; import Logger from '@matrixai/logger'; import { 
DB } from '@matrixai/db'; import { CreateDestroyStartStop } from '@matrixai/async-init/dist/CreateDestroyStartStop'; +import Queue from './nodes/Queue'; +import * as networkUtils from './network/utils'; import { KeyManager, utils as keysUtils } from './keys'; import { Status } from './status'; import { Schema } from './schema'; @@ -55,8 +57,10 @@ class PolykeyAgent { */ public static readonly eventSymbols = { [KeyManager.name]: Symbol(KeyManager.name), + [Proxy.name]: Symbol(Proxy.name), } as { readonly KeyManager: unique symbol; + readonly Proxy: unique symbol; }; public static async createPolykeyAgent({ @@ -80,6 +84,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, + queue, nodeConnectionManager, nodeManager, discovery, @@ -125,6 +130,7 @@ class PolykeyAgent { gestaltGraph?: GestaltGraph; proxy?: Proxy; nodeGraph?: NodeGraph; + queue?: Queue; nodeConnectionManager?: NodeConnectionManager; nodeManager?: NodeManager; discovery?: Discovery; @@ -262,6 +268,8 @@ class PolykeyAgent { proxy ?? new Proxy({ ...proxyConfig_, + connectionEstablishedCallback: (data) => + events.emitAsync(PolykeyAgent.eventSymbols.Proxy, data), logger: logger.getChild(Proxy.name), }); nodeGraph = @@ -272,12 +280,18 @@ class PolykeyAgent { keyManager, logger: logger.getChild(NodeGraph.name), })); + queue = + queue ?? + new Queue({ + logger: logger.getChild(Queue.name), + }); nodeConnectionManager = nodeConnectionManager ?? new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, seedNodes, ...nodeConnectionManagerConfig_, logger: logger.getChild(NodeConnectionManager.name), @@ -290,8 +304,10 @@ class PolykeyAgent { keyManager, nodeGraph, nodeConnectionManager, + queue, logger: logger.getChild(NodeManager.name), }); + await nodeManager.start(); discovery = discovery ?? 
(await Discovery.createDiscovery({ @@ -375,6 +391,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, + queue, nodeConnectionManager, nodeManager, discovery, @@ -407,6 +424,7 @@ class PolykeyAgent { public readonly gestaltGraph: GestaltGraph; public readonly proxy: Proxy; public readonly nodeGraph: NodeGraph; + public readonly queue: Queue; public readonly nodeConnectionManager: NodeConnectionManager; public readonly nodeManager: NodeManager; public readonly discovery: Discovery; @@ -432,6 +450,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, + queue, nodeConnectionManager, nodeManager, discovery, @@ -455,6 +474,7 @@ class PolykeyAgent { gestaltGraph: GestaltGraph; proxy: Proxy; nodeGraph: NodeGraph; + queue: Queue; nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; discovery: Discovery; @@ -480,6 +500,7 @@ class PolykeyAgent { this.proxy = proxy; this.discovery = discovery; this.nodeGraph = nodeGraph; + this.queue = queue; this.nodeConnectionManager = nodeConnectionManager; this.nodeManager = nodeManager; this.vaultManager = vaultManager; @@ -527,7 +548,7 @@ class PolykeyAgent { await this.status.updateStatusLive({ nodeId: data.nodeId, }); - await this.nodeManager.refreshBuckets(); + await this.nodeManager.resetBuckets(); const tlsConfig = { keyPrivatePem: keysUtils.privateKeyToPem( data.rootKeyPair.privateKey, @@ -539,6 +560,31 @@ class PolykeyAgent { this.logger.info(`${KeyManager.name} change propagated`); }, ); + this.events.on( + PolykeyAgent.eventSymbols.Proxy, + async (data: ConnectionData) => { + if (data.type === 'reverse') { + const address = networkUtils.buildAddress( + data.remoteHost, + data.remotePort, + ); + const nodeIdEncoded = nodesUtils.encodeNodeId(data.remoteNodeId); + this.logger.info( + `Reverse connection adding ${nodeIdEncoded}:${address} to ${NodeGraph.name}`, + ); + // Reverse connection was established and authenticated, + // add it to the node graph + await this.nodeManager.setNode( + data.remoteNodeId, + { + host: data.remoteHost, + port: data.remotePort, + }, + false, + ); + } + }, + ); const networkConfig_ = { ...config.defaults.networkConfig, ...utils.filterEmptyObject(networkConfig), @@ -613,9 +659,11 @@ class PolykeyAgent { proxyPort: networkConfig_.proxyPort, tlsConfig, }); - await this.nodeConnectionManager.start(); + await this.queue.start(); + await this.nodeManager.start(); + await this.nodeConnectionManager.start({ nodeManager: this.nodeManager }); await this.nodeGraph.start({ fresh }); - await this.nodeConnectionManager.syncNodeGraph(); + await this.nodeConnectionManager.syncNodeGraph(false); await this.discovery.start({ fresh }); await this.vaultManager.start({ fresh }); await this.notificationsManager.start({ fresh }); @@ -668,6 +716,8 @@ class PolykeyAgent { await this.discovery.stop(); await this.nodeConnectionManager.stop(); await this.nodeGraph.stop(); + await this.nodeManager.stop(); + await this.queue.stop(); await this.proxy.stop(); await this.grpcServerAgent.stop(); await this.grpcServerClient.stop(); diff --git a/src/PolykeyClient.ts b/src/PolykeyClient.ts index b124feefa..bea2b830b 100644 --- a/src/PolykeyClient.ts +++ b/src/PolykeyClient.ts @@ -1,4 +1,4 @@ -import type { FileSystem } from './types'; +import type { FileSystem, Timer } from './types'; import type { NodeId } from './nodes/types'; import type { Host, Port } from './network/types'; @@ -29,7 +29,7 @@ class PolykeyClient { nodePath = config.defaults.nodePath, session, grpcClient, - timeout, + timer, fs = require('fs'), logger = new 
Logger(this.name), fresh = false, @@ -38,7 +38,7 @@ class PolykeyClient { host: Host; port: Port; nodePath?: string; - timeout?: number; + timer?: Timer; session?: Session; grpcClient?: GRPCClientClient; fs?: FileSystem; @@ -66,7 +66,7 @@ class PolykeyClient { port, tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, session, - timeout, + timer, logger: logger.getChild(GRPCClientClient.name), })); const pkClient = new PolykeyClient({ diff --git a/src/acl/ACL.ts b/src/acl/ACL.ts index 358663d51..3f8e9d5b6 100644 --- a/src/acl/ACL.ts +++ b/src/acl/ACL.ts @@ -341,7 +341,7 @@ class ACL { ); const ops: Array = []; if (permId == null) { - const permId = await this.generatePermId(); + const permId = this.generatePermId(); const permRef = { count: 1, object: { @@ -554,7 +554,7 @@ class ACL { }); } } - const permId = await this.generatePermId(); + const permId = this.generatePermId(); const permRef = { count: nodeIds.length, object: perm, @@ -597,7 +597,7 @@ class ACL { ); const ops: Array = []; if (permId == null) { - const permId = await this.generatePermId(); + const permId = this.generatePermId(); const permRef = { count: 1, object: perm, diff --git a/src/agent/GRPCClientAgent.ts b/src/agent/GRPCClientAgent.ts index 4190f66b6..731e7213e 100644 --- a/src/agent/GRPCClientAgent.ts +++ b/src/agent/GRPCClientAgent.ts @@ -10,6 +10,7 @@ import type * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import type * as vaultsPB from '../proto/js/polykey/v1/vaults/vaults_pb'; import type * as nodesPB from '../proto/js/polykey/v1/nodes/nodes_pb'; import type * as notificationsPB from '../proto/js/polykey/v1/notifications/notifications_pb'; +import type { Timer } from '../types'; import Logger from '@matrixai/logger'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import * as agentErrors from './errors'; @@ -32,7 +33,7 @@ class GRPCClientAgent extends GRPCClient { port, tlsConfig, proxyConfig, - timeout = Infinity, + timer, destroyCallback = async () => {}, logger = new Logger(this.name), }: { @@ -41,7 +42,7 @@ class GRPCClientAgent extends GRPCClient { port: Port; tlsConfig?: Partial; proxyConfig?: ProxyConfig; - timeout?: number; + timer?: Timer; destroyCallback?: () => Promise; logger?: Logger; }): Promise { @@ -53,7 +54,7 @@ class GRPCClientAgent extends GRPCClient { port, tlsConfig, proxyConfig, - timeout, + timer, logger, }); const grpcClientAgent = new GRPCClientAgent({ diff --git a/src/agent/service/nodesClosestLocalNodesGet.ts b/src/agent/service/nodesClosestLocalNodesGet.ts index 559337c9d..be91f41e0 100644 --- a/src/agent/service/nodesClosestLocalNodesGet.ts +++ b/src/agent/service/nodesClosestLocalNodesGet.ts @@ -1,5 +1,5 @@ import type * as grpc from '@grpc/grpc-js'; -import type { NodeConnectionManager } from '../../nodes'; +import type { NodeGraph } from '../../nodes'; import type { NodeId } from '../../nodes/types'; import { utils as grpcUtils } from '../../grpc'; import { utils as nodesUtils } from '../../nodes'; @@ -11,11 +11,7 @@ import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; * Retrieves the local nodes (i.e. from the current node) that are closest * to some provided node ID. 
*/ -function nodesClosestLocalNodesGet({ - nodeConnectionManager, -}: { - nodeConnectionManager: NodeConnectionManager; -}) { +function nodesClosestLocalNodesGet({ nodeGraph }: { nodeGraph: NodeGraph }) { return async ( call: grpc.ServerUnaryCall, callback: grpc.sendUnaryData, @@ -38,17 +34,15 @@ function nodesClosestLocalNodesGet({ }, ); // Get all local nodes that are closest to the target node from the request - const closestNodes = await nodeConnectionManager.getClosestLocalNodes( - nodeId, - ); - for (const node of closestNodes) { + const closestNodes = await nodeGraph.getClosestNodes(nodeId); + for (const [nodeId, nodeData] of closestNodes) { const addressMessage = new nodesPB.Address(); - addressMessage.setHost(node.address.host); - addressMessage.setPort(node.address.port); + addressMessage.setHost(nodeData.address.host); + addressMessage.setPort(nodeData.address.port); // Add the node to the response's map (mapping of node ID -> node address) response .getNodeTableMap() - .set(nodesUtils.encodeNodeId(node.id), addressMessage); + .set(nodesUtils.encodeNodeId(nodeId), addressMessage); } callback(null, response); return; diff --git a/src/bin/nodes/CommandGetAll.ts b/src/bin/nodes/CommandGetAll.ts new file mode 100644 index 000000000..243991fc9 --- /dev/null +++ b/src/bin/nodes/CommandGetAll.ts @@ -0,0 +1,77 @@ +import type PolykeyClient from '../../PolykeyClient'; +import type nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; +import CommandPolykey from '../CommandPolykey'; +import * as binUtils from '../utils'; +import * as binOptions from '../utils/options'; +import * as binProcessors from '../utils/processors'; + +class CommandGetAll extends CommandPolykey { + constructor(...args: ConstructorParameters) { + super(...args); + this.name('getall'); + this.description('Get all Nodes from Node Graph'); + this.addOption(binOptions.nodeId); + this.addOption(binOptions.clientHost); + this.addOption(binOptions.clientPort); + this.action(async (options) => { + const { default: PolykeyClient } = await import('../../PolykeyClient'); + const utilsPB = await import('../../proto/js/polykey/v1/utils/utils_pb'); + + const clientOptions = await binProcessors.processClientOptions( + options.nodePath, + options.nodeId, + options.clientHost, + options.clientPort, + this.fs, + this.logger.getChild(binProcessors.processClientOptions.name), + ); + const meta = await binProcessors.processAuthentication( + options.passwordFile, + this.fs, + ); + let pkClient: PolykeyClient; + this.exitHandlers.handlers.push(async () => { + if (pkClient != null) await pkClient.stop(); + }); + let result: nodesPB.NodeBuckets; + try { + pkClient = await PolykeyClient.createPolykeyClient({ + nodePath: options.nodePath, + nodeId: clientOptions.nodeId, + host: clientOptions.clientHost, + port: clientOptions.clientPort, + logger: this.logger.getChild(PolykeyClient.name), + }); + const emptyMessage = new utilsPB.EmptyMessage(); + result = await binUtils.retryAuthentication( + (auth) => pkClient.grpcClient.nodesGetAll(emptyMessage, auth), + meta, + ); + let output: any = {}; + for (const [bucketIndex, bucket] of result.getBucketsMap().entries()) { + output[bucketIndex] = {}; + for (const [encodedId, address] of bucket + .getNodeTableMap() + .entries()) { + output[bucketIndex][encodedId] = {}; + output[bucketIndex][encodedId].host = address.getHost(); + output[bucketIndex][encodedId].port = address.getPort(); + } + } + if (options.format === 'human') { + output = [result.getBucketsMap().getEntryList()]; + } + 
process.stdout.write( + binUtils.outputFormatter({ + type: options.format === 'json' ? 'json' : 'list', + data: output, + }), + ); + } finally { + if (pkClient! != null) await pkClient.stop(); + } + }); + } +} + +export default CommandGetAll; diff --git a/src/bin/nodes/CommandNodes.ts b/src/bin/nodes/CommandNodes.ts index 6827d01f3..0866a088f 100644 --- a/src/bin/nodes/CommandNodes.ts +++ b/src/bin/nodes/CommandNodes.ts @@ -2,6 +2,7 @@ import CommandAdd from './CommandAdd'; import CommandClaim from './CommandClaim'; import CommandFind from './CommandFind'; import CommandPing from './CommandPing'; +import CommandGetAll from './CommandGetAll'; import CommandPolykey from '../CommandPolykey'; class CommandNodes extends CommandPolykey { @@ -13,6 +14,7 @@ class CommandNodes extends CommandPolykey { this.addCommand(new CommandClaim(...args)); this.addCommand(new CommandFind(...args)); this.addCommand(new CommandPing(...args)); + this.addCommand(new CommandGetAll(...args)); } } diff --git a/src/bootstrap/utils.ts b/src/bootstrap/utils.ts index 422709b01..60844fc19 100644 --- a/src/bootstrap/utils.ts +++ b/src/bootstrap/utils.ts @@ -4,6 +4,7 @@ import path from 'path'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; import * as bootstrapErrors from './errors'; +import Queue from '../nodes/Queue'; import { IdentitiesManager } from '../identities'; import { SessionManager } from '../sessions'; import { Status } from '../status'; @@ -141,10 +142,12 @@ async function bootstrapState({ keyManager, logger: logger.getChild(NodeGraph.name), }); + const queue = new Queue({ logger }); const nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, logger: logger.getChild(NodeConnectionManager.name), }); const nodeManager = new NodeManager({ @@ -153,6 +156,7 @@ async function bootstrapState({ nodeGraph, nodeConnectionManager, sigchain, + queue, logger: logger.getChild(NodeManager.name), }); const notificationsManager = diff --git a/src/claims/utils.ts b/src/claims/utils.ts index faee8ea4b..ea5ecf15d 100644 --- a/src/claims/utils.ts +++ b/src/claims/utils.ts @@ -62,7 +62,7 @@ async function createClaim({ const byteEncoder = new TextEncoder(); const claim = new GeneralSign(byteEncoder.encode(canonicalizedPayload)); claim - .addSignature(await createPrivateKey(privateKey)) + .addSignature(createPrivateKey(privateKey)) .setProtectedHeader({ alg: alg, kid: kid }); const signedClaim = await claim.sign(); return signedClaim as ClaimEncoded; @@ -83,14 +83,14 @@ async function signExistingClaim({ kid: NodeIdEncoded; alg?: string; }): Promise { - const decodedClaim = await decodeClaim(claim); + const decodedClaim = decodeClaim(claim); // Reconstruct the claim with our own signature // Make the payload contents deterministic const canonicalizedPayload = canonicalize(decodedClaim.payload); const byteEncoder = new TextEncoder(); const newClaim = new GeneralSign(byteEncoder.encode(canonicalizedPayload)); newClaim - .addSignature(await createPrivateKey(privateKey)) + .addSignature(createPrivateKey(privateKey)) .setProtectedHeader({ alg: alg, kid: kid }); const signedClaim = await newClaim.sign(); // Add our signature to the existing claim diff --git a/src/client/GRPCClientClient.ts b/src/client/GRPCClientClient.ts index 3b07305ea..78f1f398a 100644 --- a/src/client/GRPCClientClient.ts +++ b/src/client/GRPCClientClient.ts @@ -14,6 +14,7 @@ import type * as identitiesPB from '../proto/js/polykey/v1/identities/identities import type * as keysPB from 
'../proto/js/polykey/v1/keys/keys_pb'; import type * as permissionsPB from '../proto/js/polykey/v1/permissions/permissions_pb'; import type * as secretsPB from '../proto/js/polykey/v1/secrets/secrets_pb'; +import type { Timer } from '../types'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import Logger from '@matrixai/logger'; import * as clientErrors from './errors'; @@ -38,7 +39,7 @@ class GRPCClientClient extends GRPCClient { tlsConfig, proxyConfig, session, - timeout = Infinity, + timer, destroyCallback = async () => {}, logger = new Logger(this.name), }: { @@ -48,7 +49,7 @@ class GRPCClientClient extends GRPCClient { tlsConfig?: Partial; proxyConfig?: ProxyConfig; session?: Session; - timeout?: number; + timer?: Timer; destroyCallback?: () => Promise; logger?: Logger; }): Promise { @@ -64,7 +65,7 @@ class GRPCClientClient extends GRPCClient { port, tlsConfig, proxyConfig, - timeout, + timer, interceptors, logger, }); @@ -565,6 +566,14 @@ class GRPCClientClient extends GRPCClient { )(...args); } + @ready(new clientErrors.ErrorClientClientDestroyed()) + public nodesGetAll(...args) { + return grpcUtils.promisifyUnaryCall( + this.client, + this.client.nodesGetAll, + )(...args); + } + @ready(new clientErrors.ErrorClientClientDestroyed()) public identitiesAuthenticate(...args) { return grpcUtils.promisifyReadableStreamCall( diff --git a/src/client/service/index.ts b/src/client/service/index.ts index 494c5088c..b3f72211c 100644 --- a/src/client/service/index.ts +++ b/src/client/service/index.ts @@ -58,6 +58,7 @@ import nodesAdd from './nodesAdd'; import nodesClaim from './nodesClaim'; import nodesFind from './nodesFind'; import nodesPing from './nodesPing'; +import nodesGetAll from './nodesGetAll'; import notificationsClear from './notificationsClear'; import notificationsRead from './notificationsRead'; import notificationsSend from './notificationsSend'; @@ -161,6 +162,7 @@ function createService({ nodesClaim: nodesClaim(container), nodesFind: nodesFind(container), nodesPing: nodesPing(container), + nodesGetAll: nodesGetAll(container), notificationsClear: notificationsClear(container), notificationsRead: notificationsRead(container), notificationsSend: notificationsSend(container), diff --git a/src/client/service/nodesFind.ts b/src/client/service/nodesFind.ts index 7982fd9ad..080d9aae3 100644 --- a/src/client/service/nodesFind.ts +++ b/src/client/service/nodesFind.ts @@ -7,6 +7,7 @@ import { utils as grpcUtils } from '../../grpc'; import { validateSync, utils as validationUtils } from '../../validation'; import { matchSync } from '../../utils'; import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; +import * as nodesErrors from '../../nodes/errors'; /** * Attempts to get the node address of a provided node ID (by contacting @@ -44,6 +45,7 @@ function nodesFind({ }, ); const address = await nodeConnectionManager.findNode(nodeId); + if (address == null) throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); response .setNodeId(nodesUtils.encodeNodeId(nodeId)) .setAddress( diff --git a/src/client/service/nodesGetAll.ts b/src/client/service/nodesGetAll.ts new file mode 100644 index 000000000..bc01e84e0 --- /dev/null +++ b/src/client/service/nodesGetAll.ts @@ -0,0 +1,71 @@ +import type * as grpc from '@grpc/grpc-js'; +import type { Authenticate } from '../types'; +import type { KeyManager } from '../../keys'; +import type { NodeId } from '../../nodes/types'; +import type * as utilsPB from '../../proto/js/polykey/v1/utils/utils_pb'; +import { 
IdInternal } from '@matrixai/id'; +import { utils as nodesUtils } from '../../nodes'; +import { utils as grpcUtils } from '../../grpc'; +import * as nodesPB from '../../proto/js/polykey/v1/nodes/nodes_pb'; + +/** + * Retrieves all nodes from all buckets in the NodeGraph. + */ +function nodesGetAll({ + // NodeGraph, + keyManager, + authenticate, +}: { + // NodeGraph: NodeGraph; + keyManager: KeyManager; + authenticate: Authenticate; +}) { + return async ( + call: grpc.ServerUnaryCall, + callback: grpc.sendUnaryData, + ): Promise => { + try { + const response = new nodesPB.NodeBuckets(); + const metadata = await authenticate(call.metadata); + call.sendMetadata(metadata); + // FIXME: + // const buckets = await nodeGraph.getAllBuckets(); + const buckets: any = []; + for (const b of buckets) { + let index; + for (const id of Object.keys(b)) { + const encodedId = nodesUtils.encodeNodeId( + IdInternal.fromString(id), + ); + const address = new nodesPB.Address() + .setHost(b[id].address.host) + .setPort(b[id].address.port); + // For every node in every bucket, add it to our message + if (!index) { + index = nodesUtils.bucketIndex( + keyManager.getNodeId(), + IdInternal.fromString(id), + ); + } + // Need to either add node to an existing bucket, or create a new + // bucket (if doesn't exist) + const bucket = response.getBucketsMap().get(index); + if (bucket) { + bucket.getNodeTableMap().set(encodedId, address); + } else { + const newBucket = new nodesPB.NodeTable(); + newBucket.getNodeTableMap().set(encodedId, address); + response.getBucketsMap().set(index, newBucket); + } + } + } + callback(null, response); + return; + } catch (e) { + callback(grpcUtils.fromError(e)); + return; + } + }; +} + +export default nodesGetAll; diff --git a/src/discovery/Discovery.ts b/src/discovery/Discovery.ts index 900b6b63f..b6a50a196 100644 --- a/src/discovery/Discovery.ts +++ b/src/discovery/Discovery.ts @@ -148,7 +148,7 @@ class Discovery { reverse: true, }); for await (const o of keyStream) { - latestId = IdInternal.fromBuffer(o); + latestId = IdInternal.fromBuffer(o as Buffer); } this.discoveryQueueIdGenerator = discoveryUtils.createDiscoveryQueueIdGenerator(latestId); @@ -208,8 +208,9 @@ class Discovery { while (true) { if (!(await this.queueIsEmpty())) { for await (const o of this.discoveryQueueDb.createReadStream()) { - const vertexId = IdInternal.fromBuffer(o.key) as DiscoveryQueueId; - const data = o.value as Buffer; + const kv = o as any; + const vertexId = IdInternal.fromBuffer(kv.key) as DiscoveryQueueId; + const data = kv.value as Buffer; const vertex = await this.db.deserializeDecrypt( data, false, @@ -438,7 +439,9 @@ class Discovery { limit: 1, }); for await (const o of keyStream) { - nextDiscoveryQueueId = IdInternal.fromBuffer(o); + nextDiscoveryQueueId = IdInternal.fromBuffer( + o as Buffer, + ); } if (nextDiscoveryQueueId == null) { return true; diff --git a/src/grpc/GRPCClient.ts b/src/grpc/GRPCClient.ts index b55d3a275..13042ec5a 100644 --- a/src/grpc/GRPCClient.ts +++ b/src/grpc/GRPCClient.ts @@ -9,6 +9,7 @@ import type { import type { NodeId } from '../nodes/types'; import type { Certificate } from '../keys/types'; import type { Host, Port, TLSConfig, ProxyConfig } from '../network/types'; +import type { Timer } from '../types'; import http2 from 'http2'; import Logger from '@matrixai/logger'; import * as grpc from '@grpc/grpc-js'; @@ -44,7 +45,7 @@ abstract class GRPCClient { port, tlsConfig, proxyConfig, - timeout = Infinity, + timer, interceptors = [], logger = new Logger(this.name), }: { @@ 
-58,7 +59,7 @@ abstract class GRPCClient { port: Port; tlsConfig?: Partial; proxyConfig?: ProxyConfig; - timeout?: number; + timer?: Timer; interceptors?: Array; logger?: Logger; }): Promise<{ @@ -123,9 +124,17 @@ abstract class GRPCClient { } const waitForReady = promisify(client.waitForReady).bind(client); // Add the current unix time because grpc expects the milliseconds since unix epoch - timeout += Date.now(); try { - await waitForReady(timeout); + if (timer != null) { + await Promise.race([timer.timerP, waitForReady(Infinity)]); + // If the timer resolves first we throw a timeout error + if (timer?.timedOut === true) { + throw new grpcErrors.ErrorGRPCClientTimeout(); + } + } else { + // No timer given so we wait forever + await waitForReady(Infinity); + } } catch (e) { // If we fail here then we leak the client object... client.close(); diff --git a/src/identities/providers/github/GitHubProvider.ts b/src/identities/providers/github/GitHubProvider.ts index 4dc939999..e5bd22bf9 100644 --- a/src/identities/providers/github/GitHubProvider.ts +++ b/src/identities/providers/github/GitHubProvider.ts @@ -507,7 +507,7 @@ class GitHubProvider extends Provider { ); } const data = await response.text(); - const claimIds = await this.extractClaimIds(data); + const claimIds = this.extractClaimIds(data); for (const claimId of claimIds) { const claim = await this.getClaim(authIdentityId, claimId); if (claim != null) { diff --git a/src/keys/utils.ts b/src/keys/utils.ts index 02ea313f9..fc621068b 100644 --- a/src/keys/utils.ts +++ b/src/keys/utils.ts @@ -508,7 +508,16 @@ function publicKeyBitSize(publicKey: PublicKey): number { } async function getRandomBytes(size: number): Promise { - return Buffer.from(await random.getBytes(size), 'binary'); + const p = new Promise((resolve, reject) => { + random.getBytes(size, (e, bytes) => { + if (e != null) { + reject(e); + } else { + resolve(bytes); + } + }); + }); + return Buffer.from(await p, 'binary'); } function getRandomBytesSync(size: number): Buffer { diff --git a/src/network/Proxy.ts b/src/network/Proxy.ts index 15bbf4d05..3e7945b2d 100644 --- a/src/network/Proxy.ts +++ b/src/network/Proxy.ts @@ -1,5 +1,12 @@ import type { AddressInfo, Socket } from 'net'; -import type { Host, Port, Address, ConnectionInfo, TLSConfig } from './types'; +import type { + Host, + Port, + Address, + ConnectionInfo, + TLSConfig, + ConnectionEstablishedCallback, +} from './types'; import type { ConnectionsForward } from './ConnectionForward'; import type { NodeId } from '../nodes/types'; import type { Timer } from '../types'; @@ -47,6 +54,7 @@ class Proxy { proxy: new Map(), reverse: new Map(), }; + protected connectionEstablishedCallback: ConnectionEstablishedCallback; constructor({ authToken, @@ -55,6 +63,7 @@ class Proxy { connEndTime = 1000, connPunchIntervalTime = 1000, connKeepAliveIntervalTime = 1000, + connectionEstablishedCallback = () => {}, logger, }: { authToken: string; @@ -63,6 +72,7 @@ class Proxy { connEndTime?: number; connPunchIntervalTime?: number; connKeepAliveIntervalTime?: number; + connectionEstablishedCallback?: ConnectionEstablishedCallback; logger?: Logger; }) { this.logger = logger ?? 
new Logger(Proxy.name); @@ -76,6 +86,7 @@ class Proxy { this.server = http.createServer(); this.server.on('request', this.handleRequest); this.server.on('connect', this.handleConnectForward); + this.connectionEstablishedCallback = connectionEstablishedCallback; this.logger.info(`Created ${Proxy.name}`); } @@ -518,6 +529,14 @@ class Proxy { timer, ); conn.compose(clientSocket); + // With the connection composed without error we can assume that the + // connection was established and verified + await this.connectionEstablishedCallback({ + remoteNodeId: conn.getServerNodeIds()[0], + remoteHost: conn.host, + remotePort: conn.port, + type: 'forward', + }); } protected async establishConnectionForward( @@ -684,6 +703,14 @@ class Proxy { timer, ); await conn.compose(utpConn, timer); + // With the connection composed without error we can assume that the + // connection was established and verified + await this.connectionEstablishedCallback({ + remoteNodeId: conn.getClientNodeIds()[0], + remoteHost: conn.host, + remotePort: conn.port, + type: 'reverse', + }); } protected async establishConnectionReverse( diff --git a/src/network/types.ts b/src/network/types.ts index 40d672a85..a5a62b4c2 100644 --- a/src/network/types.ts +++ b/src/network/types.ts @@ -55,6 +55,15 @@ type ConnectionInfo = { remotePort: Port; }; +type ConnectionData = { + remoteNodeId: NodeId; + remoteHost: Host; + remotePort: Port; + type: 'forward' | 'reverse'; +}; + +type ConnectionEstablishedCallback = (data: ConnectionData) => any; + type PingMessage = { type: 'ping'; }; @@ -73,6 +82,8 @@ export type { TLSConfig, ProxyConfig, ConnectionInfo, + ConnectionData, + ConnectionEstablishedCallback, PingMessage, PongMessage, NetworkMessage, diff --git a/src/network/utils.ts b/src/network/utils.ts index 8347da631..ec6649a91 100644 --- a/src/network/utils.ts +++ b/src/network/utils.ts @@ -45,10 +45,12 @@ function isHostname(hostname: any): hostname is Hostname { /** * Ports must be numbers between 0 and 65535 inclusive + * If connect is true, then port must be a number between 1 and 65535 inclusive */ -function isPort(port: any): port is Port { +function isPort(port: any, connect: boolean = false): port is Port { if (typeof port !== 'number') return false; if (port < 0 || port > 65535) return false; + if (connect && port === 0) return false; return true; } diff --git a/src/nodes/NodeConnection.ts b/src/nodes/NodeConnection.ts index 6788c20fe..8f02e6144 100644 --- a/src/nodes/NodeConnection.ts +++ b/src/nodes/NodeConnection.ts @@ -5,6 +5,7 @@ import type { Certificate, PublicKey, PublicKeyPem } from '../keys/types'; import type Proxy from '../network/Proxy'; import type GRPCClient from '../grpc/GRPCClient'; import type NodeConnectionManager from './NodeConnectionManager'; +import type { Timer } from '../types'; import Logger from '@matrixai/logger'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import * as asyncInit from '@matrixai/async-init'; @@ -38,7 +39,7 @@ class NodeConnection { targetHost, targetPort, targetHostname, - connConnectTime = 20000, + timer, proxy, keyManager, clientFactory, @@ -50,7 +51,7 @@ class NodeConnection { targetHost: Host; targetPort: Port; targetHostname?: Hostname; - connConnectTime?: number; + timer?: Timer; proxy: Proxy; keyManager: KeyManager; clientFactory: (...args) => Promise; @@ -125,7 +126,7 @@ class NodeConnection { await nodeConnection.destroy(); } }, - timeout: connConnectTime, + timer: timer, }), holePunchPromises, ]); diff --git a/src/nodes/NodeConnectionManager.ts 
b/src/nodes/NodeConnectionManager.ts index 6160aa60a..ba4361f89 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -4,6 +4,7 @@ import type { Host, Hostname, Port } from '../network/types'; import type { ResourceAcquire } from '../utils'; import type { Timer } from '../types'; import type NodeGraph from './NodeGraph'; +import type Queue from './Queue'; import type { NodeId, NodeAddress, @@ -11,6 +12,8 @@ import type { SeedNodes, NodeIdString, } from './types'; +import type NodeManager from './NodeManager'; +import type { AbortSignal } from 'node-abort-controller'; import Logger from '@matrixai/logger'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; import { IdInternal } from '@matrixai/id'; @@ -24,7 +27,7 @@ import * as networkUtils from '../network/utils'; import * as agentErrors from '../agent/errors'; import * as grpcErrors from '../grpc/errors'; import * as nodesPB from '../proto/js/polykey/v1/nodes/nodes_pb'; -import { RWLock, withF } from '../utils'; +import { RWLock, timerStart, withF } from '../utils'; type ConnectionAndLock = { connection?: NodeConnection; @@ -36,7 +39,7 @@ interface NodeConnectionManager extends StartStop {} @StartStop() class NodeConnectionManager { /** - * Time used to estalish `NodeConnection` + * Time used to establish `NodeConnection` */ public readonly connConnectTime: number; @@ -54,6 +57,9 @@ class NodeConnectionManager { protected nodeGraph: NodeGraph; protected keyManager: KeyManager; protected proxy: Proxy; + protected queue: Queue; + // NodeManager has to be passed in during start to allow co-dependency + protected nodeManager: NodeManager | undefined; protected seedNodes: SeedNodes; /** * Data structure to store all NodeConnections. If a connection to a node n does @@ -71,6 +77,7 @@ class NodeConnectionManager { keyManager, nodeGraph, proxy, + queue, seedNodes = {}, initialClosestNodes = 3, connConnectTime = 20000, @@ -80,6 +87,7 @@ class NodeConnectionManager { nodeGraph: NodeGraph; keyManager: KeyManager; proxy: Proxy; + queue: Queue; seedNodes?: SeedNodes; initialClosestNodes?: number; connConnectTime?: number; @@ -90,14 +98,16 @@ class NodeConnectionManager { this.keyManager = keyManager; this.nodeGraph = nodeGraph; this.proxy = proxy; + this.queue = queue; this.seedNodes = seedNodes; this.initialClosestNodes = initialClosestNodes; this.connConnectTime = connConnectTime; this.connTimeoutTime = connTimeoutTime; } - public async start() { + public async start({ nodeManager }: { nodeManager: NodeManager }) { this.logger.info(`Starting ${this.constructor.name}`); + this.nodeManager = nodeManager; for (const nodeIdEncoded in this.seedNodes) { const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded)!; await this.nodeGraph.setNode(nodeId, this.seedNodes[nodeIdEncoded]); @@ -107,6 +117,7 @@ class NodeConnectionManager { public async stop() { this.logger.info(`Stopping ${this.constructor.name}`); + this.nodeManager = undefined; for (const [nodeId, connAndLock] of this.connections) { if (connAndLock == null) continue; if (connAndLock.connection == null) continue; @@ -122,14 +133,17 @@ class NodeConnectionManager { * itself is such that we can pass targetNodeId as a parameter (as opposed to * an acquire function with no parameters). 
* @param targetNodeId Id of target node to communicate with + * @param timer Connection timeout timer + * @param address Optional address to connect to * @returns ResourceAcquire Resource API for use in with contexts */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async acquireConnection( targetNodeId: NodeId, + timer?: Timer, ): Promise>> { return async () => { - const connAndLock = await this.getConnection(targetNodeId); + const connAndLock = await this.getConnection(targetNodeId, timer); // Acquire the read lock and the release function const release = await connAndLock.lock.acquireRead(); // Resetting TTL timer @@ -151,15 +165,17 @@ * for use with normal arrow function * @param targetNodeId Id of target node to communicate with * @param f Function to handle communication + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async withConnF( targetNodeId: NodeId, f: (conn: NodeConnection) => Promise, + timer?: Timer, ): Promise { try { return await withF( - [await this.acquireConnection(targetNodeId)], + [await this.acquireConnection(targetNodeId, timer)], async ([conn]) => { this.logger.info( `withConnF calling function with connection to ${nodesUtils.encodeNodeId( @@ -189,6 +205,7 @@ * for use with a generator function * @param targetNodeId Id of target node to communicate with * @param g Generator function to handle communication + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async *withConnG( @@ -196,11 +213,12 @@ g: ( conn: NodeConnection, ) => AsyncGenerator, + timer?: Timer, ): AsyncGenerator { - const acquire = await this.acquireConnection(targetNodeId); + const acquire = await this.acquireConnection(targetNodeId, timer); const [release, conn] = await acquire(); try { - return yield* await g(conn!); + return yield* g(conn!); } catch (err) { if ( err instanceof nodesErrors.ErrorNodeConnectionDestroyed || @@ -222,10 +240,12 @@ * Create a connection to another node (without performing any function). * This is a NOOP if a connection already exists. * @param targetNodeId Id of node we are creating connection to - * @returns ConnectionAndLock that was create or exists in the connection map. + * @param timer Connection timeout timer + * @returns ConnectionAndLock that was created or exists in the connection map */ protected async getConnection( targetNodeId: NodeId, + timer?: Timer, ): Promise { this.logger.info( `Getting connection to ${nodesUtils.encodeNodeId(targetNodeId)}`, @@ -257,7 +277,7 @@ )}`, ); // Creating the connection and set in map - return await this.establishNodeConnection(targetNodeId, lock); + return await this.establishNodeConnection(targetNodeId, lock, timer); }); } else { lock = new RWLock(); @@ -273,7 +293,7 @@ )}`, ); // Creating the connection and set in map - return await this.establishNodeConnection(targetNodeId, lock); + return await this.establishNodeConnection(targetNodeId, lock, timer); }); } } @@ -286,13 +306,18 @@ * This only adds the connection to the connection map if the connection was established.
* @param targetNodeId Id of node we are establishing connection to * @param lock Lock associated with connection + * @param timer Connection timeout timer * @returns ConnectionAndLock that was added to the connection map */ protected async establishNodeConnection( targetNodeId: NodeId, lock: RWLock, + timer?: Timer, ): Promise { const targetAddress = await this.findNode(targetNodeId); + if (targetAddress == null) { + throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); + } // If the stored host is not a valid host (IP address), then we assume it to // be a hostname const targetHostname = !networkUtils.isHost(targetAddress.host) ? (targetAddress.host as string as Hostname) : undefined; @@ -325,19 +350,22 @@ keyManager: this.keyManager, nodeConnectionManager: this, destroyCallback, - connConnectTime: this.connConnectTime, + timer: timer ?? timerStart(this.connConnectTime), logger: this.logger.getChild( `${NodeConnection.name} ${targetHost}:${targetAddress.port}`, ), clientFactory: async (args) => GRPCClientAgent.createGRPCClientAgent(args), }); + // We can assume connection was established and destination was valid, + // we can add the target to the nodeGraph + await this.nodeManager?.setNode(targetNodeId, targetAddress, false); // Creating TTL timeout - const timer = setTimeout(async () => { + const timeToLiveTimer = setTimeout(async () => { await this.destroyConnection(targetNodeId); }, this.connTimeoutTime); // Add it to the map of active connections - const connectionAndLock = { connection, lock, timer }; + const connectionAndLock = { connection, lock, timer: timeToLiveTimer }; this.connections.set( targetNodeId.toString() as NodeIdString, connectionAndLock, @@ -416,67 +444,26 @@ * Retrieves the node address. If an entry doesn't exist in the db, then * proceeds to locate it using Kademlia. * @param targetNodeId Id of the node we are trying to find + * @param options */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async findNode(targetNodeId: NodeId): Promise { + public async findNode( + targetNodeId: NodeId, + options: { signal?: AbortSignal } = {}, + ): Promise { + const { signal } = { ...options }; // First check if we already have an existing ID -> address record - - let address = await this.nodeGraph.getNode(targetNodeId); + let address = (await this.nodeGraph.getNode(targetNodeId))?.address; // Otherwise, attempt to locate it by contacting network - if (address == null) { - address = await this.getClosestGlobalNodes(targetNodeId); - // TODO: This currently just does one iteration - // If not found in this single iteration, we throw an exception - if (address == null) { - throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); - } - } - // We ensure that we always return a NodeAddress (either by lookup, or - // network search) - if we can't locate it from either, we throw an exception + address = + address ?? + (await this.getClosestGlobalNodes(targetNodeId, undefined, { + signal, + })); + // TODO: This currently just does one iteration return address; } - /** - * Finds the set of nodes (of size k) known by the current node (i.e. in its - * buckets database) that have the smallest distance to the target node (i.e. - * are closest to the target node). - * i.e. FIND_NODE RPC from Kademlia spec - * - * Used by the RPC service.
- * - * @param targetNodeId the node ID to find other nodes closest to it - * @param numClosest the number of closest nodes to return (by default, returns - * according to the maximum number of nodes per bucket) - * @returns a mapping containing exactly k nodeIds -> nodeAddresses (unless the - * current node has less than k nodes in all of its buckets, in which case it - * returns all nodes it has knowledge of) - */ - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async getClosestLocalNodes( - targetNodeId: NodeId, - numClosest: number = this.nodeGraph.maxNodesPerBucket, - ): Promise> { - // Retrieve all nodes from buckets in database - const buckets = await this.nodeGraph.getAllBuckets(); - // Iterate over all of the nodes in each bucket - const distanceToNodes: Array = []; - buckets.forEach(function (bucket) { - for (const nodeIdString of Object.keys(bucket)) { - // Compute the distance from the node, and add it to the array - const nodeId = IdInternal.fromString(nodeIdString); - distanceToNodes.push({ - id: nodeId, - address: bucket[nodeId].address, - distance: nodesUtils.calculateDistance(nodeId, targetNodeId), - }); - } - }); - // Sort the array (based on the distance at index 1) - distanceToNodes.sort(nodesUtils.sortByDistance); - // Return the closest k nodes (i.e. the first k), or all nodes if < k in array - return distanceToNodes.slice(0, numClosest); - } - /** * Attempts to locate a target node in the network (using Kademlia). * Adds all discovered, active nodes to the current node's database (up to k @@ -489,16 +476,21 @@ class NodeConnectionManager { * port). * @param targetNodeId ID of the node attempting to be found (i.e. attempting * to find its IP address and port) + * @param timer Connection timeout timer + * @param options * @returns whether the target node was located in the process */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async getClosestGlobalNodes( targetNodeId: NodeId, + timer?: Timer, + options: { signal?: AbortSignal } = {}, ): Promise { + const { signal } = { ...options }; // Let foundTarget: boolean = false; let foundAddress: NodeAddress | undefined = undefined; // Get the closest alpha nodes to the target node (set as shortlist) - const shortlist: Array = await this.getClosestLocalNodes( + const shortlist = await this.nodeGraph.getClosestNodes( targetNodeId, this.initialClosestNodes, ); @@ -513,7 +505,8 @@ class NodeConnectionManager { // getClosestGlobalNodes()? 
const contacted: { [nodeId: string]: boolean } = {}; // Iterate until we've found and contacted k nodes - while (Object.keys(contacted).length <= this.nodeGraph.maxNodesPerBucket) { + while (Object.keys(contacted).length <= this.nodeGraph.nodeBucketLimit) { + if (signal?.aborted) throw new nodesErrors.ErrorNodeAborted(); // While (!foundTarget) { // Remove the node from the front of the array const nextNode = shortlist.shift(); @@ -521,8 +514,9 @@ if (nextNode == null) { break; } + const [nextNodeId, nextNodeAddress] = nextNode; // Skip if the node has already been contacted - if (contacted[nextNode.id]) { + if (contacted[nextNodeId]) { continue; } // Connect to the node (check if pre-existing connection exists, otherwise @@ -530,41 +524,47 @@ try { // Add the node to the database so that we can find its address in // call to getConnectionToNode - await this.nodeGraph.setNode(nextNode.id, nextNode.address); - await this.getConnection(nextNode.id); + await this.nodeGraph.setNode(nextNodeId, nextNodeAddress.address); + await this.getConnection(nextNodeId, timer); } catch (e) { // If we can't connect to the node, then skip it continue; } - contacted[nextNode.id] = true; + contacted[nextNodeId] = true; // Ask the node to get their own closest nodes to the target const foundClosest = await this.getRemoteNodeClosestNodes( - nextNode.id, + nextNodeId, targetNodeId, + timer, ); // Check to see if any of these are the target node. At the same time, add // them to the shortlist - for (const nodeData of foundClosest) { - // Ignore any nodes that have been contacted - if (contacted[nodeData.id]) { + for (const [nodeId, nodeData] of foundClosest) { + if (signal?.aborted) throw new nodesErrors.ErrorNodeAborted(); + // Ignore any nodes that have been contacted + if (contacted[nodeId]) { continue; } - if (nodeData.id.equals(targetNodeId)) { - await this.nodeGraph.setNode(nodeData.id, nodeData.address); + if (nodeId.equals(targetNodeId)) { + await this.nodeGraph.setNode(nodeId, nodeData.address); foundAddress = nodeData.address; // We have found the target node, so we can stop trying to look for it // in the shortlist break; } - shortlist.push(nodeData); + shortlist.push([nodeId, nodeData]); } // To make the number of jumps relatively short, should connect to the nodes // closest to the target first, and ask if they know of any closer nodes // Then we can simply unshift the first (closest) element from the shortlist - shortlist.sort(function (a: NodeData, b: NodeData) { - if (a.distance > b.distance) { + const distance = (nodeId: NodeId) => + nodesUtils.nodeDistance(targetNodeId, nodeId); + shortlist.sort(function ([nodeIdA], [nodeIdB]) { + const distanceA = distance(nodeIdA); + const distanceB = distance(nodeIdB); + if (distanceA > distanceB) { return 1; - } else if (a.distance < b.distance) { + } else if (distanceA < distanceB) { return -1; } else { return 0; @@ -579,69 +579,99 @@ * target node ID.
* @param nodeId the node ID to search on * @param targetNodeId the node ID to find other nodes closest to it + * @param timer Connection timeout timer * @returns list of nodes and their IP/port that are closest to the target */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public async getRemoteNodeClosestNodes( nodeId: NodeId, targetNodeId: NodeId, - ): Promise> { + timer?: Timer, + ): Promise> { // Construct the message const nodeIdMessage = new nodesPB.Node(); nodeIdMessage.setNodeId(nodesUtils.encodeNodeId(targetNodeId)); // Send through client - return this.withConnF(nodeId, async (connection) => { - const client = await connection.getClient(); - const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); - const nodes: Array = []; - // Loop over each map element (from the returned response) and populate nodes - response.getNodeTableMap().forEach((address, nodeIdString: string) => { - const nodeId = nodesUtils.decodeNodeId(nodeIdString); - // If the nodeId is not valid we don't add it to the list of nodes - if (nodeId != null) { - nodes.push({ - id: nodeId, - address: { - host: address.getHost() as Host | Hostname, - port: address.getPort() as Port, - }, - distance: nodesUtils.calculateDistance(targetNodeId, nodeId), - }); - } - }); - return nodes; - }); + return this.withConnF( + nodeId, + async (connection) => { + const client = connection.getClient(); + const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); + const nodes: Array<[NodeId, NodeData]> = []; + // Loop over each map element (from the returned response) and populate nodes + response.getNodeTableMap().forEach((address, nodeIdString: string) => { + const nodeId = nodesUtils.decodeNodeId(nodeIdString); + // If the nodeId is not valid we don't add it to the list of nodes + if (nodeId != null) { + nodes.push([ + nodeId, + { + address: { + host: address.getHost() as Host | Hostname, + port: address.getPort() as Port, + }, + lastUpdated: 0, // FIXME? + }, + ]); + } + }); + return nodes; + }, + timer, + ); } /** * Perform an initial database synchronisation: get the k closest nodes * from each seed node and add them to this database - * For now, we also attempt to establish a connection to each of them. - * If these nodes are offline, this will impose a performance penalty, - * so we should investigate performing this in the background if possible. - * Alternatively, we can also just add the nodes to our database without - * establishing connection. - * This has been removed from start() as there's a chicken-egg scenario - * where we require the NodeGraph instance to be created in order to get - * connections. 
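The shortlist ordering above, and the `nodesUtils.nodeDistance` helper it calls, both rest on Kademlia's XOR metric. That helper's implementation is not part of this diff; a minimal sketch of how such a distance could be computed, assuming `NodeId` is backed by raw bytes (as the `IdInternal.fromBuffer` usage elsewhere in the diff suggests):

```ts
// Hedged sketch of an XOR-metric distance; Kademlia defines
// distance(a, b) = a XOR b, compared as an unsigned big-endian integer.
function nodeDistance(a: Uint8Array, b: Uint8Array): bigint {
  if (a.length !== b.length) {
    throw new RangeError('NodeIds must be the same length');
  }
  let distance = 0n;
  for (let i = 0; i < a.length; i++) {
    // Shift in one XORed byte at a time, most significant byte first
    distance = (distance << 8n) | BigInt(a[i] ^ b[i]);
  }
  return distance;
}

// Usage: order candidate IDs by closeness to a target, as the shortlist does
const target = new Uint8Array([0, 0, 1]);
const ids = [new Uint8Array([0, 1, 1]), new Uint8Array([0, 0, 0])];
ids.sort((x, y) => {
  const dx = nodeDistance(target, x);
  const dy = nodeDistance(target, y);
  return dx < dy ? -1 : dx > dy ? 1 : 0;
});
```

Because the XOR metric is symmetric and every distance from a given ID identifies a unique point in the ID space, re-sorting the shortlist after each round of responses keeps the search converging toward the target.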
+ * Establish a proxy connection to each node before adding it + * By default this operation is blocking, set `block` to false to make it + * non-blocking */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async syncNodeGraph() { + public async syncNodeGraph(block: boolean = true, timer?: Timer) { + this.logger.info('Syncing nodeGraph'); for (const seedNodeId of this.getSeedNodes()) { // Check if the connection is viable try { - await this.getConnection(seedNodeId); + await this.getConnection(seedNodeId, timer); } catch (e) { if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) continue; throw e; } - const nodes = await this.getRemoteNodeClosestNodes( seedNodeId, this.keyManager.getNodeId(), + timer, ); - for (const n of nodes) { - await this.nodeGraph.setNode(n.id, n.address); + // FIXME: we need to ping a node before setting it + for (const [nodeId, nodeData] of nodes) { + if (!block) { + this.queue.push(() => + this.nodeManager!.setNode(nodeId, nodeData.address), + ); + } else { + try { + await this.nodeManager?.setNode(nodeId, nodeData.address); + } catch (e) { + if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; + } + } + } + // Refreshing every bucket above the closest node + const refreshBuckets = async () => { + const [closestNode] = ( + await this.nodeGraph.getClosestNodes(this.keyManager.getNodeId(), 1) + ).pop()!; + const [bucketIndex] = this.nodeGraph.bucketIndex(closestNode); + for (let i = bucketIndex; i < this.nodeGraph.nodeIdBits; i++) { + this.nodeManager?.refreshBucketQueueAdd(i); + } + }; + if (!block) { + this.queue.push(refreshBuckets); + } else { + await refreshBuckets(); } } } @@ -655,6 +685,7 @@ class NodeConnectionManager { * @param targetNodeId node ID of the target node to hole punch * @param proxyAddress stringified address of `proxyHost:proxyPort` * @param signature signature to verify source node is sender (signature based + * @param timer Connection timeout timer * on proxyAddress as message) */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) @@ -664,16 +695,21 @@ class NodeConnectionManager { targetNodeId: NodeId, proxyAddress: string, signature: Buffer, + timer?: Timer, ): Promise { const relayMsg = new nodesPB.Relay(); relayMsg.setSrcId(nodesUtils.encodeNodeId(sourceNodeId)); relayMsg.setTargetId(nodesUtils.encodeNodeId(targetNodeId)); relayMsg.setProxyAddress(proxyAddress); relayMsg.setSignature(signature.toString()); - await this.withConnF(relayNodeId, async (connection) => { - const client = connection.getClient(); - await client.nodesHolePunchMessageSend(relayMsg); - }); + await this.withConnF( + relayNodeId, + async (connection) => { + const client = connection.getClient(); + await client.nodesHolePunchMessageSend(relayMsg); + }, + timer, + ); } /** @@ -683,15 +719,20 @@ class NodeConnectionManager { * node). 
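The non-blocking path of `syncNodeGraph` above pushes closures onto the shared `Queue`, which (like the refresh-bucket machinery later in this diff) is built on a deconstructed-promise primitive from `src/utils`. Its exact shape is not shown here; the following sketch is an assumption consistent with the `.p`/`resolveP` call sites and the diff's `PromiseType`:

```ts
// Hedged sketch of the deferred-promise helper implied by `promise()` usage.
type PromiseDeconstructed<T> = {
  p: Promise<T>;
  resolveP: (value: T | PromiseLike<T>) => void;
  rejectP: (reason?: unknown) => void;
};

function promise<T = void>(): PromiseDeconstructed<T> {
  let resolveP!: (value: T | PromiseLike<T>) => void;
  let rejectP!: (reason?: unknown) => void;
  const p = new Promise<T>((resolve, reject) => {
    resolveP = resolve;
    rejectP = reject;
  });
  return { p, resolveP, rejectP };
}

// A worker "plugs" itself by awaiting p; a producer "unplugs" it via resolveP()
async function demo() {
  const plug = promise();
  setTimeout(() => plug.resolveP(), 100); // producer side: wake the worker
  await plug.p; // worker side: parked until resolved
}
```

This is what lets the queue runner sleep without polling: an empty queue re-plugs, and each `push` resolves the plug promise to wake it.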
* @param message the original relay message (assumed to be created in * nodeConnection.start()) + * @param timer Connection timeout timer */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async relayHolePunchMessage(message: nodesPB.Relay): Promise { + public async relayHolePunchMessage( + message: nodesPB.Relay, + timer?: Timer, + ): Promise { await this.sendHolePunchMessage( validationUtils.parseNodeId(message.getTargetId()), validationUtils.parseNodeId(message.getSrcId()), validationUtils.parseNodeId(message.getTargetId()), message.getProxyAddress(), Buffer.from(message.getSignature()), + timer, ); } @@ -705,6 +746,55 @@ class NodeConnectionManager { ); return nodeIds; } + + /** + * Checks if a connection can be made to the target. Returns true if the + * connection can be authenticated, it's certificate matches the nodeId and + * the addresses match if provided. Otherwise returns false. + * @param nodeId - NodeId of the target + * @param host - Host of the target node + * @param port - Port of the target node + * @param timer Connection timeout timer + */ + @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + public async pingNode( + nodeId: NodeId, + host: Host, + port: Port, + timer?: Timer, + ): Promise { + // If we can create a connection then we have punched though the NAT, + // authenticated and confirmed the nodeId matches + const proxyAddress = networkUtils.buildAddress( + this.proxy.getProxyHost(), + this.proxy.getProxyPort(), + ); + const signature = await this.keyManager.signWithRootKeyPair( + Buffer.from(proxyAddress), + ); + const holePunchPromises = Array.from(this.getSeedNodes(), (seedNodeId) => { + return this.sendHolePunchMessage( + seedNodeId, + this.keyManager.getNodeId(), + nodeId, + proxyAddress, + signature, + ); + }); + const forwardPunchPromise = this.holePunchForward( + nodeId, + host, + port, + timer, + ); + + try { + await Promise.all([forwardPunchPromise, ...holePunchPromises]); + } catch (e) { + return false; + } + return true; + } } export default NodeConnectionManager; diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index 4237b5529..e6bdf078d 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -1,9 +1,15 @@ -import type { DB, DBLevel, DBOp } from '@matrixai/db'; -import type { NodeId, NodeAddress, NodeBucket } from './types'; +import type { DB, DBDomain, DBLevel } from '@matrixai/db'; +import type { + NodeId, + NodeAddress, + NodeBucket, + NodeData, + NodeBucketMeta, + NodeBucketIndex, + NodeGraphSpace, +} from './types'; import type KeyManager from '../keys/KeyManager'; -import type { Host, Hostname, Port } from '../network/types'; -import { Mutex } from 'async-mutex'; -import lexi from 'lexicographic-integer'; +import type { ResourceAcquire, ResourceRelease } from '../utils'; import Logger from '@matrixai/logger'; import { CreateDestroyStartStop, @@ -12,10 +18,11 @@ import { import { IdInternal } from '@matrixai/id'; import * as nodesUtils from './utils'; import * as nodesErrors from './errors'; +import { RWLock, getUnixtime } from '../utils'; /** * NodeGraph is an implementation of Kademlia for maintaining peer to peer information - * We maintain a map of buckets. 
Where each bucket has k number of node infos + * It is a database of fixed-size buckets, where each bucket contains NodeId -> NodeData */ interface NodeGraph extends CreateDestroyStartStop {} @CreateDestroyStartStop( @@ -23,29 +30,16 @@ interface NodeGraph extends CreateDestroyStartStop {} new nodesErrors.ErrorNodeGraphDestroyed(), ) class NodeGraph { - // Max number of nodes in each k-bucket (a.k.a. k) - public readonly maxNodesPerBucket: number = 20; - - protected logger: Logger; - protected db: DB; - protected keyManager: KeyManager; - protected nodeGraphDbDomain: string = this.constructor.name; - protected nodeGraphBucketsDbDomain: Array = [ - this.nodeGraphDbDomain, - 'buckets', - ]; - protected nodeGraphDb: DBLevel; - protected nodeGraphBucketsDb: DBLevel; - protected lock: Mutex = new Mutex(); - public static async createNodeGraph({ db, keyManager, + nodeIdBits = 256, logger = new Logger(this.name), fresh = false, }: { db: DB; keyManager: KeyManager; + nodeIdBits?: number; logger?: Logger; fresh?: boolean; }): Promise { @@ -53,6 +47,7 @@ class NodeGraph { const nodeGraph = new NodeGraph({ db, keyManager, + nodeIdBits, logger, }); await nodeGraph.start({ fresh }); @@ -60,375 +55,760 @@ class NodeGraph { return nodeGraph; } + /** + * Bit size of the NodeIds + * This equals the number of buckets + */ + public readonly nodeIdBits: number; + /** + * Max number of nodes in each k-bucket + */ + public readonly nodeBucketLimit: number = 20; + + protected logger: Logger; + protected db: DB; + protected keyManager: KeyManager; + protected space: NodeGraphSpace; + protected nodeGraphDbDomain: DBDomain = [this.constructor.name]; + protected nodeGraphMetaDbDomain: DBDomain; + protected nodeGraphBucketsDbDomain: DBDomain; + protected nodeGraphLastUpdatedDbDomain: DBDomain; + protected nodeGraphDb: DBLevel; + protected nodeGraphMetaDb: DBLevel; + protected nodeGraphBucketsDb: DBLevel; + protected nodeGraphLastUpdatedDb: DBLevel; + + // WORK out a way to do re-entrancy properly + // Otherwise we have restrictions on the way we are developing stuff + protected lock: RWLock = new RWLock(); + constructor({ db, keyManager, + nodeIdBits, logger, }: { db: DB; keyManager: KeyManager; + nodeIdBits: number; logger: Logger; }) { this.logger = logger; this.db = db; this.keyManager = keyManager; + this.nodeIdBits = nodeIdBits; } get locked(): boolean { return this.lock.isLocked(); } + public acquireLockRead(lazy: boolean = false): ResourceAcquire { + return async () => { + let release: ResourceRelease; + if (lazy && this.lock.isLocked()) { + release = async () => {}; + } else { + const r = await this.lock.acquireRead(); + release = async () => r(); + } + return [release, this.lock]; + }; + } + + public acquireLockWrite(lazy: boolean = false): ResourceAcquire { + return async () => { + let release: ResourceRelease; + if (lazy && this.lock.isLocked()) { + release = async () => {}; + } else { + const r = await this.lock.acquireWrite(); + release = async () => r(); + } + return [release, this.lock]; + }; + } + public async start({ fresh = false, - }: { - fresh?: boolean; - } = {}) { + }: { fresh?: boolean } = {}): Promise { this.logger.info(`Starting ${this.constructor.name}`); - const nodeGraphDb = await this.db.level(this.nodeGraphDbDomain); - // Buckets stores NodeBucketIndex -> NodeBucket - const nodeGraphBucketsDb = await this.db.level( - this.nodeGraphBucketsDbDomain[1], - nodeGraphDb, - ); + const nodeGraphDb = await this.db.level(this.nodeGraphDbDomain[0]); if (fresh) { await nodeGraphDb.clear(); } + // 
Space key is used to create a swappable sublevel + // when remapping the buckets during `this.refreshBuckets` + const space = await this.setupSpace(); + const nodeGraphMetaDbDomain = [this.nodeGraphDbDomain[0], 'meta' + space]; + const nodeGraphBucketsDbDomain = [ + this.nodeGraphDbDomain[0], + 'buckets' + space, + ]; + const nodeGraphLastUpdatedDbDomain = [ + this.nodeGraphDbDomain[0], + 'lastUpdated' + space, + ]; + // Bucket metadata sublevel: `!meta!! -> value` + const nodeGraphMetaDb = await this.db.level( + nodeGraphMetaDbDomain[1], + nodeGraphDb, + ); + // Bucket sublevel: `!buckets!! -> NodeData` + // The BucketIndex can range from 0 to NodeId bitsize minus 1 + // So 256 bits means 256 buckets of 0 to 255 + const nodeGraphBucketsDb = await this.db.level( + nodeGraphBucketsDbDomain[1], + nodeGraphDb, + ); + // Last updated sublevel: `!lastUpdated!!- -> NodeId` + // This is used as a sorted index of the NodeId by `lastUpdated` timestamp + // The `NodeId` must be appended in the key in order to disambiguate `NodeId` with same `lastUpdated` timestamp + const nodeGraphLastUpdatedDb = await this.db.level( + nodeGraphLastUpdatedDbDomain[1], + nodeGraphDb, + ); + this.space = space; + this.nodeGraphMetaDbDomain = nodeGraphMetaDbDomain; + this.nodeGraphBucketsDbDomain = nodeGraphBucketsDbDomain; + this.nodeGraphLastUpdatedDbDomain = nodeGraphLastUpdatedDbDomain; this.nodeGraphDb = nodeGraphDb; + this.nodeGraphMetaDb = nodeGraphMetaDb; this.nodeGraphBucketsDb = nodeGraphBucketsDb; + this.nodeGraphLastUpdatedDb = nodeGraphLastUpdatedDb; this.logger.info(`Started ${this.constructor.name}`); } - public async stop() { + public async stop(): Promise { this.logger.info(`Stopping ${this.constructor.name}`); this.logger.info(`Stopped ${this.constructor.name}`); } - public async destroy() { + public async destroy(): Promise { this.logger.info(`Destroying ${this.constructor.name}`); - const nodeGraphDb = await this.db.level(this.nodeGraphDbDomain); + // If the DB was stopped, the existing sublevel `this.nodeGraphDb` will not be valid + // Therefore we recreate the sublevel here + const nodeGraphDb = await this.db.level(this.nodeGraphDbDomain[0]); await nodeGraphDb.clear(); this.logger.info(`Destroyed ${this.constructor.name}`); } /** - * Run several operations within the same lock - * This does not ensure atomicity of the underlying database - * Database atomicity still depends on the underlying operation + * Sets up the space key + * The space string is suffixed to the `buckets` and `meta` sublevels + * This is used to allow swapping of sublevels when remapping buckets + * during `this.refreshBuckets` */ - public async transaction( - f: (nodeGraph: NodeGraph) => Promise, - ): Promise { - const release = await this.lock.acquire(); - try { - return await f(this); - } finally { - release(); + protected async setupSpace(): Promise { + let space = await this.db.get( + this.nodeGraphDbDomain, + 'space', + ); + if (space != null) { + return space; } + space = '0'; + await this.db.put(this.nodeGraphDbDomain, 'space', space); + return space; + } + + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getNode(nodeId: NodeId): Promise { + const [bucketIndex] = this.bucketIndex(nodeId); + const bucketDomain = [ + ...this.nodeGraphBucketsDbDomain, + nodesUtils.bucketKey(bucketIndex), + ]; + return await this.db.get( + bucketDomain, + nodesUtils.bucketDbKey(nodeId), + ); } /** - * Transaction wrapper that will not lock if the operation was executed - * within a transaction context + * Get all nodes + 
* Nodes are always sorted by `NodeBucketIndex` first + * Then secondly by the node IDs + * The `order` parameter applies to both, for example possible sorts: + * NodeBucketIndex asc, NodeID asc + * NodeBucketIndex desc, NodeId desc */ - public async _transaction(f: () => Promise): Promise { - if (this.lock.isLocked()) { - return await f(); - } else { - return await this.transaction(f); + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async *getNodes( + order: 'asc' | 'desc' = 'asc', + ): AsyncGenerator<[NodeId, NodeData]> { + for await (const o of this.nodeGraphBucketsDb.createReadStream({ + reverse: order === 'asc' ? false : true, + })) { + const { nodeId } = nodesUtils.parseBucketsDbKey((o as any).key as Buffer); + const data = (o as any).value as Buffer; + const nodeData = await this.db.deserializeDecrypt(data, false); + yield [nodeId, nodeData]; } } /** - * Retrieves the node Address - * @param nodeId node ID of the target node - * @returns Node Address of the target node + * Will add a node to the node graph and increment the bucket count. + * If the node already existed it will be updated. + * @param nodeId NodeId to add to the NodeGraph + * @param nodeAddress Address information to add */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getNode(nodeId: NodeId): Promise { - return await this._transaction(async () => { - const bucketIndex = this.getBucketIndex(nodeId); - const bucket = await this.db.get( - this.nodeGraphBucketsDbDomain, - bucketIndex, + public async setNode( + nodeId: NodeId, + nodeAddress: NodeAddress, + ): Promise { + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); + const bucketDomain = [...this.nodeGraphBucketsDbDomain, bucketKey]; + const lastUpdatedDomain = [...this.nodeGraphLastUpdatedDbDomain, bucketKey]; + const nodeData = await this.db.get( + bucketDomain, + nodesUtils.bucketDbKey(nodeId), + ); + if (nodeData != null) { + this.logger.debug( + `Updating node ${nodesUtils.encodeNodeId( + nodeId, + )} in bucket ${bucketIndex}`, ); - if (bucket != null && nodeId in bucket) { - return bucket[nodeId].address; - } - return; + // If the node already exists we want to remove the old `lastUpdated` + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + nodeData.lastUpdated, + nodeId, + ); + await this.db.del(lastUpdatedDomain, lastUpdatedKey); + } else { + this.logger.debug( + `Adding node ${nodesUtils.encodeNodeId( + nodeId, + )} to bucket ${bucketIndex}`, + ); + // It didn't exist so we want to increment the bucket count + const count = await this.getBucketMetaProp(bucketIndex, 'count'); + await this.setBucketMetaProp(bucketIndex, 'count', count + 1); + } + const lastUpdated = getUnixtime(); + await this.db.put(bucketDomain, nodesUtils.bucketDbKey(nodeId), { + address: nodeAddress, + lastUpdated, }); + const newLastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + lastUpdated, + nodeId, + ); + await this.db.put( + lastUpdatedDomain, + newLastUpdatedKey, + nodesUtils.bucketDbKey(nodeId), + true, + ); } - /** - * Determines whether a node ID -> node address mapping exists in this node's - * node table. 
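For the `lastUpdated` sublevel used by `setNode` above to work as a secondary index, its keys must sort by timestamp first and break ties on `NodeId`. The exact encoding of `nodesUtils.lastUpdatedBucketDbKey` is outside this diff; a hypothetical encoding that satisfies both properties:

```ts
// Hedged sketch: a lexicographically sortable index key of the form
// `<lastUpdated>-<nodeId>`. Zero-padding keeps lexicographic order equal to
// numeric order, and the NodeId suffix disambiguates entries that share a
// timestamp (as the diff's comments require).
function lastUpdatedBucketDbKey(
  lastUpdated: number,
  nodeIdBytes: Uint8Array,
): Buffer {
  const timePart = lastUpdated.toString().padStart(15, '0');
  return Buffer.concat([
    Buffer.from(timePart + '-', 'utf-8'),
    Buffer.from(nodeIdBytes),
  ]);
}

// Two nodes updated at different times stream oldest-first under a key scan:
const a = lastUpdatedBucketDbKey(1650000000, new Uint8Array([1]));
const b = lastUpdatedBucketDbKey(1650000001, new Uint8Array([2]));
console.log(Buffer.compare(a, b)); // -1: the older entry sorts first
```

Keeping this index in step with the bucket sublevel is why `setNode` deletes the old `lastUpdated` key before writing the new one.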
- * @param targetNodeId the node ID of the node to find - * @returns true if the node exists in the table, false otherwise - */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async knowsNode(targetNodeId: NodeId): Promise { - return !!(await this.getNode(targetNodeId)); + public async getOldestNode( + bucketIndex: number, + limit: number = 1, + ): Promise> { + const bucketKey = nodesUtils.bucketKey(bucketIndex); + // Remove the oldest entry in the bucket + const lastUpdatedBucketDb = await this.db.level( + bucketKey, + this.nodeGraphLastUpdatedDb, + ); + const oldestNodeIds: Array = []; + for await (const key of lastUpdatedBucketDb.createKeyStream({ limit })) { + const { nodeId } = nodesUtils.parseLastUpdatedBucketDbKey(key as Buffer); + oldestNodeIds.push(nodeId); + } + return oldestNodeIds; } - /** - * Returns the specified bucket if it exists - * @param bucketIndex - */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getBucket(bucketIndex: number): Promise { - return await this._transaction(async () => { - const bucket = await this.db.get( - this.nodeGraphBucketsDbDomain, - lexi.pack(bucketIndex, 'hex'), + public async unsetNode(nodeId: NodeId): Promise { + const [bucketIndex, bucketKey] = this.bucketIndex(nodeId); + const bucketDomain = [...this.nodeGraphBucketsDbDomain, bucketKey]; + const lastUpdatedDomain = [...this.nodeGraphLastUpdatedDbDomain, bucketKey]; + const nodeData = await this.db.get( + bucketDomain, + nodesUtils.bucketDbKey(nodeId), + ); + if (nodeData != null) { + this.logger.debug( + `Removing node ${nodesUtils.encodeNodeId( + nodeId, + )} from bucket ${bucketIndex}`, ); - // Cast the non-primitive types correctly (ensures type safety when using them) - for (const nodeId in bucket) { - bucket[nodeId].address.host = bucket[nodeId].address.host as - | Host - | Hostname; - bucket[nodeId].address.port = bucket[nodeId].address.port as Port; - bucket[nodeId].lastUpdated = new Date(bucket[nodeId].lastUpdated); - } - return bucket; - }); + const count = await this.getBucketMetaProp(bucketIndex, 'count'); + await this.setBucketMetaProp(bucketIndex, 'count', count - 1); + await this.db.del(bucketDomain, nodesUtils.bucketDbKey(nodeId)); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + nodeData.lastUpdated, + nodeId, + ); + await this.db.del(lastUpdatedDomain, lastUpdatedKey); + } } /** - * Sets a node to the bucket database - * This may delete an existing node if the bucket is filled up + * Gets a bucket + * The bucket's node IDs is sorted lexicographically by default + * Alternatively you can acquire them sorted by lastUpdated timestamp + * or by distance to the own NodeId */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async setNode( - nodeId: NodeId, - nodeAddress: NodeAddress, - ): Promise { - return await this._transaction(async () => { - const ops = await this.setNodeOps(nodeId, nodeAddress); - await this.db.batch(ops); - }); - } - - protected async setNodeOps( - nodeId: NodeId, - nodeAddress: NodeAddress, - ): Promise> { - const bucketIndex = this.getBucketIndex(nodeId); - let bucket = await this.db.get( - this.nodeGraphBucketsDbDomain, - bucketIndex, - ); - if (bucket == null) { - bucket = {}; + public async getBucket( + bucketIndex: NodeBucketIndex, + sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', + order: 'asc' | 'desc' = 'asc', + ): Promise { + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex( + `bucketIndex must be between 0 and 
${this.nodeIdBits - 1} inclusive`, + ); } - bucket[nodeId] = { - address: nodeAddress, - lastUpdated: new Date(), - }; - // Perform the check on size after we add/update the node. If it's an update, - // then we don't need to perform the deletion - let bucketEntries = Object.entries(bucket); - if (bucketEntries.length > this.maxNodesPerBucket) { - const leastActive = bucketEntries.reduce((prev, curr) => { - return new Date(prev[1].lastUpdated) < new Date(curr[1].lastUpdated) - ? prev - : curr; - }); - delete bucket[leastActive[0]]; - bucketEntries = Object.entries(bucket); - // For safety, make sure that the bucket is actually at maxNodesPerBucket - if (bucketEntries.length !== this.maxNodesPerBucket) { - throw new nodesErrors.ErrorNodeGraphOversizedBucket(); + const bucketKey = nodesUtils.bucketKey(bucketIndex); + const bucket: NodeBucket = []; + if (sort === 'nodeId' || sort === 'distance') { + const bucketDb = await this.db.level(bucketKey, this.nodeGraphBucketsDb); + for await (const o of bucketDb.createReadStream({ + reverse: order === 'asc' ? false : true, + })) { + const nodeId = nodesUtils.parseBucketDbKey((o as any).key as Buffer); + const data = (o as any).value as Buffer; + const nodeData = await this.db.deserializeDecrypt( + data, + false, + ); + bucket.push([nodeId, nodeData]); + } + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order, + ); + } + } else if (sort === 'lastUpdated') { + const bucketDb = await this.db.level(bucketKey, this.nodeGraphBucketsDb); + const lastUpdatedBucketDb = await this.db.level( + bucketKey, + this.nodeGraphLastUpdatedDb, + ); + const bucketDbIterator = bucketDb.iterator(); + try { + for await (const indexData of lastUpdatedBucketDb.createValueStream({ + reverse: order === 'asc' ? 
false : true, + })) { + const nodeIdBuffer = await this.db.deserializeDecrypt( + indexData as Buffer, + true, + ); + const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + bucketDbIterator.seek(nodeIdBuffer); + // @ts-ignore + // eslint-disable-next-line + const [, bucketData] = await bucketDbIterator.next(); + const nodeData = await this.db.deserializeDecrypt( + bucketData, + false, + ); + bucket.push([nodeId, nodeData]); + } + } finally { + // @ts-ignore + bucketDbIterator.end(); } } - return [ - { - type: 'put', - domain: this.nodeGraphBucketsDbDomain, - key: bucketIndex, - value: bucket, - }, - ]; + return bucket; } /** - * Updates an existing node - * It will update the lastUpdated time - * Optionally it can replace the NodeAddress + * Gets all buckets + * Buckets are always sorted by `NodeBucketIndex` first + * Then secondly by the `sort` parameter + * The `order` parameter applies to both, for example possible sorts: + * NodeBucketIndex asc, NodeID asc + * NodeBucketIndex desc, NodeId desc + * NodeBucketIndex asc, distance asc + * NodeBucketIndex desc, distance desc + * NodeBucketIndex asc, lastUpdated asc + * NodeBucketIndex desc, lastUpdated desc */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async updateNode( - nodeId: NodeId, - nodeAddress?: NodeAddress, - ): Promise { - return await this._transaction(async () => { - const ops = await this.updateNodeOps(nodeId, nodeAddress); - await this.db.batch(ops); - }); + public async *getBuckets( + sort: 'nodeId' | 'distance' | 'lastUpdated' = 'nodeId', + order: 'asc' | 'desc' = 'asc', + ): AsyncGenerator<[NodeBucketIndex, NodeBucket]> { + let bucketIndex: NodeBucketIndex | undefined; + let bucket: NodeBucket = []; + if (sort === 'nodeId' || sort === 'distance') { + for await (const o of this.nodeGraphBucketsDb.createReadStream({ + reverse: order === 'asc' ? false : true, + })) { + const { bucketIndex: bucketIndex_, nodeId } = + nodesUtils.parseBucketsDbKey((o as any).key); + const data = (o as any).value; + const nodeData = await this.db.deserializeDecrypt( + data, + false, + ); + if (bucketIndex == null) { + // First entry of the first bucket + bucketIndex = bucketIndex_; + bucket.push([nodeId, nodeData]); + } else if (bucketIndex === bucketIndex_) { + // Subsequent entries of the same bucket + bucket.push([nodeId, nodeData]); + } else if (bucketIndex !== bucketIndex_) { + // New bucket + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order, + ); + } + yield [bucketIndex, bucket]; + bucketIndex = bucketIndex_; + bucket = [[nodeId, nodeData]]; + } + } + // Yield the last bucket if it exists + if (bucketIndex != null) { + if (sort === 'distance') { + nodesUtils.bucketSortByDistance( + bucket, + this.keyManager.getNodeId(), + order, + ); + } + yield [bucketIndex, bucket]; + } + } else if (sort === 'lastUpdated') { + const bucketsDbIterator = this.nodeGraphBucketsDb.iterator(); + try { + for await (const key of this.nodeGraphLastUpdatedDb.createKeyStream({ + reverse: order === 'asc' ? 
false : true, + })) { + const { bucketIndex: bucketIndex_, nodeId } = + nodesUtils.parseLastUpdatedBucketsDbKey(key as Buffer); + bucketsDbIterator.seek(nodesUtils.bucketsDbKey(bucketIndex_, nodeId)); + // @ts-ignore + // eslint-disable-next-line + const [, bucketData] = await bucketsDbIterator.next(); + const nodeData = await this.db.deserializeDecrypt( + bucketData, + false, + ); + if (bucketIndex == null) { + // First entry of the first bucket + bucketIndex = bucketIndex_; + bucket.push([nodeId, nodeData]); + } else if (bucketIndex === bucketIndex_) { + // Subsequent entries of the same bucket + bucket.push([nodeId, nodeData]); + } else if (bucketIndex !== bucketIndex_) { + // New bucket + yield [bucketIndex, bucket]; + bucketIndex = bucketIndex_; + bucket = [[nodeId, nodeData]]; + } + } + // Yield the last bucket if it exists + if (bucketIndex != null) { + yield [bucketIndex, bucket]; + } + } finally { + // @ts-ignore + bucketsDbIterator.end(); + } + } } - protected async updateNodeOps( - nodeId: NodeId, - nodeAddress?: NodeAddress, - ): Promise> { - const bucketIndex = this.getBucketIndex(nodeId); - const bucket = await this.db.get( - this.nodeGraphBucketsDbDomain, - bucketIndex, + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async resetBuckets(nodeIdOwn: NodeId): Promise { + // Setup new space + const spaceNew = this.space === '0' ? '1' : '0'; + const nodeGraphMetaDbDomainNew = [ + this.nodeGraphDbDomain[0], + 'meta' + spaceNew, + ]; + const nodeGraphBucketsDbDomainNew = [ + this.nodeGraphDbDomain[0], + 'buckets' + spaceNew, + ]; + const nodeGraphLastUpdatedDbDomainNew = [ + this.nodeGraphDbDomain[0], + 'index' + spaceNew, + ]; + // Clear the new space (in case it wasn't cleaned properly last time) + const nodeGraphMetaDbNew = await this.db.level( + nodeGraphMetaDbDomainNew[1], + this.nodeGraphDb, + ); + const nodeGraphBucketsDbNew = await this.db.level( + nodeGraphBucketsDbDomainNew[1], + this.nodeGraphDb, ); - const ops: Array = []; - if (bucket != null && nodeId in bucket) { - bucket[nodeId].lastUpdated = new Date(); - if (nodeAddress != null) { - bucket[nodeId].address = nodeAddress; + const nodeGraphLastUpdatedDbNew = await this.db.level( + nodeGraphLastUpdatedDbDomainNew[1], + this.nodeGraphDb, + ); + await nodeGraphMetaDbNew.clear(); + await nodeGraphBucketsDbNew.clear(); + await nodeGraphLastUpdatedDbNew.clear(); + // Iterating over all entries across all buckets + for await (const o of this.nodeGraphBucketsDb.createReadStream()) { + // The key is a combined bucket key and node ID + const { nodeId } = nodesUtils.parseBucketsDbKey((o as any).key as Buffer); + // If the new own node ID is one of the existing node IDs, it is just dropped + // We only map to the new bucket if it isn't one of the existing node IDs + if (nodeId.equals(nodeIdOwn)) { + continue; } - ops.push({ - type: 'put', - domain: this.nodeGraphBucketsDbDomain, - key: bucketIndex, - value: bucket, - }); - } else { - throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); + const bucketIndexNew = nodesUtils.bucketIndex(nodeIdOwn, nodeId); + const bucketKeyNew = nodesUtils.bucketKey(bucketIndexNew); + const metaDomainNew = [...nodeGraphMetaDbDomainNew, bucketKeyNew]; + const bucketDomainNew = [...nodeGraphBucketsDbDomainNew, bucketKeyNew]; + const indexDomainNew = [...nodeGraphLastUpdatedDbDomainNew, bucketKeyNew]; + const countNew = (await this.db.get(metaDomainNew, 'count')) ?? 
0; + if (countNew < this.nodeBucketLimit) { + await this.db.put(metaDomainNew, 'count', countNew + 1); + } else { + const lastUpdatedBucketDbNew = await this.db.level( + bucketKeyNew, + nodeGraphLastUpdatedDbNew, + ); + let oldestIndexKey: Buffer; + let oldestNodeId: NodeId; + for await (const key of lastUpdatedBucketDbNew.createKeyStream({ + limit: 1, + })) { + oldestIndexKey = key as Buffer; + ({ nodeId: oldestNodeId } = nodesUtils.parseLastUpdatedBucketDbKey( + key as Buffer, + )); + } + await this.db.del( + bucketDomainNew, + nodesUtils.bucketDbKey(oldestNodeId!), + ); + await this.db.del(indexDomainNew, oldestIndexKey!); + } + const data = (o as any).value as Buffer; + const nodeData = await this.db.deserializeDecrypt(data, false); + await this.db.put( + bucketDomainNew, + nodesUtils.bucketDbKey(nodeId), + nodeData, + ); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + nodeData.lastUpdated, + nodeId, + ); + await this.db.put( + indexDomainNew, + lastUpdatedKey, + nodesUtils.bucketDbKey(nodeId), + true, + ); } - return ops; + // Swap to the new space + await this.db.put(this.nodeGraphDbDomain, 'space', spaceNew); + // Clear old space + await this.nodeGraphMetaDb.clear(); + await this.nodeGraphBucketsDb.clear(); + await this.nodeGraphLastUpdatedDb.clear(); + // Swap the spaces + this.space = spaceNew; + this.nodeGraphMetaDbDomain = nodeGraphMetaDbDomainNew; + this.nodeGraphBucketsDbDomain = nodeGraphBucketsDbDomainNew; + this.nodeGraphLastUpdatedDbDomain = nodeGraphLastUpdatedDbDomainNew; + this.nodeGraphMetaDb = nodeGraphMetaDbNew; + this.nodeGraphBucketsDb = nodeGraphBucketsDbNew; + this.nodeGraphLastUpdatedDb = nodeGraphLastUpdatedDbNew; } - /** - * Removes a node from the bucket database - * @param nodeId - */ @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async unsetNode(nodeId: NodeId): Promise { - return await this._transaction(async () => { - const ops = await this.unsetNodeOps(nodeId); - await this.db.batch(ops); - }); + public async getBucketMeta( + bucketIndex: NodeBucketIndex, + ): Promise { + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex( + `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, + ); + } + const metaDomain = [ + ...this.nodeGraphMetaDbDomain, + nodesUtils.bucketKey(bucketIndex), + ]; + const props = await Promise.all([this.db.get(metaDomain, 'count')]); + const [count] = props; + // Bucket meta properties have defaults + return { + count: count ?? 
0, + }; } - protected async unsetNodeOps(nodeId: NodeId): Promise> { - const bucketIndex = this.getBucketIndex(nodeId); - const bucket = await this.db.get( - this.nodeGraphBucketsDbDomain, - bucketIndex, - ); - const ops: Array = []; - if (bucket == null) { - return ops; + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getBucketMetaProp( + bucketIndex: NodeBucketIndex, + key: Key, + ): Promise { + if (bucketIndex < 0 || bucketIndex >= this.nodeIdBits) { + throw new nodesErrors.ErrorNodeGraphBucketIndex( + `bucketIndex must be between 0 and ${this.nodeIdBits - 1} inclusive`, + ); } - delete bucket[nodeId]; - if (Object.keys(bucket).length === 0) { - ops.push({ - type: 'del', - domain: this.nodeGraphBucketsDbDomain, - key: bucketIndex, - }); - } else { - ops.push({ - type: 'put', - domain: this.nodeGraphBucketsDbDomain, - key: bucketIndex, - value: bucket, - }); + const metaDomain = [ + ...this.nodeGraphMetaDbDomain, + nodesUtils.bucketKey(bucketIndex), + ]; + // Bucket meta properties have defaults + let value; + switch (key) { + case 'count': + value = (await this.db.get(metaDomain, key)) ?? 0; + break; } - return ops; + return value; } /** - * Find the correct index of the k-bucket to add a new node to (for this node's - * bucket database). Packs it as a lexicographic integer, such that the order - * of buckets in leveldb is numerical order. + * Finds the set of nodes (of size k) known by the current node (i.e. in its + * buckets database) that have the smallest distance to the target node (i.e. + * are closest to the target node). + * i.e. FIND_NODE RPC from Kademlia spec + * + * Used by the RPC service. + * + * @param nodeId the node ID to find other nodes closest to it + * @param limit the number of closest nodes to return (by default, returns + * according to the maximum number of nodes per bucket) + * @returns a mapping containing exactly k nodeIds -> nodeAddresses (unless the + * current node has less than k nodes in all of its buckets, in which case it + * returns all nodes it has knowledge of) */ - protected getBucketIndex(nodeId: NodeId): string { - const index = nodesUtils.calculateBucketIndex( + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async getClosestNodes( + nodeId: NodeId, + limit: number = this.nodeBucketLimit, + ): Promise { + // Buckets map to the target node in the following way; + // 1. 0, 1, ..., T-1 -> T + // 2. T -> 0, 1, ..., T-1 + // 3. T+1, T+2, ..., 255 are unchanged + // We need to obtain nodes in the following bucket order + // 1. T + // 2. iterate over 0 ---> T-1 + // 3. iterate over T+1 ---> K + // Need to work out the relevant bucket to start from + const localNodeId = this.keyManager.getNodeId(); + const startingBucket = localNodeId.equals(nodeId) + ? 
0 + : nodesUtils.bucketIndex(this.keyManager.getNodeId(), nodeId); + // Getting the whole target's bucket first + const nodeIds: NodeBucket = await this.getBucket(startingBucket); + // We need to iterate over the key stream + // When streaming we want all nodes in the starting bucket + // The keys takes the form `!(lexpack bucketId)!(nodeId)` + // We can just use `!(lexpack bucketId)` to start from + // Less than `!(bucketId 101)!` gets us buckets 100 and lower + // greater than `!(bucketId 99)!` gets up buckets 100 and greater + const prefix = Buffer.from([33]); // Code for `!` prefix + if (nodeIds.length < limit) { + // Just before target bucket + const bucketId = Buffer.from(nodesUtils.bucketKey(startingBucket)); + const endKeyLower = Buffer.concat([prefix, bucketId, prefix]); + const remainingLimit = limit - nodeIds.length; + // Iterate over lower buckets + for await (const o of this.nodeGraphBucketsDb.createReadStream({ + lt: endKeyLower, + limit: remainingLimit, + })) { + const element = o as any as { key: Buffer; value: Buffer }; + const info = nodesUtils.parseBucketsDbKey(element.key); + const nodeData = await this.db.deserializeDecrypt( + element.value, + false, + ); + nodeIds.push([info.nodeId, nodeData]); + } + } + if (nodeIds.length < limit) { + // Just after target bucket + const bucketId = Buffer.from(nodesUtils.bucketKey(startingBucket + 1)); + const startKeyUpper = Buffer.concat([prefix, bucketId, prefix]); + const remainingLimit = limit - nodeIds.length; + // Iterate over ids further away + for await (const o of this.nodeGraphBucketsDb.createReadStream({ + gt: startKeyUpper, + limit: remainingLimit, + })) { + const element = o as any as { key: Buffer; value: Buffer }; + const info = nodesUtils.parseBucketsDbKey(element.key); + const nodeData = await this.db.deserializeDecrypt( + element.value, + false, + ); + nodeIds.push([info.nodeId, nodeData]); + } + } + // If no nodes were found, return nothing + if (nodeIds.length === 0) return []; + // Need to get the whole of the last bucket + const lastBucketIndex = nodesUtils.bucketIndex( this.keyManager.getNodeId(), - nodeId, + nodeIds[nodeIds.length - 1][0], ); - return lexi.pack(index, 'hex') as string; + const lastBucket = await this.getBucket(lastBucketIndex); + // Pop off elements of the same bucket to avoid duplicates + let element = nodeIds.pop(); + while ( + element != null && + nodesUtils.bucketIndex(this.keyManager.getNodeId(), element[0]) === + lastBucketIndex + ) { + element = nodeIds.pop(); + } + if (element != null) nodeIds.push(element); + // Adding last bucket to the list + nodeIds.push(...lastBucket); + + nodesUtils.bucketSortByDistance(nodeIds, nodeId, 'asc'); + return nodeIds.slice(0, limit); } /** - * Returns all of the buckets in an array + * Sets a bucket meta property + * This is protected because users cannot directly manipulate bucket meta */ - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async getAllBuckets(): Promise> { - return await this._transaction(async () => { - const buckets: Array = []; - for await (const o of this.nodeGraphBucketsDb.createReadStream()) { - const data = (o as any).value as Buffer; - const bucket = await this.db.deserializeDecrypt( - data, - false, - ); - buckets.push(bucket); - } - return buckets; - }); + protected async setBucketMetaProp( + bucketIndex: NodeBucketIndex, + key: Key, + value: NodeBucketMeta[Key], + ): Promise { + const metaDomain = [ + ...this.nodeGraphMetaDbDomain, + nodesUtils.bucketKey(bucketIndex), + ]; + await this.db.put(metaDomain, key, 
value); + return; } /** - * To be called on key renewal. Re-orders all nodes in all buckets with respect - * to the new node ID. - * NOTE: original nodes may be lost in this process. If they're redistributed - * to a newly full bucket, the least active nodes in the newly full bucket - * will be removed. + * Derive the bucket index of the k-buckets from the new `NodeId` + * The bucket key is the string encoded version of bucket index + * that preserves lexicographic order */ - @ready(new nodesErrors.ErrorNodeGraphNotRunning()) - public async refreshBuckets(): Promise { - return await this._transaction(async () => { - const ops: Array = []; - // Get a local copy of all the buckets - const buckets = await this.getAllBuckets(); - // Wrap as a batch operation. We want to rollback if we encounter any - // errors (such that we don't clear the DB without re-adding the nodes) - // 1. Delete every bucket - for await (const k of this.nodeGraphBucketsDb.createKeyStream()) { - const hexBucketIndex = k as string; - ops.push({ - type: 'del', - domain: this.nodeGraphBucketsDbDomain, - key: hexBucketIndex, - }); - } - const tempBuckets: Record = {}; - // 2. Re-add all the nodes from all buckets - for (const b of buckets) { - for (const n of Object.keys(b)) { - const nodeId = IdInternal.fromString(n); - const newIndex = this.getBucketIndex(nodeId); - let expectedBucket = tempBuckets[newIndex]; - // The following is more or less copied from setNodeOps - if (expectedBucket == null) { - expectedBucket = {}; - } - const bucketEntries = Object.entries(expectedBucket); - // Add the old node - expectedBucket[nodeId] = { - address: b[nodeId].address, - lastUpdated: b[nodeId].lastUpdated, - }; - // If, with the old node added, we exceed the limit - if (bucketEntries.length > this.maxNodesPerBucket) { - // Then, with the old node added, find the least active and remove - const leastActive = bucketEntries.reduce((prev, curr) => { - return prev[1].lastUpdated < curr[1].lastUpdated ? prev : curr; - }); - delete expectedBucket[leastActive[0]]; - } - // Add this reconstructed bucket (with old node) into the temp storage - tempBuckets[newIndex] = expectedBucket; - } - } - // Now that we've reconstructed all the buckets, perform batch operations - // on a bucket level (i.e. 
per bucket, instead of per node) - for (const bucketIndex in tempBuckets) { - ops.push({ - type: 'put', - domain: this.nodeGraphBucketsDbDomain, - key: bucketIndex, - value: tempBuckets[bucketIndex], - }); - } - await this.db.batch(ops); - }); + public bucketIndex(nodeId: NodeId): [NodeBucketIndex, string] { + const nodeIdOwn = this.keyManager.getNodeId(); + if (nodeId.equals(nodeIdOwn)) { + throw new nodesErrors.ErrorNodeGraphSameNodeId(); + } + const bucketIndex = nodesUtils.bucketIndex(nodeIdOwn, nodeId); + const bucketKey = nodesUtils.bucketKey(bucketIndex); + return [bucketIndex, bucketKey]; } } diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index b28343667..0347bffcc 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -1,23 +1,36 @@ import type { DB } from '@matrixai/db'; import type NodeConnectionManager from './NodeConnectionManager'; import type NodeGraph from './NodeGraph'; +import type Queue from './Queue'; import type KeyManager from '../keys/KeyManager'; import type { PublicKeyPem } from '../keys/types'; import type Sigchain from '../sigchain/Sigchain'; import type { ChainData, ChainDataEncoded } from '../sigchain/types'; -import type { NodeId, NodeAddress, NodeBucket } from '../nodes/types'; +import type { + NodeId, + NodeAddress, + NodeBucket, + NodeBucketIndex, +} from '../nodes/types'; import type { ClaimEncoded } from '../claims/types'; +import type { Timer } from '../types'; +import type { PromiseType } from '../utils/utils'; +import type { AbortSignal } from 'node-abort-controller'; import Logger from '@matrixai/logger'; +import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; +import { AbortController } from 'node-abort-controller'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; +import * as networkUtils from '../network/utils'; import { utils as validationUtils } from '../validation'; import * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import * as claimsErrors from '../claims/errors'; -import * as networkErrors from '../network/errors'; -import * as networkUtils from '../network/utils'; import * as sigchainUtils from '../sigchain/utils'; import * as claimsUtils from '../claims/utils'; +import { promise, timerStart } from '../utils/utils'; +interface NodeManager extends StartStop {} +@StartStop() class NodeManager { protected db: DB; protected logger: Logger; @@ -25,6 +38,18 @@ class NodeManager { protected keyManager: KeyManager; protected nodeConnectionManager: NodeConnectionManager; protected nodeGraph: NodeGraph; + protected queue: Queue; + // Refresh bucket timer + protected refreshBucketDeadlineMap: Map = new Map(); + protected refreshBucketTimer: NodeJS.Timer; + protected refreshBucketNext: NodeBucketIndex; + public readonly refreshBucketTimerDefault; + protected refreshBucketQueue: Set = new Set(); + protected refreshBucketQueueRunning: boolean = false; + protected refreshBucketQueueRunner: Promise; + protected refreshBucketQueuePlug_: PromiseType = promise(); + protected refreshBucketQueueDrained_: PromiseType = promise(); + protected refreshBucketQueueAbortController: AbortController; constructor({ db, @@ -32,6 +57,8 @@ class NodeManager { sigchain, nodeConnectionManager, nodeGraph, + queue, + refreshBucketTimerDefault = 3600000, // 1 hour in milliseconds logger, }: { db: DB; @@ -39,6 +66,8 @@ class NodeManager { sigchain: Sigchain; nodeConnectionManager: NodeConnectionManager; nodeGraph: NodeGraph; + queue: Queue; + refreshBucketTimerDefault?: number; 
logger?: Logger; }) { this.logger = logger ?? new Logger(this.constructor.name); @@ -47,32 +76,50 @@ class NodeManager { this.sigchain = sigchain; this.nodeConnectionManager = nodeConnectionManager; this.nodeGraph = nodeGraph; + this.queue = queue; + this.refreshBucketTimerDefault = refreshBucketTimerDefault; + } + + public async start() { + this.logger.info(`Starting ${this.constructor.name}`); + this.startRefreshBucketTimers(); + this.refreshBucketQueueRunner = this.startRefreshBucketQueue(); + this.logger.info(`Started ${this.constructor.name}`); + } + + public async stop() { + this.logger.info(`Stopping ${this.constructor.name}`); + await this.stopRefreshBucketTimers(); + await this.stopRefreshBucketQueue(); + this.logger.info(`Stopped ${this.constructor.name}`); } /** * Determines whether a node in the Polykey network is online. * @return true if online, false if offline + * @param nodeId - NodeId of the node we're pinging + * @param address - Optional Host and Port we want to ping + * @param timer Connection timeout timer */ - public async pingNode(targetNodeId: NodeId): Promise { - const targetAddress: NodeAddress = - await this.nodeConnectionManager.findNode(targetNodeId); - try { - // Attempt to open a connection via the forward proxy - // i.e. no NodeConnection object created (no need for GRPCClient) - await this.nodeConnectionManager.holePunchForward( - targetNodeId, - await networkUtils.resolveHost(targetAddress.host), - targetAddress.port, - ); - } catch (e) { - // If the connection request times out, then return false - if (e instanceof networkErrors.ErrorConnectionStart) { - return false; - } - // Throw any other error back up the callstack - throw e; + public async pingNode( + nodeId: NodeId, + address?: NodeAddress, + timer?: Timer, + ): Promise { + // We need to attempt a connection using the proxies + // For now we will just do a forward connect + relay message + const targetAddress = + address ?? (await this.nodeConnectionManager.findNode(nodeId)); + if (targetAddress == null) { + throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); } - return true; + const targetHost = await networkUtils.resolveHost(targetAddress.host); + return await this.nodeConnectionManager.pingNode( + nodeId, + targetHost, + targetAddress.port, + timer, + ); } /** @@ -306,7 +353,7 @@ class NodeManager { public async getNodeAddress( nodeId: NodeId, ): Promise { - return await this.nodeGraph.getNode(nodeId); + return (await this.nodeGraph.getNode(nodeId))?.address; } /** @@ -315,7 +362,7 @@ class NodeManager { * @returns true if the node exists in the table, false otherwise */ public async knowsNode(targetNodeId: NodeId): Promise { - return await this.nodeGraph.knowsNode(targetNodeId); + return (await this.nodeGraph.getNode(targetNodeId)) != null; } /** @@ -326,23 +373,133 @@ class NodeManager { } /** - * Sets a node in the NodeGraph + * Adds a node to the node graph. This assumes that you have already authenticated the node + * Updates the node if the node already exists + * This operation is blocking by default - set `block` to false to make it non-blocking + * @param nodeId - Id of the node we wish to add + * @param nodeAddress - Expected address of the node we want to add + * @param block - Flag for if the operation should block or utilize the async queue + * @param force - Flag for if we want to add the node without authenticating or if the bucket is full. + * This will drop the oldest node in favor of the new. 
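The flags on the new `setNode` (documented in the surrounding comment) trade latency for bucket hygiene. A hypothetical caller-side view, with the object names assumed from this diff:

```ts
// Hedged usage sketch; names are assumptions drawn from the surrounding code.
import type { NodeId, NodeAddress } from './types';
import type NodeManager from './NodeManager';

declare const nodeManager: NodeManager;
declare const nodeId: NodeId;
declare const nodeAddress: NodeAddress;

async function addPeer() {
  // Default: block, pinging up to 3 of the bucket's oldest nodes if it is full
  await nodeManager.setNode(nodeId, nodeAddress);
  // Non-blocking: a full bucket defers the insert onto the shared Queue
  await nodeManager.setNode(nodeId, nodeAddress, false);
  // Force: evict the oldest entry without pinging, e.g. for a node the
  // caller has already authenticated and must record immediately
  await nodeManager.setNode(nodeId, nodeAddress, true, true);
}
```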
+   * @param timeout Connection timeout
    */
+  @ready(new nodesErrors.ErrorNodeManagerNotRunning())
   public async setNode(
     nodeId: NodeId,
     nodeAddress: NodeAddress,
+    block: boolean = true,
+    force: boolean = false,
+    timeout?: number,
   ): Promise<void> {
-    return await this.nodeGraph.setNode(nodeId, nodeAddress);
+    // When adding a node we need to handle 3 cases
+    // 1. The node already exists. We need to update its last updated field
+    // 2. The node doesn't exist and bucket has room.
+    //    We need to add the node to the bucket
+    // 3. The node doesn't exist and the bucket is full.
+    //    We need to ping the oldest node. If the ping succeeds we need to update
+    //    the lastUpdated of the oldest node and drop the new one. If the ping
+    //    fails we delete the old node and add in the new one.
+    const nodeData = await this.nodeGraph.getNode(nodeId);
+    // If this is a new entry, check the bucket limit
+    const [bucketIndex] = this.nodeGraph.bucketIndex(nodeId);
+    const count = await this.nodeGraph.getBucketMetaProp(bucketIndex, 'count');
+    if (nodeData != null || count < this.nodeGraph.nodeBucketLimit) {
+      // Either already exists or has room in the bucket
+      // We want to add or update the node
+      await this.nodeGraph.setNode(nodeId, nodeAddress);
+      // Updating the refreshBucket timer
+      this.refreshBucketUpdateDeadline(bucketIndex);
+    } else {
+      // We want to add a node but the bucket is full
+      // We need to ping the oldest node
+      if (force) {
+        // We just add the new node anyway without checking the old one
+        const oldNodeId = (
+          await this.nodeGraph.getOldestNode(bucketIndex, 1)
+        ).pop()!;
+        this.logger.debug(
+          `Force was set, removing ${nodesUtils.encodeNodeId(
+            oldNodeId,
+          )} and adding ${nodesUtils.encodeNodeId(nodeId)}`,
+        );
+        await this.nodeGraph.unsetNode(oldNodeId);
+        await this.nodeGraph.setNode(nodeId, nodeAddress);
+        // Updating the refreshBucket timer
+        this.refreshBucketUpdateDeadline(bucketIndex);
+        return;
+      } else if (block) {
+        this.logger.debug(
+          `Bucket was full and blocking was true, garbage collecting old nodes to add ${nodesUtils.encodeNodeId(
+            nodeId,
+          )}`,
+        );
+        await this.garbageCollectOldNode(
+          bucketIndex,
+          nodeId,
+          nodeAddress,
+          timeout,
+        );
+      } else {
+        this.logger.debug(
+          `Bucket was full and blocking was false, adding ${nodesUtils.encodeNodeId(
+            nodeId,
+          )} to queue`,
+        );
+        // Re-attempt this later asynchronously by adding it to the queue
+        this.queue.push(() =>
+          this.setNode(nodeId, nodeAddress, true, false, timeout),
+        );
+      }
+    }
   }

-  /**
-   * Updates the node in the NodeGraph
-   */
-  public async updateNode(
+  private async garbageCollectOldNode(
+    bucketIndex: number,
     nodeId: NodeId,
-    nodeAddress?: NodeAddress,
-  ): Promise<void> {
-    return await this.nodeGraph.updateNode(nodeId, nodeAddress);
+    nodeAddress: NodeAddress,
+    timeout?: number,
+  ) {
+    const oldestNodeIds = await this.nodeGraph.getOldestNode(bucketIndex, 3);
+    // We want to concurrently ping the nodes
+    const pingPromises = oldestNodeIds.map((nodeId) => {
+      const doPing = async (): Promise<{
+        nodeId: NodeId;
+        success: boolean;
+      }> => {
+        // This needs to return nodeId and ping result
+        const data = await this.nodeGraph.getNode(nodeId);
+        if (data == null) return { nodeId, success: false };
+        const timer = timeout != null ? timerStart(timeout) : undefined;
+        const result = await this.pingNode(nodeId, nodeAddress, timer);
+        return { nodeId, success: result };
+      };
+      return doPing();
+    });
+    const pingResults = await Promise.all(pingPromises);
+    for (const { nodeId, success } of pingResults) {
+      if (success) {
+        // Ping succeeded, update the node
+        this.logger.debug(
+          `Ping succeeded for ${nodesUtils.encodeNodeId(nodeId)}`,
+        );
+        const node = (await this.nodeGraph.getNode(nodeId))!;
+        await this.nodeGraph.setNode(nodeId, node.address);
+        // Updating the refreshBucket timer
+        this.refreshBucketUpdateDeadline(bucketIndex);
+      } else {
+        this.logger.debug(`Ping failed for ${nodesUtils.encodeNodeId(nodeId)}`);
+        // Otherwise we remove the node
+        await this.nodeGraph.unsetNode(nodeId);
+      }
+    }
+    // Check if we now have room and add the new node
+    const count = await this.nodeGraph.getBucketMetaProp(bucketIndex, 'count');
+    if (count < this.nodeGraph.nodeBucketLimit) {
+      this.logger.debug(`Bucket ${bucketIndex} now has room, adding new node`);
+      await this.nodeGraph.setNode(nodeId, nodeAddress);
+      // Updating the refreshBucket timer
+      this.refreshBucketUpdateDeadline(bucketIndex);
+    }
   }
   /**
@@ -353,18 +510,180 @@
   }
   /**
-   * Gets all buckets from the NodeGraph
+   * To be called on key renewal. Re-orders all nodes in all buckets with respect
+   * to the new node ID.
    */
-  public async getAllBuckets(): Promise<Array<NodeBucket>> {
-    return await this.nodeGraph.getAllBuckets();
+  public async resetBuckets(): Promise<void> {
+    return await this.nodeGraph.resetBuckets(this.keyManager.getNodeId());
   }
   /**
-   * To be called on key renewal. Re-orders all nodes in all buckets with respect
-   * to the new node ID.
+   * Kademlia refresh bucket operation.
+   * It picks a random node within a bucket and does a search for that node.
+   * Connections during the search will share node information with other
+   * nodes.
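`refreshBucket` below relies on `nodesUtils.generateRandomNodeIdForBucket`, whose implementation is not included in this diff. One way it could work, sketched under the assumption that bucket i holds IDs whose XOR distance from our own ID has its highest set bit at position i:

```ts
// Hedged sketch: derive a random NodeId that lands in bucket `bucketIndex`
// relative to `ownId` (bit 0 = least significant bit of the last byte).
import { randomBytes } from 'crypto';

function randomNodeIdForBucket(
  ownId: Uint8Array,
  bucketIndex: number,
): Uint8Array {
  const bits = ownId.length * 8;
  if (bucketIndex < 0 || bucketIndex >= bits) {
    throw new RangeError('bucketIndex out of range');
  }
  // Start fully random; bits below bucketIndex may take any value
  const id = Uint8Array.from(randomBytes(ownId.length));
  for (let bit = bits - 1; bit >= bucketIndex; bit--) {
    const byte = ownId.length - 1 - Math.floor(bit / 8);
    const mask = 1 << bit % 8;
    if (bit === bucketIndex) {
      // This bit must differ, making it the top set bit of the XOR distance
      id[byte] = (id[byte] & ~mask) | (~ownId[byte] & mask);
    } else {
      // Bits above bucketIndex must match ownId exactly
      id[byte] = (id[byte] & ~mask) | (ownId[byte] & mask);
    }
  }
  return id;
}
```

The resulting ID XORs against our own to a value in [2^i, 2^(i+1)), so a `findNode` search for it walks the network through bucket i, repopulating that bucket as closer peers respond.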
+ * @param bucketIndex + * @param options */ - public async refreshBuckets(): Promise { - return await this.nodeGraph.refreshBuckets(); + public async refreshBucket( + bucketIndex: NodeBucketIndex, + options: { signal?: AbortSignal } = {}, + ) { + const { signal } = { ...options }; + // We need to generate a random nodeId for this bucket + const nodeId = this.keyManager.getNodeId(); + const bucketRandomNodeId = nodesUtils.generateRandomNodeIdForBucket( + nodeId, + bucketIndex, + ); + // We then need to start a findNode procedure + await this.nodeConnectionManager.findNode(bucketRandomNodeId, { signal }); + } + + // Refresh bucket activity timer methods + + private startRefreshBucketTimers() { + // Setting initial bucket to refresh + this.refreshBucketNext = 0; + // Setting initial deadline + this.refreshBucketTimerReset(this.refreshBucketTimerDefault); + + for ( + let bucketIndex = 0; + bucketIndex < this.nodeGraph.nodeIdBits; + bucketIndex++ + ) { + const deadline = Date.now() + this.refreshBucketTimerDefault; + this.refreshBucketDeadlineMap.set(bucketIndex, deadline); + } + } + + private async stopRefreshBucketTimers() { + clearTimeout(this.refreshBucketTimer); + } + + private refreshBucketTimerReset(timeout: number) { + clearTimeout(this.refreshBucketTimer); + this.refreshBucketTimer = setTimeout(() => { + this.refreshBucketRefreshTimer(); + }, timeout); + } + + public refreshBucketUpdateDeadline(bucketIndex: NodeBucketIndex) { + // Update the map deadline + this.refreshBucketDeadlineMap.set( + bucketIndex, + Date.now() + this.refreshBucketTimerDefault, + ); + // If the bucket was pending a refresh we remove it + this.refreshBucketQueueRemove(bucketIndex); + if (bucketIndex === this.refreshBucketNext) { + // Bucket is same as next bucket, this affects the timer + this.refreshBucketRefreshTimer(); + } + } + + private refreshBucketRefreshTimer() { + // Getting new closest deadline + let closestBucket = this.refreshBucketNext; + let closestDeadline = Date.now() + this.refreshBucketTimerDefault; + const now = Date.now(); + for (const [bucketIndex, deadline] of this.refreshBucketDeadlineMap) { + // Skip any queued buckets marked by 0 deadline + if (deadline === 0) continue; + if (deadline <= now) { + // Deadline for this has already passed, we add it to the queue + this.refreshBucketQueueAdd(bucketIndex); + continue; + } + if (deadline < closestDeadline) { + closestBucket = bucketIndex; + closestDeadline = deadline; + } + } + // Working out time left + const timeout = closestDeadline - Date.now(); + this.logger.debug( + `Refreshing refreshBucket timer with new timeout ${timeout}`, + ); + // Updating timer and next + this.refreshBucketNext = closestBucket; + this.refreshBucketTimerReset(timeout); + } + + // Refresh bucket async queue methods + + public refreshBucketQueueAdd(bucketIndex: NodeBucketIndex) { + this.logger.debug(`Adding bucket ${bucketIndex} to queue`); + this.refreshBucketDeadlineMap.set(bucketIndex, 0); + this.refreshBucketQueue.add(bucketIndex); + this.refreshBucketQueueUnplug(); + } + + public refreshBucketQueueRemove(bucketIndex: NodeBucketIndex) { + this.logger.debug(`Removing bucket ${bucketIndex} from queue`); + this.refreshBucketQueue.delete(bucketIndex); + } + + public async refreshBucketQueueDrained() { + await this.refreshBucketQueueDrained_.p; + } + + private async startRefreshBucketQueue(): Promise { + this.refreshBucketQueueRunning = true; + this.refreshBucketQueuePlug(); + let iterator: IterableIterator | undefined; + this.refreshBucketQueueAbortController = new 
AbortController(); + const pace = async () => { + // Wait for plug + await this.refreshBucketQueuePlug_.p; + if (iterator == null) { + iterator = this.refreshBucketQueue[Symbol.iterator](); + } + return this.refreshBucketQueueRunning; + }; + while (await pace()) { + const bucketIndex: NodeBucketIndex = iterator?.next().value; + if (bucketIndex == null) { + // Iterator is empty, plug and continue + iterator = undefined; + this.refreshBucketQueuePlug(); + continue; + } + // Do the job + this.logger.debug( + `processing refreshBucket for bucket ${bucketIndex}, ${this.refreshBucketQueue.size} left in queue`, + ); + try { + await this.refreshBucket(bucketIndex, { + signal: this.refreshBucketQueueAbortController.signal, + }); + } catch (e) { + if (e instanceof nodesErrors.ErrorNodeAborted) break; + throw e; + } + // Remove from queue and update bucket deadline + this.refreshBucketQueue.delete(bucketIndex); + this.refreshBucketUpdateDeadline(bucketIndex); + } + this.logger.debug('startRefreshBucketQueue has ended'); + } + + private async stopRefreshBucketQueue(): Promise<void> { + // Flag end and await queue finish + this.refreshBucketQueueAbortController.abort(); + this.refreshBucketQueueRunning = false; + this.refreshBucketQueueUnplug(); + } + + private refreshBucketQueuePlug() { + this.refreshBucketQueuePlug_ = promise(); + this.refreshBucketQueueDrained_?.resolveP(); + } + + private refreshBucketQueueUnplug() { + this.refreshBucketQueueDrained_ = promise(); + this.refreshBucketQueuePlug_?.resolveP(); } } diff --git a/src/nodes/Queue.ts b/src/nodes/Queue.ts new file mode 100644 index 000000000..0f9c1485e --- /dev/null +++ b/src/nodes/Queue.ts @@ -0,0 +1,91 @@ +import type { PromiseType } from '../utils'; +import Logger from '@matrixai/logger'; +import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; +import * as nodesErrors from './errors'; +import { promise } from '../utils'; + +interface Queue extends StartStop {} +@StartStop() +class Queue { + protected logger: Logger; + protected end: boolean = false; + protected queue: Array<() => Promise<void>> = []; + protected runner: Promise<void>; + protected plug_: PromiseType<void> = promise(); + protected drained_: PromiseType<void> = promise(); + + constructor({ logger }: { logger?: Logger }) { + this.logger = logger ??
new Logger(this.constructor.name); + } + + public async start() { + this.logger.info(`Starting ${this.constructor.name}`); + const start = async () => { + this.logger.debug('Starting queue'); + this.plug(); + const pace = async () => { + await this.plug_.p; + return !this.end; + }; + // While queue hasn't ended + while (await pace()) { + const job = this.queue.shift(); + if (job == null) { + // If the queue is empty then we pause the queue + this.plug(); + continue; + } + try { + await job(); + } catch (e) { + if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; + } + } + this.logger.debug('queue has ended'); + }; + this.runner = start(); + this.logger.info(`Started ${this.constructor.name}`); + } + + public async stop() { + this.logger.info(`Stopping ${this.constructor.name}`); + this.logger.debug('Stopping queue'); + // Tell the queue runner to end + this.end = true; + this.unplug(); + // Wait for runner to finish its current job + await this.runner; + this.logger.info(`Stopped ${this.constructor.name}`); + } + + /** + * This adds a setNode operation to the queue + */ + public push(f: () => Promise<void>): void { + this.queue.push(f); + this.unplug(); + } + + @ready(new nodesErrors.ErrorQueueNotRunning()) + public async drained(): Promise<void> { + await this.drained_.p; + } + + private plug(): void { + this.logger.debug('Plugging queue'); + // Pausing queue + this.plug_ = promise(); + // Signalling queue is empty + this.drained_.resolveP(); + } + + private unplug(): void { + this.logger.debug('Unplugging queue'); + // Starting queue + this.plug_.resolveP(); + // Signalling queue is running + this.drained_ = promise(); + } +} + +export default Queue; diff --git a/src/nodes/errors.ts b/src/nodes/errors.ts index a7074ae41..003cf40ff 100644 --- a/src/nodes/errors.ts +++ b/src/nodes/errors.ts @@ -2,6 +2,21 @@ import { ErrorPolykey, sysexits } from '../errors'; class ErrorNodes extends ErrorPolykey {} +class ErrorNodeAborted extends ErrorNodes { + description = 'Operation was aborted'; + exitCode = sysexits.USAGE; +} + +class ErrorNodeManagerNotRunning extends ErrorNodes { + description = 'NodeManager is not running'; + exitCode = sysexits.USAGE; +} + +class ErrorQueueNotRunning extends ErrorNodes { + description = 'Queue is not running'; + exitCode = sysexits.USAGE; +} + class ErrorNodeGraphRunning extends ErrorNodes { description = 'NodeGraph is running'; exitCode = sysexits.USAGE; @@ -37,6 +52,11 @@ class ErrorNodeGraphSameNodeId extends ErrorNodes { exitCode = sysexits.USAGE; } +class ErrorNodeGraphBucketIndex extends ErrorNodes { + description = 'Bucket index is out of range'; + exitCode = sysexits.USAGE; +} + class ErrorNodeConnectionDestroyed extends ErrorNodes { description = 'NodeConnection is destroyed'; exitCode = sysexits.USAGE; @@ -69,6 +89,9 @@ class ErrorNodeConnectionHostWildcard extends ErrorNodes { export { ErrorNodes, + ErrorNodeAborted, + ErrorNodeManagerNotRunning, + ErrorQueueNotRunning, ErrorNodeGraphRunning, ErrorNodeGraphNotRunning, ErrorNodeGraphDestroyed, @@ -76,6 +99,7 @@ export { ErrorNodeGraphEmptyDatabase, ErrorNodeGraphOversizedBucket, ErrorNodeGraphSameNodeId, + ErrorNodeGraphBucketIndex, ErrorNodeConnectionDestroyed, ErrorNodeConnectionTimeout, ErrorNodeConnectionInfoNotExist, diff --git a/src/nodes/types.ts b/src/nodes/types.ts index ffb916851..8e173b4f2 100644 --- a/src/nodes/types.ts +++ b/src/nodes/types.ts @@ -4,6 +4,10 @@ import type { Host, Hostname, Port } from '../network/types'; import type { Claim, ClaimId } from '../claims/types'; import type {
ChainData } from '../sigchain/types'; +// This should be a string +// actually because it is a domain +type NodeGraphSpace = '0' | '1'; + type NodeId = Opaque<'NodeId', Id>; type NodeIdString = Opaque<'NodeIdString', string>; type NodeIdEncoded = Opaque<'NodeIdEncoded', string>; @@ -13,9 +17,43 @@ type NodeAddress = { port: Port; }; -type SeedNodes = Record<NodeIdEncoded, NodeAddress>; +type NodeBucketIndex = number; +// Type NodeBucket = Record<NodeIdString, NodeData>; + +// TODO: +// No longer need to use NodeIdString +// It's an array, if you want to lookup +// It's ordered by the last updated date +// On the other hand, does this matter +// Not really? +// USE THIS TYPE INSTEAD +type NodeBucket = Array<[NodeId, NodeData]>; + +type NodeBucketMeta = { + count: number; +}; + +// Type NodeBucketMetaProps = NonFunctionProperties<NodeBucketMeta>; + +// Just make the bucket entries also +// bucketIndex not as a key +// but as the domain +// !!NodeGraph!!meta!!ff!!count type NodeData = { + address: NodeAddress; + lastUpdated: number; +}; + +// Type NodeBucketEntry = { +// address: NodeAddress; +// lastUpdated: Date; +// }; + +type SeedNodes = Record<NodeIdEncoded, NodeAddress>; + +// FIXME: should have a proper name +type NodeEntry = { id: NodeId; address: NodeAddress; distance: BigInt; @@ -41,16 +79,6 @@ type NodeInfo = { chain: ChainData; }; -type NodeBucketIndex = number; - -// The data type to be stored in each leveldb entry for the node table -type NodeBucket = { - [key: string]: { - address: NodeAddress; - lastUpdated: Date; - }; -}; - // Only 1 domain, so don't need a 'domain' value (like /gestalts/types.ts) type NodeGraphOp_ = { // Bucket index @@ -72,10 +100,15 @@ export type { NodeIdEncoded, NodeAddress, SeedNodes, - NodeData, NodeClaim, NodeInfo, NodeBucketIndex, + NodeBucketMeta, NodeBucket, + NodeData, + NodeEntry, + // NodeBucketEntry, + NodeGraphOp, + NodeGraphSpace, }; diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts index 696e31d43..c61a6cd58 100644 --- a/src/nodes/utils.ts +++ b/src/nodes/utils.ts @@ -1,29 +1,75 @@ -import type { NodeData, NodeId, NodeIdEncoded } from './types'; +import type { + NodeId, + NodeIdEncoded, + NodeBucket, + NodeBucketIndex, +} from './types'; import { IdInternal } from '@matrixai/id'; -import { bytes2BigInt } from '../utils'; +import lexi from 'lexicographic-integer'; +import { bytes2BigInt, bufferSplit } from '../utils'; +import * as keysUtils from '../keys/utils'; + +// FIXME: +const prefixBuffer = Buffer.from([33]); +// Const prefixBuffer = Buffer.from(dbUtils.prefix); /** - * Compute the distance between two nodes. - * distance = nodeId1 ^ nodeId2 - * where ^ = bitwise XOR operator + * Encodes the NodeId as a `base32hex` string */ -function calculateDistance(nodeId1: NodeId, nodeId2: NodeId): bigint { - const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); - return bytes2BigInt(distance); +function encodeNodeId(nodeId: NodeId): NodeIdEncoded { + return nodeId.toMultibase('base32hex') as NodeIdEncoded; } /** - * Find the correct index of the k-bucket to add a new node to.
+ * Decodes an encoded NodeId string into a NodeId + */ +function decodeNodeId(nodeIdEncoded: any): NodeId | undefined { + if (typeof nodeIdEncoded !== 'string') { + return; + } + const nodeId = IdInternal.fromMultibase(nodeIdEncoded); + if (nodeId == null) { + return; + } + // All NodeIds are 32 bytes long + // The NodeGraph requires a fixed size for Node Ids + if (nodeId.length !== 32) { + return; + } + return nodeId; +} + +/** + * Calculate the bucket index that the target node should be located in. * A node's k-buckets are organised such that for the ith k-bucket where * 0 <= i < nodeIdBits, the contacts in this ith bucket are known to adhere to * the following inequality: * 2^i <= distance (from current node) < 2^(i+1) + * This means lower buckets will have fewer nodes than the upper buckets. + * The highest bucket will contain half of all possible nodes. + * The lowest bucket will only contain 1 node. * * NOTE: because XOR is a commutative operation (i.e. a XOR b = b XOR a), the * order of the passed parameters is actually irrelevant. These variables are * purely named for communicating function purpose. + * + * NOTE: Kademlia literature generally talks about buckets with 1-based indexing + * and that the buckets are ordered from largest to smallest. This means the first + * 1st-bucket is the far & large bucket, and the last 256th-bucket is the close bucket. + * This is reversed in our `NodeBucketIndex` encoding. This is so that lexicographic + * sort orders our buckets from closest bucket to farthest bucket. + * + * To convert from `NodeBucketIndex` to nth-bucket in Kademlia literature: + * + * | NodeBucketIndex | Nth-Bucket | + * | --------------- | ---------- | + * | 255 | 1 | farthest & largest + * | 254 | 2 | + * | ... | ... | + * | 1 | 255 | + * | 0 | 256 | closest & smallest */ -function calculateBucketIndex(sourceNode: NodeId, targetNode: NodeId): number { +function bucketIndex(sourceNode: NodeId, targetNode: NodeId): NodeBucketIndex { const distance = sourceNode.map((byte, i) => byte ^ targetNode[i]); const MSByteIndex = distance.findIndex((byte) => byte !== 0); if (MSByteIndex === -1) { @@ -37,48 +83,262 @@ function calculateBucketIndex(sourceNode: NodeId, targetNode: NodeId): number { } /** - * A sorting compareFn to sort an array of NodeData by increasing distance.
+ * Encodes bucket index to bucket sublevel key */ -function sortByDistance(a: NodeData, b: NodeData) { - if (a.distance > b.distance) { - return 1; - } else if (a.distance < b.distance) { - return -1; - } else { - return 0; +function bucketKey(bucketIndex: NodeBucketIndex): string { + return lexi.pack(bucketIndex, 'hex'); +} + +/** + * Creates key for buckets sublevel + */ +function bucketsDbKey(bucketIndex: NodeBucketIndex, nodeId: NodeId): Buffer { + return Buffer.concat([ + prefixBuffer, + Buffer.from(bucketKey(bucketIndex)), + prefixBuffer, + bucketDbKey(nodeId), + ]); +} + +/** + * Creates key for single bucket sublevel + */ +function bucketDbKey(nodeId: NodeId): Buffer { + return nodeId.toBuffer(); +} + +/** + * Creates key for buckets indexed by lastUpdated sublevel + */ +function lastUpdatedBucketsDbKey( + bucketIndex: NodeBucketIndex, + lastUpdated: number, + nodeId: NodeId, +): Buffer { + return Buffer.concat([ + prefixBuffer, + Buffer.from(bucketKey(bucketIndex)), + prefixBuffer, + lastUpdatedBucketDbKey(lastUpdated, nodeId), + ]); +} + +/** + * Creates key for single bucket indexed by lastUpdated sublevel + */ +function lastUpdatedBucketDbKey(lastUpdated: number, nodeId: NodeId): Buffer { + return Buffer.concat([ + Buffer.from(lexi.pack(lastUpdated, 'hex')), + Buffer.from('-'), + nodeId.toBuffer(), + ]); +} + +/** + * Parse the NodeGraph buckets sublevel key + * The keys look like `!<bucketIndex>!<NodeId>` + * It is assumed that the `!` is the sublevel prefix. + */ +function parseBucketsDbKey(keyBuffer: Buffer): { + bucketIndex: NodeBucketIndex; + bucketKey: string; + nodeId: NodeId; +} { + const [, bucketKeyBuffer, nodeIdBuffer] = bufferSplit( + keyBuffer, + prefixBuffer, + 3, + true, + ); + if (bucketKeyBuffer == null || nodeIdBuffer == null) { + throw new TypeError('Buffer is not a NodeGraph buckets key'); } + const bucketKey = bucketKeyBuffer.toString(); + const bucketIndex = lexi.unpack(bucketKey); + const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + return { + bucketIndex, + bucketKey, + nodeId, + }; } /** - * Encodes the NodeId as a `base32hex` string + * Parse the NodeGraph bucket key + * The keys look like `<NodeId>` */ -function encodeNodeId(nodeId: NodeId): NodeIdEncoded { - return nodeId.toMultibase('base32hex') as NodeIdEncoded; +function parseBucketDbKey(keyBuffer: Buffer): NodeId { + const nodeId = IdInternal.fromBuffer(keyBuffer); + return nodeId; } /** - * Decodes an encoded NodeId string into a NodeId + * Parse the NodeGraph index sublevel key + * The keys look like `!<bucketIndex>!<lastUpdated>-<NodeId>` + * It is assumed that the `!` is the sublevel prefix.
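+ * For example (a sketch, assuming lexi.pack(0, 'hex') === '00'): a key in + * bucket 0 looks like `!00!<lexi(lastUpdated)>-<nodeIdBytes>` and would parse + * to { bucketIndex: 0, bucketKey: '00', lastUpdated, nodeId }.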
*/ -function decodeNodeId(nodeIdEncoded: any): NodeId | undefined { - if (typeof nodeIdEncoded !== 'string') { - return; +function parseLastUpdatedBucketsDbKey(keyBuffer: Buffer): { + bucketIndex: NodeBucketIndex; + bucketKey: string; + lastUpdated: number; + nodeId: NodeId; +} { + const [, bucketKeyBuffer, lastUpdatedBuffer] = bufferSplit( + keyBuffer, + prefixBuffer, + 3, + true, + ); + if (bucketKeyBuffer == null || lastUpdatedBuffer == null) { + throw new TypeError('Buffer is not a NodeGraph index key'); } - const nodeId = IdInternal.fromMultibase(nodeIdEncoded); - if (nodeId == null) { - return; + const bucketKey = bucketKeyBuffer.toString(); + const bucketIndex = lexi.unpack(bucketKey); + if (bucketIndex == null) { + throw new TypeError('Buffer is not a NodeGraph index key'); } - // All NodeIds are 32 bytes long - // The NodeGraph requires a fixed size for Node Ids - if (nodeId.length !== 32) { - return; + const { lastUpdated, nodeId } = + parseLastUpdatedBucketDbKey(lastUpdatedBuffer); + return { + bucketIndex, + bucketKey, + lastUpdated, + nodeId, + }; +} + +/** + * Parse the NodeGraph index bucket sublevel key + * The keys look like `<lastUpdated>-<NodeId>` + */ +function parseLastUpdatedBucketDbKey(keyBuffer: Buffer): { + lastUpdated: number; + nodeId: NodeId; +} { + const [lastUpdatedBuffer, nodeIdBuffer] = bufferSplit( + keyBuffer, + Buffer.from('-'), + 2, + true, + ); + if (lastUpdatedBuffer == null || nodeIdBuffer == null) { + throw new TypeError('Buffer is not a NodeGraph index bucket key'); } - return nodeId; + const lastUpdated = lexi.unpack(lastUpdatedBuffer.toString()); + if (lastUpdated == null) { + throw new TypeError('Buffer is not a NodeGraph index bucket key'); + } + const nodeId = IdInternal.fromBuffer(nodeIdBuffer); + return { + lastUpdated, + nodeId, + }; +} + +/** + * Compute the distance between two nodes. + * distance = nodeId1 ^ nodeId2 + * where ^ = bitwise XOR operator + */ +function nodeDistance(nodeId1: NodeId, nodeId2: NodeId): bigint { + const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); + return bytes2BigInt(distance); +} + +function bucketSortByDistance( + bucket: NodeBucket, + nodeId: NodeId, + order: 'asc' | 'desc' = 'asc', +): void { + const distances = {}; + if (order === 'asc') { + bucket.sort(([nodeId1], [nodeId2]) => { + const d1 = (distances[nodeId1] = + distances[nodeId1] ?? nodeDistance(nodeId, nodeId1)); + const d2 = (distances[nodeId2] = + distances[nodeId2] ?? nodeDistance(nodeId, nodeId2)); + if (d1 < d2) { + return -1; + } else if (d1 > d2) { + return 1; + } else { + return 0; + } + }); + } else { + bucket.sort(([nodeId1], [nodeId2]) => { + const d1 = (distances[nodeId1] = + distances[nodeId1] ?? nodeDistance(nodeId, nodeId1)); + const d2 = (distances[nodeId2] = + distances[nodeId2] ??
nodeDistance(nodeId, nodeId2)); + if (d1 > d2) { + return -1; + } else if (d1 < d2) { + return 1; + } else { + return 0; + } + }); + } +} + +function generateRandomDistanceForBucket(bucketIndex: NodeBucketIndex): NodeId { + const buffer = keysUtils.getRandomBytesSync(32); + // Calculate the most significant byte for bucket + const base = bucketIndex / 8; + const mSigByte = Math.floor(base); + const mSigBit = (base - mSigByte) * 8 + 1; + const mSigByteIndex = buffer.length - mSigByte - 1; + // Creating masks + // AND mask should look like 0b00011111 + // OR mask should look like 0b00010000 + const shift = 8 - mSigBit; + const andMask = 0b11111111 >>> shift; + const orMask = 0b10000000 >>> shift; + let byte = buffer[mSigByteIndex]; + byte = byte & andMask; // Forces 0 for bits above bucket bit + byte = byte | orMask; // Forces 1 in the desired bucket bit + buffer[mSigByteIndex] = byte; + // Zero out bytes 'above' mSigByte + for (let byteIndex = 0; byteIndex < mSigByteIndex; byteIndex++) { + buffer[byteIndex] = 0; + } + return IdInternal.fromBuffer(buffer); +} + +function xOrNodeId(node1: NodeId, node2: NodeId): NodeId { + const xOrNodeArray = node1.map((byte, i) => byte ^ node2[i]); + const xOrNodeBuffer = Buffer.from(xOrNodeArray); + return IdInternal.fromBuffer(xOrNodeBuffer); +} + +function generateRandomNodeIdForBucket( + nodeId: NodeId, + bucket: NodeBucketIndex, +): NodeId { + const randomDistanceForBucket = generateRandomDistanceForBucket(bucket); + return xOrNodeId(nodeId, randomDistanceForBucket); } export { - calculateDistance, - calculateBucketIndex, - sortByDistance, + prefixBuffer, encodeNodeId, decodeNodeId, + bucketIndex, + bucketKey, + bucketsDbKey, + bucketDbKey, + lastUpdatedBucketsDbKey, + lastUpdatedBucketDbKey, + parseBucketsDbKey, + parseBucketDbKey, + parseLastUpdatedBucketsDbKey, + parseLastUpdatedBucketDbKey, + nodeDistance, + bucketSortByDistance, + generateRandomDistanceForBucket, + xOrNodeId, + generateRandomNodeIdForBucket, }; diff --git a/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts b/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts index 023631a45..067688187 100644 --- a/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts +++ b/src/proto/js/polykey/v1/client_service_grpc_pb.d.ts @@ -27,6 +27,7 @@ interface IClientServiceService extends grpc.ServiceDefinition<grpc.UntypedServiceImplementation> responseDeserialize: grpc.deserialize<polykey_v1_nodes_nodes_pb.NodeAddress>; } +interface IClientServiceService_INodesGetAll extends grpc.MethodDefinition<polykey_v1_utils_utils_pb.EmptyMessage, polykey_v1_nodes_nodes_pb.NodeBuckets> { + path: "/polykey.v1.ClientService/NodesGetAll"; + requestStream: false; + responseStream: false; + requestSerialize: grpc.serialize<polykey_v1_utils_utils_pb.EmptyMessage>; + requestDeserialize: grpc.deserialize<polykey_v1_utils_utils_pb.EmptyMessage>; + responseSerialize: grpc.serialize<polykey_v1_nodes_nodes_pb.NodeBuckets>; + responseDeserialize: grpc.deserialize<polykey_v1_nodes_nodes_pb.NodeBuckets>; +} interface IClientServiceService_IKeysKeyPairRoot extends grpc.MethodDefinition<polykey_v1_utils_utils_pb.EmptyMessage, polykey_v1_keys_keys_pb.KeyPair> { path: "/polykey.v1.ClientService/KeysKeyPairRoot"; requestStream: false; @@ -673,6 +683,7 @@ export interface IClientServiceServer extends grpc.UntypedServiceImplementation nodesPing: grpc.handleUnaryCall<polykey_v1_nodes_nodes_pb.Node, polykey_v1_utils_utils_pb.StatusMessage>; nodesClaim: grpc.handleUnaryCall<polykey_v1_nodes_nodes_pb.Claim, polykey_v1_utils_utils_pb.StatusMessage>; nodesFind: grpc.handleUnaryCall<polykey_v1_nodes_nodes_pb.Node, polykey_v1_nodes_nodes_pb.NodeAddress>; + nodesGetAll: grpc.handleUnaryCall<polykey_v1_utils_utils_pb.EmptyMessage, polykey_v1_nodes_nodes_pb.NodeBuckets>; keysKeyPairRoot: grpc.handleUnaryCall<polykey_v1_utils_utils_pb.EmptyMessage, polykey_v1_keys_keys_pb.KeyPair>; keysKeyPairReset: grpc.handleUnaryCall; keysKeyPairRenew: grpc.handleUnaryCall; @@ -756,6 +767,9 @@ export interface IClientServiceClient { nodesFind(request: polykey_v1_nodes_nodes_pb.Node, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, callback: (error:
grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; + nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; @@ -941,6 +955,9 @@ export class ClientServiceClient extends grpc.Client implements IClientServiceCl public nodesFind(request: polykey_v1_nodes_nodes_pb.Node, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; public nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; public nodesFind(request: polykey_v1_nodes_nodes_pb.Node, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeAddress) => void): grpc.ClientUnaryCall; + public nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + public nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; + public nodesGetAll(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: polykey_v1_nodes_nodes_pb.NodeBuckets) => void): grpc.ClientUnaryCall; public keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; public keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, callback: (error: grpc.ServiceError | null, response: polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; public keysKeyPairRoot(request: polykey_v1_utils_utils_pb.EmptyMessage, metadata: grpc.Metadata, options: Partial, callback: (error: grpc.ServiceError | null, response: 
polykey_v1_keys_keys_pb.KeyPair) => void): grpc.ClientUnaryCall; diff --git a/src/proto/js/polykey/v1/client_service_grpc_pb.js b/src/proto/js/polykey/v1/client_service_grpc_pb.js index ede2e9470..642127423 100644 --- a/src/proto/js/polykey/v1/client_service_grpc_pb.js +++ b/src/proto/js/polykey/v1/client_service_grpc_pb.js @@ -212,6 +212,17 @@ function deserialize_polykey_v1_nodes_NodeAddress(buffer_arg) { return polykey_v1_nodes_nodes_pb.NodeAddress.deserializeBinary(new Uint8Array(buffer_arg)); } +function serialize_polykey_v1_nodes_NodeBuckets(arg) { + if (!(arg instanceof polykey_v1_nodes_nodes_pb.NodeBuckets)) { + throw new Error('Expected argument of type polykey.v1.nodes.NodeBuckets'); + } + return Buffer.from(arg.serializeBinary()); +} + +function deserialize_polykey_v1_nodes_NodeBuckets(buffer_arg) { + return polykey_v1_nodes_nodes_pb.NodeBuckets.deserializeBinary(new Uint8Array(buffer_arg)); +} + function serialize_polykey_v1_notifications_List(arg) { if (!(arg instanceof polykey_v1_notifications_notifications_pb.List)) { throw new Error('Expected argument of type polykey.v1.notifications.List'); @@ -557,6 +568,17 @@ nodesAdd: { responseSerialize: serialize_polykey_v1_nodes_NodeAddress, responseDeserialize: deserialize_polykey_v1_nodes_NodeAddress, }, + nodesGetAll: { + path: '/polykey.v1.ClientService/NodesGetAll', + requestStream: false, + responseStream: false, + requestType: polykey_v1_utils_utils_pb.EmptyMessage, + responseType: polykey_v1_nodes_nodes_pb.NodeBuckets, + requestSerialize: serialize_polykey_v1_utils_EmptyMessage, + requestDeserialize: deserialize_polykey_v1_utils_EmptyMessage, + responseSerialize: serialize_polykey_v1_nodes_NodeBuckets, + responseDeserialize: deserialize_polykey_v1_nodes_NodeBuckets, + }, // Keys keysKeyPairRoot: { path: '/polykey.v1.ClientService/KeysKeyPairRoot', diff --git a/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts b/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts index 0da62ce43..79d0fbd58 100644 --- a/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts +++ b/src/proto/js/polykey/v1/nodes/nodes_pb.d.ts @@ -98,6 +98,28 @@ export namespace Claim { } } +export class NodeBuckets extends jspb.Message { + + getBucketsMap(): jspb.Map; + clearBucketsMap(): void; + + serializeBinary(): Uint8Array; + toObject(includeInstance?: boolean): NodeBuckets.AsObject; + static toObject(includeInstance: boolean, msg: NodeBuckets): NodeBuckets.AsObject; + static extensions: {[key: number]: jspb.ExtensionFieldInfo}; + static extensionsBinary: {[key: number]: jspb.ExtensionFieldBinaryInfo}; + static serializeBinaryToWriter(message: NodeBuckets, writer: jspb.BinaryWriter): void; + static deserializeBinary(bytes: Uint8Array): NodeBuckets; + static deserializeBinaryFromReader(message: NodeBuckets, reader: jspb.BinaryReader): NodeBuckets; +} + +export namespace NodeBuckets { + export type AsObject = { + + bucketsMap: Array<[number, NodeTable.AsObject]>, + } +} + export class Connection extends jspb.Message { getAId(): string; setAId(value: string): Connection; diff --git a/src/proto/js/polykey/v1/nodes/nodes_pb.js b/src/proto/js/polykey/v1/nodes/nodes_pb.js index 01d29ce4f..8fe0c189f 100644 --- a/src/proto/js/polykey/v1/nodes/nodes_pb.js +++ b/src/proto/js/polykey/v1/nodes/nodes_pb.js @@ -25,6 +25,7 @@ goog.exportSymbol('proto.polykey.v1.nodes.Connection', null, global); goog.exportSymbol('proto.polykey.v1.nodes.CrossSign', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Node', null, global); goog.exportSymbol('proto.polykey.v1.nodes.NodeAddress', null, global); 
+goog.exportSymbol('proto.polykey.v1.nodes.NodeBuckets', null, global); goog.exportSymbol('proto.polykey.v1.nodes.NodeTable', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Relay', null, global); goog.exportSymbol('proto.polykey.v1.nodes.Signature', null, global); @@ -112,6 +113,27 @@ if (goog.DEBUG && !COMPILED) { */ proto.polykey.v1.nodes.Claim.displayName = 'proto.polykey.v1.nodes.Claim'; } +/** + * Generated by JsPbCodeGenerator. + * @param {Array=} opt_data Optional initial data array, typically from a + * server response, or constructed directly in Javascript. The array is used + * in place and becomes part of the constructed object. It is not cloned. + * If no data is provided, the constructed object will be empty, but still + * valid. + * @extends {jspb.Message} + * @constructor + */ +proto.polykey.v1.nodes.NodeBuckets = function(opt_data) { + jspb.Message.initialize(this, opt_data, 0, -1, null, null); +}; +goog.inherits(proto.polykey.v1.nodes.NodeBuckets, jspb.Message); +if (goog.DEBUG && !COMPILED) { + /** + * @public + * @override + */ + proto.polykey.v1.nodes.NodeBuckets.displayName = 'proto.polykey.v1.nodes.NodeBuckets'; +} /** * Generated by JsPbCodeGenerator. * @param {Array=} opt_data Optional initial data array, typically from a @@ -956,6 +978,139 @@ proto.polykey.v1.nodes.Claim.prototype.setForceInvite = function(value) { +if (jspb.Message.GENERATE_TO_OBJECT) { +/** + * Creates an object representation of this proto. + * Field names that are reserved in JavaScript and will be renamed to pb_name. + * Optional fields that are not set will be set to undefined. + * To access a reserved field use, foo.pb_, eg, foo.pb_default. + * For the list of reserved names please see: + * net/proto2/compiler/js/internal/generator.cc#kKeyword. + * @param {boolean=} opt_includeInstance Deprecated. whether to include the + * JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @return {!Object} + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.toObject = function(opt_includeInstance) { + return proto.polykey.v1.nodes.NodeBuckets.toObject(opt_includeInstance, this); +}; + + +/** + * Static version of the {@see toObject} method. + * @param {boolean|undefined} includeInstance Deprecated. Whether to include + * the JSPB instance for transitional soy proto support: + * http://goto/soy-param-migration + * @param {!proto.polykey.v1.nodes.NodeBuckets} msg The msg instance to transform. + * @return {!Object} + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.polykey.v1.nodes.NodeBuckets.toObject = function(includeInstance, msg) { + var f, obj = { + bucketsMap: (f = msg.getBucketsMap()) ? f.toObject(includeInstance, proto.polykey.v1.nodes.NodeTable.toObject) : [] + }; + + if (includeInstance) { + obj.$jspbMessageInstance = msg; + } + return obj; +}; +} + + +/** + * Deserializes binary data (in protobuf wire format). + * @param {jspb.ByteSource} bytes The bytes to deserialize. + * @return {!proto.polykey.v1.nodes.NodeBuckets} + */ +proto.polykey.v1.nodes.NodeBuckets.deserializeBinary = function(bytes) { + var reader = new jspb.BinaryReader(bytes); + var msg = new proto.polykey.v1.nodes.NodeBuckets; + return proto.polykey.v1.nodes.NodeBuckets.deserializeBinaryFromReader(msg, reader); +}; + + +/** + * Deserializes binary data (in protobuf wire format) from the + * given reader into the given message object. + * @param {!proto.polykey.v1.nodes.NodeBuckets} msg The message object to deserialize into. 
+ * @param {!jspb.BinaryReader} reader The BinaryReader to use. + * @return {!proto.polykey.v1.nodes.NodeBuckets} + */ +proto.polykey.v1.nodes.NodeBuckets.deserializeBinaryFromReader = function(msg, reader) { + while (reader.nextField()) { + if (reader.isEndGroup()) { + break; + } + var field = reader.getFieldNumber(); + switch (field) { + case 1: + var value = msg.getBucketsMap(); + reader.readMessage(value, function(message, reader) { + jspb.Map.deserializeBinary(message, reader, jspb.BinaryReader.prototype.readInt32, jspb.BinaryReader.prototype.readMessage, proto.polykey.v1.nodes.NodeTable.deserializeBinaryFromReader, 0, new proto.polykey.v1.nodes.NodeTable()); + }); + break; + default: + reader.skipField(); + break; + } + } + return msg; +}; + + +/** + * Serializes the message to binary data (in protobuf wire format). + * @return {!Uint8Array} + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.serializeBinary = function() { + var writer = new jspb.BinaryWriter(); + proto.polykey.v1.nodes.NodeBuckets.serializeBinaryToWriter(this, writer); + return writer.getResultBuffer(); +}; + + +/** + * Serializes the given message to binary data (in protobuf wire + * format), writing to the given BinaryWriter. + * @param {!proto.polykey.v1.nodes.NodeBuckets} message + * @param {!jspb.BinaryWriter} writer + * @suppress {unusedLocalVariables} f is only used for nested messages + */ +proto.polykey.v1.nodes.NodeBuckets.serializeBinaryToWriter = function(message, writer) { + var f = undefined; + f = message.getBucketsMap(true); + if (f && f.getLength() > 0) { + f.serializeBinary(1, writer, jspb.BinaryWriter.prototype.writeInt32, jspb.BinaryWriter.prototype.writeMessage, proto.polykey.v1.nodes.NodeTable.serializeBinaryToWriter); + } +}; + + +/** + * map buckets = 1; + * @param {boolean=} opt_noLazyCreate Do not create the map if + * empty, instead returning `undefined` + * @return {!jspb.Map} + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.getBucketsMap = function(opt_noLazyCreate) { + return /** @type {!jspb.Map} */ ( + jspb.Message.getMapField(this, 1, opt_noLazyCreate, + proto.polykey.v1.nodes.NodeTable)); +}; + + +/** + * Clears values from the map. The map will be non-null. + * @return {!proto.polykey.v1.nodes.NodeBuckets} returns this + */ +proto.polykey.v1.nodes.NodeBuckets.prototype.clearBucketsMap = function() { + this.getBucketsMap().clear(); + return this;}; + + + + + if (jspb.Message.GENERATE_TO_OBJECT) { /** * Creates an object representation of this proto. 
diff --git a/src/proto/schemas/polykey/v1/client_service.proto b/src/proto/schemas/polykey/v1/client_service.proto index 57788c678..81782f13b 100644 --- a/src/proto/schemas/polykey/v1/client_service.proto +++ b/src/proto/schemas/polykey/v1/client_service.proto @@ -26,6 +26,7 @@ service ClientService { rpc NodesPing(polykey.v1.nodes.Node) returns (polykey.v1.utils.StatusMessage); rpc NodesClaim(polykey.v1.nodes.Claim) returns (polykey.v1.utils.StatusMessage); rpc NodesFind(polykey.v1.nodes.Node) returns (polykey.v1.nodes.NodeAddress); + rpc NodesGetAll(polykey.v1.utils.EmptyMessage) returns (polykey.v1.nodes.NodeBuckets); // Keys rpc KeysKeyPairRoot (polykey.v1.utils.EmptyMessage) returns (polykey.v1.keys.KeyPair); diff --git a/src/proto/schemas/polykey/v1/nodes/nodes.proto b/src/proto/schemas/polykey/v1/nodes/nodes.proto index 4c5d64a51..bd2b54f85 100644 --- a/src/proto/schemas/polykey/v1/nodes/nodes.proto +++ b/src/proto/schemas/polykey/v1/nodes/nodes.proto @@ -25,6 +25,11 @@ message Claim { bool force_invite = 2; } +// Bucket index -> a node bucket (from NodeGraph) +message NodeBuckets { + map<int32, NodeTable> buckets = 1; +} + // Agent specific. message Connection { diff --git a/src/sigchain/utils.ts b/src/sigchain/utils.ts index 7f40dd6a3..fe8cc83f8 100644 --- a/src/sigchain/utils.ts +++ b/src/sigchain/utils.ts @@ -19,7 +19,7 @@ async function verifyChainData( continue; } // If verified, add the claim to the decoded chain - decodedChain[claimId] = await claimsUtils.decodeClaim(encodedClaim); + decodedChain[claimId] = claimsUtils.decodeClaim(encodedClaim); } return decodedChain; } diff --git a/src/types.ts b/src/types.ts index b09954b32..6762c5fba 100644 --- a/src/types.ts +++ b/src/types.ts @@ -72,6 +72,24 @@ interface FileSystem { type FileHandle = fs.promises.FileHandle; +type FunctionPropertyNames<T> = { + [K in keyof T]: T[K] extends (...args: any[]) => any ? K : never; +}[keyof T]; + +/** + * Functional properties of an object + */ +type FunctionProperties<T> = Pick<T, FunctionPropertyNames<T>>; + +type NonFunctionPropertyNames<T> = { + [K in keyof T]: T[K] extends (...args: any[]) => any ? never : K; +}[keyof T]; + +/** + * Non-functional properties of an object + */ +type NonFunctionProperties<T> = Pick<T, NonFunctionPropertyNames<T>>; + export type { POJO, Opaque, @@ -83,4 +101,6 @@ export type { Timer, FileSystem, FileHandle, + FunctionProperties, + NonFunctionProperties, }; diff --git a/src/utils/context.ts b/src/utils/context.ts index d4102debc..ad6af69ee 100644 --- a/src/utils/context.ts +++ b/src/utils/context.ts @@ -2,7 +2,7 @@ type ResourceAcquire<Resource = void> = () => Promise< readonly [ResourceRelease, Resource?] >; -type ResourceRelease = () => Promise<void>; +type ResourceRelease = (e?: Error) => Promise<void>; type Resources<T extends readonly ResourceAcquire<any>[]> = { [K in keyof T]: T[K] extends ResourceAcquire<infer R> ?
R : never; @@ -22,6 +22,7 @@ async function withF< ): Promise { const releases: Array<ResourceRelease> = []; const resources: Array<any> = []; + let e_: Error | undefined; try { for (const acquire of acquires) { const [release, resource] = await acquire(); @@ -29,10 +30,13 @@ resources.push(resource); } return await f(resources as unknown as Resources); + } catch (e) { + e_ = e; + throw e; } finally { releases.reverse(); for (const release of releases) { - await release(); + await release(e_); } } } @@ -55,6 +59,7 @@ async function* withG< ): AsyncGenerator { const releases: Array<ResourceRelease> = []; const resources: Array<any> = []; + let e_: Error | undefined; try { for (const acquire of acquires) { const [release, resource] = await acquire(); @@ -62,10 +67,13 @@ resources.push(resource); } return yield* g(resources as unknown as Resources); + } catch (e) { + e_ = e; + throw e; } finally { releases.reverse(); for (const release of releases) { - await release(); + await release(e_); } } } diff --git a/src/utils/index.ts b/src/utils/index.ts index cbb38a8be..08bc47f16 100644 --- a/src/utils/index.ts +++ b/src/utils/index.ts @@ -4,4 +4,5 @@ export * from './context'; export * from './utils'; export * from './matchers'; export * from './binary'; +export * from './random'; export * as errors from './errors'; diff --git a/src/utils/locks.ts b/src/utils/locks.ts index eb6f95245..b097dab16 100644 --- a/src/utils/locks.ts +++ b/src/utils/locks.ts @@ -73,6 +73,14 @@ class RWLock { return this.readersLock.isLocked() || this.writersLock.isLocked(); } + public isLockedReader(): boolean { + return this.readersLock.isLocked(); + } + + public isLockedWriter(): boolean { + return this.writersLock.isLocked(); + } + public async waitForUnlock(): Promise<void> { await Promise.all([ this.readersLock.waitForUnlock(), diff --git a/src/utils/random.ts b/src/utils/random.ts new file mode 100644 index 000000000..fa0c3ecda --- /dev/null +++ b/src/utils/random.ts @@ -0,0 +1,11 @@ +/** + * Gets a random number between min (inc) and max (exc) + * This is not cryptographically-secure + */ +function getRandomInt(min: number, max: number) { + min = Math.ceil(min); + max = Math.floor(max); + return Math.floor(Math.random() * (max - min)) + min; +} + +export { getRandomInt }; diff --git a/src/utils/utils.ts b/src/utils/utils.ts index 6b4ca4759..4bbaf054c 100644 --- a/src/utils/utils.ts +++ b/src/utils/utils.ts @@ -154,14 +154,16 @@ function promisify(f): (...args: any[]) => Promise { }; } -/** - * Deconstructed promise - */ -function promise<T>(): { +export type PromiseType<T> = { p: Promise<T>; resolveP: (value: T | PromiseLike<T>) => void; rejectP: (reason?: any) => void; -} { +}; + +/** + * Deconstructed promise + */ +function promise<T>(): PromiseType<T> { let resolveP, rejectP; const p = new Promise((resolve, reject) => { resolveP = resolve; @@ -220,6 +222,67 @@ function arrayZipWithPadding( ]); } +async function asyncIterableArray<T>( + iterable: AsyncIterable<T>, +): Promise<Array<T>> { + const arr: Array<T> = []; + for await (const item of iterable) { + arr.push(item); + } + return arr; +} + +function bufferSplit( + input: Buffer, + delimiter?: Buffer, + limit?: number, + remaining: boolean = false, +): Array<Buffer> { + const output: Array<Buffer> = []; + let delimiterOffset = 0; + let delimiterIndex = 0; + let i = 0; + if (delimiter != null) { + while (true) { + if (i === limit) break; + delimiterIndex = input.indexOf(delimiter, delimiterOffset); + if (delimiterIndex > -1) { + output.push(input.subarray(delimiterOffset, delimiterIndex)); + delimiterOffset =
delimiterIndex + delimiter.byteLength; + } else { + const chunk = input.subarray(delimiterOffset); + output.push(chunk); + delimiterOffset += chunk.byteLength; + break; + } + i++; + } + } else { + for (; delimiterIndex < input.byteLength; ) { + if (i === limit) break; + delimiterIndex++; + const chunk = input.subarray(delimiterOffset, delimiterIndex); + output.push(chunk); + delimiterOffset += chunk.byteLength; + i++; + } + } + // If remaining, then the rest of the input including delimiters is extracted + if ( + remaining && + limit != null && + output.length > 0 && + delimiterIndex > -1 && + delimiterIndex <= input.byteLength + ) { + const inputRemaining = input.subarray( + delimiterIndex - output[output.length - 1].byteLength, + ); + output[output.length - 1] = inputRemaining; + } + return output; +} + function debounce
<P extends any[]>
( f: (...params: P) => any, timeout: number = 0, @@ -250,5 +313,7 @@ export { arrayUnset, arrayZip, arrayZipWithPadding, + asyncIterableArray, + bufferSplit, debounce, }; diff --git a/src/validation/utils.ts b/src/validation/utils.ts index 3ce13f258..020c1f51a 100644 --- a/src/validation/utils.ts +++ b/src/validation/utils.ts @@ -165,7 +165,7 @@ function parseHostOrHostname(data: any): Host | Hostname { * Parses number into a Port * Data can be a string-number */ -function parsePort(data: any): Port { +function parsePort(data: any, connect: boolean = false): Port { if (typeof data === 'string') { try { data = parseInteger(data); @@ -176,10 +176,16 @@ function parsePort(data: any): Port { throw e; } } - if (!networkUtils.isPort(data)) { - throw new validationErrors.ErrorParse( - 'Port must be a number between 0 and 65535 inclusive', - ); + if (!networkUtils.isPort(data, connect)) { + if (!connect) { + throw new validationErrors.ErrorParse( + 'Port must be a number between 0 and 65535 inclusive', + ); + } else { + throw new validationErrors.ErrorParse( + 'Port must be a number between 1 and 65535 inclusive', + ); + } } return data; } diff --git a/test-iterator.ts b/test-iterator.ts new file mode 100644 index 000000000..82a21762c --- /dev/null +++ b/test-iterator.ts @@ -0,0 +1,31 @@ + + +function getYouG () { + console.log('ALREADY EXECUTED'); + return abc(); +} + +async function *abc() { + console.log('START'); + yield 1; + yield 2; + yield 3; +} + +async function main () { + + // we would want that you don't iterate it + + const g = getYouG(); + + await g.next(); + + // console.log('SUP'); + + // for await (const r of abc()) { + // console.log(r); + // } + +} + +main(); diff --git a/test-lexi.ts b/test-lexi.ts new file mode 100644 index 000000000..b48f9cea1 --- /dev/null +++ b/test-lexi.ts @@ -0,0 +1,4 @@ +import lexi from 'lexicographic-integer'; + + +console.log(lexi.pack(1646203779)); diff --git a/test-nodegraph.ts b/test-nodegraph.ts new file mode 100644 index 000000000..33bd58bb7 --- /dev/null +++ b/test-nodegraph.ts @@ -0,0 +1,107 @@ +import type { NodeId, NodeAddress } from './src/nodes/types'; +import { DB } from '@matrixai/db'; +import { IdInternal } from '@matrixai/id'; +import * as keysUtils from './src/keys/utils'; +import * as nodesUtils from './src/nodes/utils'; +import NodeGraph from './src/nodes/NodeGraph'; +import KeyManager from './src/keys/KeyManager'; + +function generateRandomNodeId(readable: boolean = false): NodeId { + if (readable) { + const random = keysUtils.getRandomBytesSync(16).toString('hex'); + return IdInternal.fromString(random); + } else { + const random = keysUtils.getRandomBytesSync(32); + return IdInternal.fromBuffer(random); + } +} + +async function main () { + + const db = await DB.createDB({ + dbPath: './tmp/db' + }); + + const keyManager = await KeyManager.createKeyManager({ + keysPath: './tmp/keys', + password: 'abc123', + // fresh: true + }); + + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + fresh: true + }); + + for (let i = 0; i < 10; i++) { + await nodeGraph.setNode( + generateRandomNodeId(), + { + host: '127.0.0.1', + port: 55555 + } as NodeAddress + ); + } + + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { + + // the bucket lengths are wrong + console.log( + 'BucketIndex', + bucketIndex, + 'Bucket Count', + bucket.length, + ); + + // console.log(bucket); + for (const [nodeId, nodeData] of bucket) { + // console.log('NODEID', nodeId); + // console.log('NODEDATA', nodeData); + // 
console.log(nodeData.address); + } + } + + for await (const [nodeId, nodeData] of nodeGraph.getNodes()) { + // console.log(nodeId, nodeData); + } + + const bucket = await nodeGraph.getBucket(255, 'lastUpdated'); + console.log(bucket.length); + + // console.log('OLD NODE ID', keyManager.getNodeId()); + // const newNodeId = generateRandomNodeId(); + // console.log('NEW NODE ID', newNodeId); + + // console.log('---------FIRST RESET--------'); + + // await nodeGraph.resetBuckets(newNodeId); + // for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { + // console.log( + // 'BucketIndex', + // bucketIndex, + // 'Bucket Count', + // Object.keys(bucket).length + // ); + // } + + + // console.log('---------SECOND RESET--------'); + // const newNodeId2 = generateRandomNodeId(); + // await nodeGraph.resetBuckets(newNodeId2); + + // for await (const [bucketIndex, bucket] of nodeGraph.getBuckets()) { + // console.log( + // 'BucketIndex', + // bucketIndex, + // 'Bucket Count', + // Object.keys(bucket).length + // ); + // } + + await nodeGraph.stop(); + await keyManager.stop(); + await db.stop(); +} + +main(); diff --git a/test-nodeidgen.ts b/test-nodeidgen.ts new file mode 100644 index 000000000..2f79bddda --- /dev/null +++ b/test-nodeidgen.ts @@ -0,0 +1,44 @@ +import type { NodeId } from './src/nodes/types'; +import { IdInternal } from '@matrixai/id'; +import * as keysUtils from './src/keys/utils'; +import * as nodesUtils from './src/nodes/utils'; + +function generateRandomNodeId(readable: boolean = false): NodeId { + if (readable) { + const random = keysUtils.getRandomBytesSync(16).toString('hex'); + return IdInternal.fromString(random); + } else { + const random = keysUtils.getRandomBytesSync(32); + return IdInternal.fromBuffer(random); + } +} + +async function main () { + + const firstNodeId = generateRandomNodeId(); + + + let lastBucket = 0; + let penultimateBucket = 0; + let lowerBuckets = 0; + + for (let i = 0; i < 1000; i++) { + const nodeId = generateRandomNodeId(); + const bucketIndex = nodesUtils.bucketIndex(firstNodeId, nodeId); + if (bucketIndex === 255) { + lastBucket++; + } else if (bucketIndex === 254) { + penultimateBucket++; + } else { + lowerBuckets++; + } + } + + console.log(lastBucket); + console.log(penultimateBucket); + console.log(lowerBuckets); + + +} + +main(); diff --git a/test-order.ts b/test-order.ts new file mode 100644 index 000000000..f6046d6da --- /dev/null +++ b/test-order.ts @@ -0,0 +1,98 @@ +import { DB } from '@matrixai/db'; +import lexi from 'lexicographic-integer'; +import { getUnixtime, hex2Bytes } from './src/utils'; + +async function main () { + + const db = await DB.createDB({ + dbPath: './tmp/orderdb', + fresh: true + }); + + await db.put([], 'node1', 'value'); + await db.put([], 'node2', 'value'); + await db.put([], 'node3', 'value'); + await db.put([], 'node4', 'value'); + await db.put([], 'node5', 'value'); + await db.put([], 'node6', 'value'); + await db.put([], 'node7', 'value'); + + const now = new Date; + const t1 = new Date(now.getTime() + 1000 * 1); + const t2 = new Date(now.getTime() + 1000 * 2); + const t3 = new Date(now.getTime() + 1000 * 3); + const t4 = new Date(now.getTime() + 1000 * 4); + const t5 = new Date(now.getTime() + 1000 * 5); + const t6 = new Date(now.getTime() + 1000 * 6); + const t7 = new Date(now.getTime() + 1000 * 7); + + // so unix time is only what we really need to know + // further precision is unlikely + // and hex-packed time is shorter keys + // so it is likely faster + // the only issue is that unpacking 
requires + // converting hex into bytes, then into strings + + // console.log(t1.getTime()); + // console.log(getUnixtime(t1)); + // console.log(lexi.pack(getUnixtime(t1), 'hex')); + // console.log(lexi.pack(t1.getTime(), 'hex')); + // console.log(t1.toISOString()); + + + // buckets0!BUCKETINDEX!NODEID + // buckets0!BUCKETINDEX!date + + // Duplicate times that are put here + // But differentiate by the node1, node2 + await db.put([], lexi.pack(getUnixtime(t6), 'hex') + '-node1', 'value'); + await db.put([], lexi.pack(getUnixtime(t6), 'hex') + '-node2', 'value'); + + await db.put([], lexi.pack(getUnixtime(t1), 'hex') + '-node3', 'value'); + await db.put([], lexi.pack(getUnixtime(t4), 'hex') + '-node4', 'value'); + await db.put([], lexi.pack(getUnixtime(t3), 'hex') + '-node5', 'value'); + await db.put([], lexi.pack(getUnixtime(t2), 'hex') + '-node6', 'value'); + await db.put([], lexi.pack(getUnixtime(t5), 'hex') + '-node7', 'value'); + + // await db.put([], t6.toISOString() + '-node1', 'value'); + // await db.put([], t6.toISOString() + '-node2', 'value'); + + // await db.put([], t1.toISOString() + '-node3', 'value'); + // await db.put([], t4.toISOString() + '-node4', 'value'); + // await db.put([], t3.toISOString() + '-node5', 'value'); + // await db.put([], t2.toISOString() + '-node6', 'value'); + // await db.put([], t5.toISOString() + '-node7', 'value'); + + // Why did this require `-node3` + + // this will always get one or the other + + // ok so if we want to say get a time + // or order it by time + // we are going to have to create a read stream over the bucket right? + // yea so we would have another sublevel, or at least a sublevel formed by the bucket + // one that is the bucket index + // so that would be the correct way to do it + + for await (const o of db.db.createReadStream({ + gte: lexi.pack(getUnixtime(t1), 'hex'), + limit: 1, + // keys: true, + // values: true, + // lte: lexi.pack(getUnixtime(t6)) + })) { + + console.log(o.key.toString()); + + } + + await db.stop(); + + + // so it works + // now if you give it something like + + +} + +main(); diff --git a/test-sorting.ts b/test-sorting.ts new file mode 100644 index 000000000..1692fa83f --- /dev/null +++ b/test-sorting.ts @@ -0,0 +1,28 @@ +import * as testNodesUtils from './tests/nodes/utils'; + +const arr = [ + { a: 'abc', b: 3}, + { a: 'abc', b: 1}, + { a: 'abc', b: 0}, +]; + +arr.sort((a, b): number => { + if (a.b > b.b) { + return 1; + } else if (a.b < b.b) { + return -1; + } else { + return 0; + } +}); + +console.log(arr); + +const arr2 = [3, 1, 0]; + +arr2.sort(); + +console.log(arr2); + + +console.log(testNodesUtils.generateRandomNodeId()); diff --git a/test-split.ts b/test-split.ts new file mode 100644 index 000000000..ee06d75d6 --- /dev/null +++ b/test-split.ts @@ -0,0 +1,37 @@ + +function bufferSplit(input: Buffer, delimiter?: Buffer): Array<Buffer> { + const output: Array<Buffer> = []; + let delimiterIndex = 0; + let chunkIndex = 0; + if (delimiter != null) { + while (true) { + const i = input.indexOf( + delimiter, + delimiterIndex + ); + if (i > -1) { + output.push(input.subarray(chunkIndex, i)); + delimiterIndex = i + delimiter.byteLength; + chunkIndex = i + delimiter.byteLength; + } else { + output.push(input.subarray(chunkIndex)); + break; + } + } + } else { + for (let i = 0; i < input.byteLength; i++) { + output.push(input.subarray(i, i + 1)); + } + } + return output; +} + + +const b = Buffer.from('!a!!b!'); + +console.log(bufferSplit(b, Buffer.from('!!'))); +console.log(bufferSplit(b)); + +const s = '!a!!b!';
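+// For reference (a hedged sketch of the expected output, assuming Node's +// Buffer semantics): bufferSplit(b, Buffer.from('!!')) yields the byte +// equivalents of ['!a', 'b!'], matching s.split('!!') below, while +// bufferSplit(b) with no delimiter splits into single-byte Buffers.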
+console.log(s.split('!!')); diff --git a/test-trie.ts b/test-trie.ts new file mode 100644 index 000000000..a17c4165d --- /dev/null +++ b/test-trie.ts @@ -0,0 +1,29 @@ +import * as utils from './src/utils'; +import * as nodesUtils from './src/nodes/utils'; + +// 110 +const ownNodeId = Buffer.from([6]); + +const i = 2; + +const maxDistance = utils.bigInt2Bytes(BigInt(2 ** i)); +const minDistance = utils.bigInt2Bytes(BigInt(2 ** (i - 1))); + +console.log('max distance', maxDistance, utils.bytes2Bits(maxDistance)); +console.log('min distance', minDistance, utils.bytes2Bits(minDistance)); + +// ownNodeId XOR maxdistance = GTE node id +const gte = ownNodeId.map((byte, i) => byte ^ maxDistance[i]); + +// ownNodeId XOR mindistance = LT node id +const lt = ownNodeId.map((byte, i) => byte ^ minDistance[i]); + +console.log('Lowest Distance Node (inc)', gte, utils.bytes2Bits(gte)); +console.log('Greatest Distance Node (exc)', lt, utils.bytes2Bits(lt)); + +// function nodeDistance(nodeId1: Buffer, nodeId2: Buffer): bigint { +// const distance = nodeId1.map((byte, i) => byte ^ nodeId2[i]); +// return utils.bytes2BigInt(distance); +// } + +// console.log(nodeDistance(ownNodeId, Buffer.from([0]))); diff --git a/tests/acl/ACL.test.ts b/tests/acl/ACL.test.ts index a75819f2f..14ea88cc8 100644 --- a/tests/acl/ACL.test.ts +++ b/tests/acl/ACL.test.ts @@ -10,7 +10,7 @@ import { DB } from '@matrixai/db'; import { ACL, errors as aclErrors } from '@/acl'; import { utils as keysUtils } from '@/keys'; import { utils as vaultsUtils } from '@/vaults'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe(ACL.name, () => { const logger = new Logger(`${ACL.name} test`, LogLevel.WARN, [ @@ -18,14 +18,14 @@ describe(ACL.name, () => { ]); // Node Ids - const nodeIdX = testUtils.generateRandomNodeId(); - const nodeIdY = testUtils.generateRandomNodeId(); - const nodeIdG1First = testUtils.generateRandomNodeId(); - const nodeIdG1Second = testUtils.generateRandomNodeId(); - const nodeIdG1Third = testUtils.generateRandomNodeId(); - const nodeIdG1Fourth = testUtils.generateRandomNodeId(); - const nodeIdG2First = testUtils.generateRandomNodeId(); - const nodeIdG2Second = testUtils.generateRandomNodeId(); + const nodeIdX = testNodesUtils.generateRandomNodeId(); + const nodeIdY = testNodesUtils.generateRandomNodeId(); + const nodeIdG1First = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Second = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Third = testNodesUtils.generateRandomNodeId(); + const nodeIdG1Fourth = testNodesUtils.generateRandomNodeId(); + const nodeIdG2First = testNodesUtils.generateRandomNodeId(); + const nodeIdG2Second = testNodesUtils.generateRandomNodeId(); let dataDir: string; let db: DB; diff --git a/tests/agent/GRPCClientAgent.test.ts b/tests/agent/GRPCClientAgent.test.ts index 78808e361..85cca7c99 100644 --- a/tests/agent/GRPCClientAgent.test.ts +++ b/tests/agent/GRPCClientAgent.test.ts @@ -6,6 +6,7 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -21,6 +22,7 @@ import NotificationsManager from '@/notifications/NotificationsManager'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as agentErrors from '@/agent/errors'; import * as keysUtils from '@/keys/utils'; 
+import { timerStart } from '@/utils'; import * as testAgentUtils from './utils'; describe(GRPCClientAgent.name, () => { @@ -48,6 +50,7 @@ describe(GRPCClientAgent.name, () => { let keyManager: KeyManager; let vaultManager: VaultManager; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -109,21 +112,26 @@ describe(GRPCClientAgent.name, () => { keyManager, logger, }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, logger, }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db: db, sigchain: sigchain, keyManager: keyManager, nodeGraph: nodeGraph, nodeConnectionManager: nodeConnectionManager, + queue, logger: logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl: acl, @@ -173,6 +181,8 @@ describe(GRPCClientAgent.name, () => { await notificationsManager.stop(); await sigchain.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await nodeGraph.stop(); await gestaltGraph.stop(); await acl.stop(); @@ -255,7 +265,7 @@ describe(GRPCClientAgent.name, () => { port: clientProxy1.getForwardPort(), authToken: clientProxy1.authToken, }, - timeout: 5000, + timer: timerStart(5000), logger, }); @@ -289,7 +299,7 @@ describe(GRPCClientAgent.name, () => { port: clientProxy2.getForwardPort(), authToken: clientProxy2.authToken, }, - timeout: 5000, + timer: timerStart(5000), }); }); afterEach(async () => { diff --git a/tests/agent/service/nodesChainDataGet.test.ts b/tests/agent/service/nodesChainDataGet.test.ts new file mode 100644 index 000000000..8bc388763 --- /dev/null +++ b/tests/agent/service/nodesChainDataGet.test.ts @@ -0,0 +1,108 @@ +import type { Host, Port } from '@/network/types'; +import type { NodeIdEncoded } from '@/nodes/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import PolykeyAgent from '@/PolykeyAgent'; +import GRPCServer from '@/grpc/GRPCServer'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import { AgentServiceService } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; +import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; +import * as keysUtils from '@/keys/utils'; +import * as nodesUtils from '@/nodes/utils'; +import nodesClosestLocalNodesGet from '@/agent/service/nodesClosestLocalNodesGet'; +import * as testNodesUtils from '../../nodes/utils'; +import * as testUtils from '../../utils'; + +describe('nodesClosestLocalNode', () => { + const logger = new Logger('nodesClosestLocalNode test', LogLevel.WARN, [ + new StreamHandler(), + ]); + const password = 'helloworld'; + let dataDir: string; + let nodePath: string; + let grpcServer: GRPCServer; + let grpcClient: GRPCClientAgent; + let pkAgent: PolykeyAgent; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValueOnce(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValueOnce(globalKeyPair); + dataDir = await fs.promises.mkdtemp( + 
path.join(os.tmpdir(), 'polykey-test-'), + ); + nodePath = path.join(dataDir, 'keynode'); + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + keysConfig: { + rootKeyPairBits: 2048, + }, + seedNodes: {}, // Explicitly no seed nodes on startup + networkConfig: { + proxyHost: '127.0.0.1' as Host, + }, + logger, + }); + // Setting up a remote keynode + const agentService = { + nodesClosestLocalNodesGet: nodesClosestLocalNodesGet({ + nodeGraph: pkAgent.nodeGraph, + }), + }; + grpcServer = new GRPCServer({ logger }); + await grpcServer.start({ + services: [[AgentServiceService, agentService]], + host: '127.0.0.1' as Host, + port: 0 as Port, + }); + grpcClient = await GRPCClientAgent.createGRPCClientAgent({ + nodeId: pkAgent.keyManager.getNodeId(), + host: '127.0.0.1' as Host, + port: grpcServer.getPort(), + logger, + }); + }, global.defaultTimeout); + afterAll(async () => { + await grpcClient.destroy(); + await grpcServer.stop(); + await pkAgent.stop(); + await pkAgent.destroy(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + test('should get closest local nodes', async () => { + // Adding 10 nodes + const nodes: Array<NodeIdEncoded> = []; + for (let i = 0; i < 10; i++) { + const nodeId = testNodesUtils.generateRandomNodeId(); + await pkAgent.nodeGraph.setNode(nodeId, { + host: 'localhost' as Host, + port: 55555 as Port, + }); + nodes.push(nodesUtils.encodeNodeId(nodeId)); + } + const nodeIdEncoded = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); + const nodeMessage = new nodesPB.Node(); + nodeMessage.setNodeId(nodeIdEncoded); + const result = await grpcClient.nodesClosestLocalNodesGet(nodeMessage); + const resultNodes: Array<NodeIdEncoded> = []; + for (const [resultNode] of result.toObject().nodeTableMap) { + resultNodes.push(resultNode as NodeIdEncoded); + } + expect(nodes.sort()).toEqual(resultNodes.sort()); + }); +}); diff --git a/tests/agent/service/nodesClosestLocalNode.test.ts b/tests/agent/service/nodesClosestLocalNode.test.ts new file mode 100644 index 000000000..5453d8e5a --- /dev/null +++ b/tests/agent/service/nodesClosestLocalNode.test.ts @@ -0,0 +1,118 @@ +import type { Host, Port } from '@/network/types'; +import type { ClaimData } from '@/claims/types'; +import type { IdentityId, ProviderId } from '@/identities/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import PolykeyAgent from '@/PolykeyAgent'; +import GRPCServer from '@/grpc/GRPCServer'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import { AgentServiceService } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; +import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import * as keysUtils from '@/keys/utils'; +import * as nodesUtils from '@/nodes/utils'; +import nodesChainDataGet from '@/agent/service/nodesChainDataGet'; +import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; + +describe('nodesChainDataGet', () => { + const logger = new Logger('nodesChainDataGet test', LogLevel.WARN, [ + new StreamHandler(), + ]); + const password = 'helloworld'; + let dataDir: string; + let nodePath: string; + let grpcServer: GRPCServer; + let grpcClient: GRPCClientAgent; + let pkAgent: PolykeyAgent; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + beforeAll(async () => { +
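+ // Key generation is stubbed with a pre-made global key pair because
+ // generating fresh root keys per test run is expensive; note that
+ // `mockResolvedValueOnce` intercepts only the first call, so any later
+ // calls fall through to the real implementation.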
const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValueOnce(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValueOnce(globalKeyPair); + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + nodePath = path.join(dataDir, 'keynode'); + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + keysConfig: { + rootKeyPairBits: 2048, + }, + seedNodes: {}, // Explicitly no seed nodes on startup + networkConfig: { + proxyHost: '127.0.0.1' as Host, + }, + logger, + }); + const agentService = { + nodesChainDataGet: nodesChainDataGet({ + sigchain: pkAgent.sigchain, + }), + }; + grpcServer = new GRPCServer({ logger }); + await grpcServer.start({ + services: [[AgentServiceService, agentService]], + host: '127.0.0.1' as Host, + port: 0 as Port, + }); + grpcClient = await GRPCClientAgent.createGRPCClientAgent({ + nodeId: pkAgent.keyManager.getNodeId(), + host: '127.0.0.1' as Host, + port: grpcServer.getPort(), + logger, + }); + }, global.defaultTimeout); + afterAll(async () => { + await grpcClient.destroy(); + await grpcServer.stop(); + await pkAgent.stop(); + await pkAgent.destroy(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + test('should get chain data', async () => { + const srcNodeIdEncoded = nodesUtils.encodeNodeId( + pkAgent.keyManager.getNodeId(), + ); + // Add 10 claims + for (let i = 1; i <= 5; i++) { + const node2 = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); + const nodeLink: ClaimData = { + type: 'node', + node1: srcNodeIdEncoded, + node2: node2, + }; + await pkAgent.sigchain.addClaim(nodeLink); + } + for (let i = 6; i <= 10; i++) { + const identityLink: ClaimData = { + type: 'identity', + node: srcNodeIdEncoded, + provider: ('ProviderId' + i.toString()) as ProviderId, + identity: ('IdentityId' + i.toString()) as IdentityId, + }; + await pkAgent.sigchain.addClaim(identityLink); + } + + const response = await grpcClient.nodesChainDataGet( + new utilsPB.EmptyMessage(), + ); + const chainIds: Array<string> = []; + for (const [id] of response.toObject().chainDataMap) chainIds.push(id); + expect(chainIds).toHaveLength(10); + }); +}); diff --git a/tests/agent/service/nodesHolePunchMessage.test.ts b/tests/agent/service/nodesHolePunchMessage.test.ts new file mode 100644 index 000000000..4bef6d759 --- /dev/null +++ b/tests/agent/service/nodesHolePunchMessage.test.ts @@ -0,0 +1,103 @@ +import type { Host, Port } from '@/network/types'; +import fs from 'fs'; +import path from 'path'; +import os from 'os'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import PolykeyAgent from '@/PolykeyAgent'; +import GRPCServer from '@/grpc/GRPCServer'; +import GRPCClientAgent from '@/agent/GRPCClientAgent'; +import { AgentServiceService } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; +import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; +import * as keysUtils from '@/keys/utils'; +import * as nodesUtils from '@/nodes/utils'; +import nodesHolePunchMessageSend from '@/agent/service/nodesHolePunchMessageSend'; +import * as networkUtils from '@/network/utils'; +import * as testUtils from '../../utils'; + +describe('nodesHolePunchMessage', () => { + const logger = new Logger('nodesHolePunchMessage test',
LogLevel.WARN, [ + new StreamHandler(), + ]); + const password = 'helloworld'; + let dataDir: string; + let nodePath: string; + let grpcServer: GRPCServer; + let grpcClient: GRPCClientAgent; + let pkAgent: PolykeyAgent; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValueOnce(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValueOnce(globalKeyPair); + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + nodePath = path.join(dataDir, 'keynode'); + pkAgent = await PolykeyAgent.createPolykeyAgent({ + password, + nodePath, + keysConfig: { + rootKeyPairBits: 2048, + }, + seedNodes: {}, // Explicitly no seed nodes on startup + networkConfig: { + proxyHost: '127.0.0.1' as Host, + }, + logger, + }); + const agentService = { + nodesHolePunchMessageSend: nodesHolePunchMessageSend({ + keyManager: pkAgent.keyManager, + nodeConnectionManager: pkAgent.nodeConnectionManager, + nodeManager: pkAgent.nodeManager, + }), + }; + grpcServer = new GRPCServer({ logger }); + await grpcServer.start({ + services: [[AgentServiceService, agentService]], + host: '127.0.0.1' as Host, + port: 0 as Port, + }); + grpcClient = await GRPCClientAgent.createGRPCClientAgent({ + nodeId: pkAgent.keyManager.getNodeId(), + host: '127.0.0.1' as Host, + port: grpcServer.getPort(), + logger, + }); + }, global.defaultTimeout); + afterAll(async () => { + await grpcClient.destroy(); + await grpcServer.stop(); + await pkAgent.stop(); + await pkAgent.destroy(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + test('should send a hole punch relay message', async () => { + const nodeId = nodesUtils.encodeNodeId(pkAgent.keyManager.getNodeId()); + const proxyAddress = networkUtils.buildAddress( + pkAgent.proxy.getProxyHost(), + pkAgent.proxy.getProxyPort(), + ); + const signature = await pkAgent.keyManager.signWithRootKeyPair( + Buffer.from(proxyAddress), + ); + const relayMessage = new nodesPB.Relay(); + relayMessage + .setTargetId(nodeId) + .setSrcId(nodeId) + .setSignature(signature.toString()) + .setProxyAddress(proxyAddress); + await grpcClient.nodesHolePunchMessageSend(relayMessage); + // TODO: check if the ping was sent + }); +}); diff --git a/tests/agent/service/notificationsSend.test.ts b/tests/agent/service/notificationsSend.test.ts index a0eb81ffa..07ca04646 100644 --- a/tests/agent/service/notificationsSend.test.ts +++ b/tests/agent/service/notificationsSend.test.ts @@ -8,6 +8,7 @@ import { createPrivateKey, createPublicKey } from 'crypto'; import { exportJWK, SignJWT } from 'jose'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -39,6 +40,7 @@ describe('notificationsSend', () => { let senderKeyManager: KeyManager; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -108,23 +110,30 @@
describe('notificationsSend', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeGraph, nodeConnectionManager, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -157,6 +166,8 @@ describe('notificationsSend', () => { await grpcServer.stop(); await notificationsManager.stop(); await nodeConnectionManager.stop(); + await queue.stop(); + await nodeManager.stop(); await sigchain.stop(); await sigchain.stop(); await proxy.stop(); diff --git a/tests/agent/utils.ts b/tests/agent/utils.ts index 8cf77303e..f61193805 100644 --- a/tests/agent/utils.ts +++ b/tests/agent/utils.ts @@ -1,5 +1,4 @@ import type { Host, Port, ProxyConfig } from '@/network/types'; - import type { IAgentServiceServer } from '@/proto/js/polykey/v1/agent_service_grpc_pb'; import type { KeyManager } from '@/keys'; import type { VaultManager } from '@/vaults'; @@ -18,7 +17,8 @@ import { GRPCClientAgent, AgentServiceService, } from '@/agent'; -import * as testUtils from '../utils'; +import { timerStart } from '@/utils'; +import * as testNodesUtils from '../nodes/utils'; async function openTestAgentServer({ keyManager, @@ -81,13 +81,13 @@ async function openTestAgentClient( new StreamHandler(), ]); const agentClient = await GRPCClientAgent.createGRPCClientAgent({ - nodeId: nodeId ?? testUtils.generateRandomNodeId(), + nodeId: nodeId ?? 
testNodesUtils.generateRandomNodeId(), host: '127.0.0.1' as Host, port: port as Port, logger: logger, destroyCallback: async () => {}, proxyConfig, - timeout: 30000, + timer: timerStart(30000), }); return agentClient; } diff --git a/tests/bin/nodes/add.test.ts b/tests/bin/nodes/add.test.ts index 062cf6cdf..85b598786 100644 --- a/tests/bin/nodes/add.test.ts +++ b/tests/bin/nodes/add.test.ts @@ -11,11 +11,12 @@ import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; import * as testBinUtils from '../utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('add', () => { const logger = new Logger('add test', LogLevel.WARN, [new StreamHandler()]); const password = 'helloworld'; - const validNodeId = testUtils.generateRandomNodeId(); + const validNodeId = testNodesUtils.generateRandomNodeId(); const invalidNodeId = IdInternal.fromString('INVALIDID'); const validHost = '0.0.0.0'; const invalidHost = 'INVALIDHOST'; diff --git a/tests/bin/vaults/vaults.test.ts b/tests/bin/vaults/vaults.test.ts index 52b5f4e4c..949f208ee 100644 --- a/tests/bin/vaults/vaults.test.ts +++ b/tests/bin/vaults/vaults.test.ts @@ -11,7 +11,7 @@ import * as vaultsUtils from '@/vaults/utils'; import sysexits from '@/utils/sysexits'; import NotificationsManager from '@/notifications/NotificationsManager'; import * as testBinUtils from '../utils'; -import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; jest.mock('@/keys/utils', () => ({ ...jest.requireActual('@/keys/utils'), @@ -378,7 +378,7 @@ describe('CLI vaults', () => { mockedSendNotification.mockImplementation(async (_) => {}); const vaultId = await polykeyAgent.vaultManager.createVault(vaultName); const vaultIdEncoded = vaultsUtils.encodeVaultId(vaultId); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), @@ -418,7 +418,7 @@ describe('CLI vaults', () => { ); const vaultIdEncoded1 = vaultsUtils.encodeVaultId(vaultId1); const vaultIdEncoded2 = vaultsUtils.encodeVaultId(vaultId2); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), @@ -489,7 +489,7 @@ describe('CLI vaults', () => { ); const vaultIdEncoded1 = vaultsUtils.encodeVaultId(vaultId1); const vaultIdEncoded2 = vaultsUtils.encodeVaultId(vaultId2); - const targetNodeId = testUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); const targetNodeIdEncoded = nodesUtils.encodeNodeId(targetNodeId); await polykeyAgent.gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(targetNodeId), diff --git a/tests/claims/utils.test.ts b/tests/claims/utils.test.ts index f7c6e6410..e57403683 100644 --- a/tests/claims/utils.test.ts +++ b/tests/claims/utils.test.ts @@ -11,12 +11,13 @@ import * as claimsErrors from '@/claims/errors'; import { utils as keysUtils } from '@/keys'; import { utils as nodesUtils } from '@/nodes'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('claims/utils', () => { // Node Ids - const nodeId1 = testUtils.generateRandomNodeId(); + const nodeId1 = 
testNodesUtils.generateRandomNodeId(); const nodeId1Encoded = nodesUtils.encodeNodeId(nodeId1); - const nodeId2 = testUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); const nodeId2Encoded = nodesUtils.encodeNodeId(nodeId2); let publicKey: PublicKeyPem; @@ -327,9 +328,7 @@ describe('claims/utils', () => { // Create some dummy public key, and check that this does not verify const dummyKeyPair = await keysUtils.generateKeyPair(2048); - const dummyPublicKey = await keysUtils.publicKeyToPem( - dummyKeyPair.publicKey, - ); + const dummyPublicKey = keysUtils.publicKeyToPem(dummyKeyPair.publicKey); expect(await claimsUtils.verifyClaimSignature(claim, dummyPublicKey)).toBe( false, ); diff --git a/tests/client/GRPCClientClient.test.ts b/tests/client/GRPCClientClient.test.ts index a6ce3f3bb..ccf8c0596 100644 --- a/tests/client/GRPCClientClient.test.ts +++ b/tests/client/GRPCClientClient.test.ts @@ -11,6 +11,7 @@ import Session from '@/sessions/Session'; import * as keysUtils from '@/keys/utils'; import * as clientErrors from '@/client/errors'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import { timerStart } from '@/utils'; import * as testClientUtils from './utils'; import * as testUtils from '../utils'; @@ -76,7 +77,7 @@ describe(GRPCClientClient.name, () => { port: port as Port, tlsConfig: { keyPrivatePem: undefined, certChainPem: undefined }, logger: logger, - timeout: 10000, + timer: timerStart(10000), session: session, }); await client.destroy(); diff --git a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts index a987f6aad..14178ba49 100644 --- a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts +++ b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -59,6 +60,7 @@ describe('gestaltsDiscoveryByIdentity', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -125,23 +127,30 @@ describe('gestaltsDiscoveryByIdentity', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeConnectionManager, nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); discovery = await Discovery.createDiscovery({ db, keyManager, @@ -176,6 +185,8 @@ describe('gestaltsDiscoveryByIdentity', () => { await discovery.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); diff --git a/tests/client/service/gestaltsDiscoveryByNode.test.ts b/tests/client/service/gestaltsDiscoveryByNode.test.ts index 
d03fe307a..1611382f7 100644 --- a/tests/client/service/gestaltsDiscoveryByNode.test.ts +++ b/tests/client/service/gestaltsDiscoveryByNode.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -26,6 +27,7 @@ import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('gestaltsDiscoveryByNode', () => { const logger = new Logger('gestaltsDiscoveryByNode test', LogLevel.WARN, [ @@ -35,7 +37,7 @@ describe('gestaltsDiscoveryByNode', () => { const authenticate = async (metaClient, metaServer = new Metadata()) => metaServer; const node: NodeInfo = { - id: nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()), + id: nodesUtils.encodeNodeId(testNodesUtils.generateRandomNodeId()), chain: {}, }; let mockedGenerateKeyPair: jest.SpyInstance; @@ -59,6 +61,7 @@ describe('gestaltsDiscoveryByNode', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -125,23 +128,30 @@ describe('gestaltsDiscoveryByNode', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeConnectionManager, nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); discovery = await Discovery.createDiscovery({ db, keyManager, @@ -176,6 +186,8 @@ describe('gestaltsDiscoveryByNode', () => { await discovery.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); diff --git a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts index 8bd0a749e..9103363d1 100644 --- a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts @@ -10,6 +10,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import Discovery from '@/discovery/Discovery'; @@ -116,6 +117,7 @@ describe('gestaltsGestaltTrustByIdentity', () => { let discovery: Discovery; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; + let queue: Queue; let nodeManager: NodeManager; let nodeConnectionManager: NodeConnectionManager; let nodeGraph: NodeGraph; @@ -192,23 +194,30 @@ describe('gestaltsGestaltTrustByIdentity', () => { keyManager, logger: 
logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, - logger: logger.getChild('nodeManager'), + nodeGraph, + sigchain, + queue, + logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { host: node.proxy.getProxyHost(), port: node.proxy.getProxyPort(), @@ -247,6 +256,8 @@ describe('gestaltsGestaltTrustByIdentity', () => { await grpcServer.stop(); await discovery.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/client/service/gestaltsGestaltTrustByNode.test.ts b/tests/client/service/gestaltsGestaltTrustByNode.test.ts index ccc7c827d..62157f2b1 100644 --- a/tests/client/service/gestaltsGestaltTrustByNode.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByNode.test.ts @@ -10,6 +10,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import Discovery from '@/discovery/Discovery'; @@ -114,6 +115,7 @@ describe('gestaltsGestaltTrustByNode', () => { let discovery: Discovery; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; + let queue: Queue; let nodeManager: NodeManager; let nodeConnectionManager: NodeConnectionManager; let nodeGraph: NodeGraph; @@ -190,23 +192,30 @@ describe('gestaltsGestaltTrustByNode', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, - logger: logger.getChild('nodeManager'), + nodeGraph, + sigchain, + queue, + logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { host: node.proxy.getProxyHost(), port: node.proxy.getProxyPort(), @@ -245,6 +254,8 @@ describe('gestaltsGestaltTrustByNode', () => { await grpcServer.stop(); await discovery.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/client/service/identitiesClaim.test.ts b/tests/client/service/identitiesClaim.test.ts index b040a7b0a..87bd66723 100644 --- a/tests/client/service/identitiesClaim.test.ts +++ b/tests/client/service/identitiesClaim.test.ts @@ -2,12 +2,14 @@ import type { ClaimLinkIdentity } from '@/claims/types'; import type { NodeIdEncoded } from '@/nodes/types'; import type { IdentityId, ProviderId } from '@/identities/types'; import type { Host, Port } from '@/network/types'; +import 
type NodeManager from '@/nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import IdentitiesManager from '@/identities/IdentitiesManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -54,6 +56,7 @@ describe('identitiesClaim', () => { let mockedGenerateKeyPair: jest.SpyInstance; let mockedGenerateDeterministicKeyPair: jest.SpyInstance; let mockedAddClaim: jest.SpyInstance; + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const claim = await claimsUtils.createClaim({ @@ -83,6 +86,7 @@ describe('identitiesClaim', () => { let testProvider: TestProvider; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let proxy: Proxy; @@ -134,14 +138,19 @@ describe('identitiesClaim', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ connConnectTime: 2000, proxy, keyManager, nodeGraph, - logger: logger.getChild('nodeConnectionManager'), + queue, + logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); + await queue.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); const clientService = { identitiesClaim: identitiesClaim({ authenticate, @@ -167,6 +176,7 @@ describe('identitiesClaim', () => { await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); + await queue.stop(); await nodeGraph.stop(); await sigchain.stop(); await proxy.stop(); diff --git a/tests/client/service/keysKeyPairRenew.test.ts b/tests/client/service/keysKeyPairRenew.test.ts index 714055cf0..b3e414cbe 100644 --- a/tests/client/service/keysKeyPairRenew.test.ts +++ b/tests/client/service/keysKeyPairRenew.test.ts @@ -7,7 +7,6 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; -import NodeGraph from '@/nodes/NodeGraph'; import PolykeyAgent from '@/PolykeyAgent'; import GRPCServer from '@/grpc/GRPCServer'; import GRPCClientClient from '@/client/GRPCClientClient'; @@ -17,6 +16,7 @@ import * as keysPB from '@/proto/js/polykey/v1/keys/keys_pb'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; +import { NodeManager } from '@/nodes'; import * as testUtils from '../../utils'; describe('keysKeyPairRenew', () => { @@ -32,7 +32,7 @@ describe('keysKeyPairRenew', () => { beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const newKeyPair = await keysUtils.generateKeyPair(1024); - mockedRefreshBuckets = jest.spyOn(NodeGraph.prototype, 'refreshBuckets'); + mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'resetBuckets'); mockedGenerateKeyPair = jest .spyOn(keysUtils, 'generateKeyPair') .mockResolvedValueOnce(globalKeyPair) diff --git a/tests/client/service/keysKeyPairReset.test.ts b/tests/client/service/keysKeyPairReset.test.ts index 155d6071e..e0b8f61ae 100644 --- 
a/tests/client/service/keysKeyPairReset.test.ts +++ b/tests/client/service/keysKeyPairReset.test.ts @@ -7,7 +7,6 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; -import NodeGraph from '@/nodes/NodeGraph'; import PolykeyAgent from '@/PolykeyAgent'; import GRPCServer from '@/grpc/GRPCServer'; import GRPCClientClient from '@/client/GRPCClientClient'; @@ -17,6 +16,7 @@ import * as keysPB from '@/proto/js/polykey/v1/keys/keys_pb'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import * as clientUtils from '@/client/utils/utils'; import * as keysUtils from '@/keys/utils'; +import { NodeManager } from '@/nodes'; import * as testUtils from '../../utils'; describe('keysKeyPairReset', () => { @@ -32,7 +32,7 @@ describe('keysKeyPairReset', () => { beforeAll(async () => { const globalKeyPair = await testUtils.setupGlobalKeypair(); const newKeyPair = await keysUtils.generateKeyPair(1024); - mockedRefreshBuckets = jest.spyOn(NodeGraph.prototype, 'refreshBuckets'); + mockedRefreshBuckets = jest.spyOn(NodeManager.prototype, 'resetBuckets'); mockedGenerateKeyPair = jest .spyOn(keysUtils, 'generateKeyPair') .mockResolvedValueOnce(globalKeyPair) diff --git a/tests/client/service/nodesAdd.test.ts b/tests/client/service/nodesAdd.test.ts index 1cd51eb05..c264c8234 100644 --- a/tests/client/service/nodesAdd.test.ts +++ b/tests/client/service/nodesAdd.test.ts @@ -5,6 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -49,6 +50,7 @@ describe('nodesAdd', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -95,23 +97,30 @@ describe('nodesAdd', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeConnectionManager, nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); const clientService = { nodesAdd: nodesAdd({ authenticate, @@ -136,6 +145,8 @@ describe('nodesAdd', () => { await grpcServer.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await db.stop(); @@ -163,8 +174,7 @@ describe('nodesAdd', () => { )!, ); expect(result).toBeDefined(); - expect(result!.host).toBe('127.0.0.1'); - expect(result!.port).toBe(11111); + expect(result!.address).toEqual({ host: '127.0.0.1', port: 11111 }); }); test('cannot add invalid node', async () => { // Invalid host diff --git a/tests/client/service/nodesClaim.test.ts b/tests/client/service/nodesClaim.test.ts index 07d41e500..8443b264e 100644 --- a/tests/client/service/nodesClaim.test.ts +++ b/tests/client/service/nodesClaim.test.ts @@ -7,6 
+7,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NotificationsManager from '@/notifications/NotificationsManager'; import ACL from '@/acl/ACL'; @@ -76,6 +77,7 @@ describe('nodesClaim', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -128,23 +130,30 @@ describe('nodesClaim', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, + nodeGraph, + sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -178,6 +187,8 @@ describe('nodesClaim', () => { await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await nodeGraph.stop(); await notificationsManager.stop(); await sigchain.stop(); diff --git a/tests/client/service/nodesFind.test.ts b/tests/client/service/nodesFind.test.ts index 1197638f5..8adf8b4d4 100644 --- a/tests/client/service/nodesFind.test.ts +++ b/tests/client/service/nodesFind.test.ts @@ -1,10 +1,12 @@ import type { Host, Port } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -54,6 +56,7 @@ describe('nodesFind', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let proxy: Proxy; @@ -99,15 +102,20 @@ describe('nodesFind', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); + await queue.start(); + await nodeConnectionManager.start({ nodeManager: {} as NodeManager }); const clientService = { nodesFind: nodesFind({ nodeConnectionManager, @@ -133,6 +141,7 @@ describe('nodesFind', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await queue.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); diff --git a/tests/client/service/nodesPing.test.ts b/tests/client/service/nodesPing.test.ts index 0bfcabc97..ae30bf8f9 100644 --- 
a/tests/client/service/nodesPing.test.ts +++ b/tests/client/service/nodesPing.test.ts @@ -5,6 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -54,6 +55,7 @@ describe('nodesPing', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -100,23 +102,29 @@ describe('nodesPing', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeConnectionManager, nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeConnectionManager.start({ nodeManager }); const clientService = { nodesPing: nodesPing({ authenticate, @@ -142,6 +150,7 @@ describe('nodesPing', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await queue.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); diff --git a/tests/client/service/notificationsClear.test.ts b/tests/client/service/notificationsClear.test.ts index 2fab2e233..00b9bf65d 100644 --- a/tests/client/service/notificationsClear.test.ts +++ b/tests/client/service/notificationsClear.test.ts @@ -5,6 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -53,6 +54,7 @@ describe('notificationsClear', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -105,23 +107,30 @@ describe('notificationsClear', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, nodeConnectionManager, nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -156,6 +165,8 @@ describe('notificationsClear', () => { await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await acl.stop(); diff --git a/tests/client/service/notificationsRead.test.ts 
b/tests/client/service/notificationsRead.test.ts index 1b77af1a3..5a490e2cf 100644 --- a/tests/client/service/notificationsRead.test.ts +++ b/tests/client/service/notificationsRead.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -24,12 +25,13 @@ import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import * as clientUtils from '@/client/utils'; import * as testUtils from '../../utils'; +import * as testNodesUtils from '../../nodes/utils'; describe('notificationsRead', () => { const logger = new Logger('notificationsRead test', LogLevel.WARN, [ new StreamHandler(), ]); - const nodeIdSender = testUtils.generateRandomNodeId(); + const nodeIdSender = testNodesUtils.generateRandomNodeId(); const nodeIdSenderEncoded = nodesUtils.encodeNodeId(nodeIdSender); const password = 'helloworld'; const authenticate = async (metaClient, metaServer = new Metadata()) => @@ -127,6 +129,7 @@ describe('notificationsRead', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -179,23 +182,30 @@ describe('notificationsRead', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, - nodeGraph, nodeConnectionManager, + nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -231,6 +241,8 @@ describe('notificationsRead', () => { await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await proxy.stop(); await acl.stop(); await db.stop(); diff --git a/tests/client/service/notificationsSend.test.ts b/tests/client/service/notificationsSend.test.ts index 01764d368..58c0f321d 100644 --- a/tests/client/service/notificationsSend.test.ts +++ b/tests/client/service/notificationsSend.test.ts @@ -6,6 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -63,6 +64,7 @@ describe('notificationsSend', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -114,23 +116,30 @@ describe('notificationsSend', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: 
logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, - nodeGraph, nodeConnectionManager, + nodeGraph, sigchain, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -165,6 +174,8 @@ describe('notificationsSend', () => { await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await sigchain.stop(); await proxy.stop(); await acl.stop(); diff --git a/tests/client/utils.ts b/tests/client/utils.ts index 247b1da8b..7b49eb788 100644 --- a/tests/client/utils.ts +++ b/tests/client/utils.ts @@ -12,7 +12,7 @@ import { } from '@/proto/js/polykey/v1/client_service_grpc_pb'; import { createClientService } from '@/client'; import PolykeyClient from '@/PolykeyClient'; -import { promisify } from '@/utils'; +import { promisify, timerStart } from '@/utils'; import * as grpcUtils from '@/grpc/utils'; async function openTestClientServer({ @@ -81,7 +81,7 @@ async function openTestClientClient( port: port, fs, logger, - timeout: 30000, + timer: timerStart(30000), }); return pkc; diff --git a/tests/discovery/Discovery.test.ts b/tests/discovery/Discovery.test.ts index 70c4641dd..003deca7c 100644 --- a/tests/discovery/Discovery.test.ts +++ b/tests/discovery/Discovery.test.ts @@ -7,6 +7,7 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; +import Queue from '@/nodes/Queue'; import { PolykeyAgent } from '@'; import { Discovery } from '@/discovery'; import { GestaltGraph } from '@/gestalts'; @@ -47,6 +48,7 @@ describe('Discovery', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let db: DB; @@ -130,23 +132,30 @@ describe('Discovery', () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, - sigchain, - nodeGraph, nodeConnectionManager, - logger: logger.getChild('nodeManager'), + nodeGraph, + sigchain, + queue, + logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // Set up other gestalt nodeA = await PolykeyAgent.createPolykeyAgent({ password: password, @@ -202,6 +211,8 @@ describe('Discovery', () => { await nodeA.stop(); await nodeB.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); + await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); @@ -237,7 +248,7 @@ describe('Discovery', () => { discovery.queueDiscoveryByIdentity('' as ProviderId, '' as IdentityId), ).rejects.toThrow(discoveryErrors.ErrorDiscoveryNotRunning); await expect( - discovery.queueDiscoveryByNode(testUtils.generateRandomNodeId()), + 
discovery.queueDiscoveryByNode(testNodesUtils.generateRandomNodeId()), ).rejects.toThrow(discoveryErrors.ErrorDiscoveryNotRunning); }); test('discovery by node', async () => { diff --git a/tests/gestalts/GestaltGraph.test.ts b/tests/gestalts/GestaltGraph.test.ts index fa30c86bd..84a15c2db 100644 --- a/tests/gestalts/GestaltGraph.test.ts +++ b/tests/gestalts/GestaltGraph.test.ts @@ -20,19 +20,19 @@ import * as gestaltsErrors from '@/gestalts/errors'; import * as gestaltsUtils from '@/gestalts/utils'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('GestaltGraph', () => { const logger = new Logger('GestaltGraph Test', LogLevel.WARN, [ new StreamHandler(), ]); - const nodeIdABC = testUtils.generateRandomNodeId(); + const nodeIdABC = testNodesUtils.generateRandomNodeId(); const nodeIdABCEncoded = nodesUtils.encodeNodeId(nodeIdABC); - const nodeIdDEE = testUtils.generateRandomNodeId(); + const nodeIdDEE = testNodesUtils.generateRandomNodeId(); const nodeIdDEEEncoded = nodesUtils.encodeNodeId(nodeIdDEE); - const nodeIdDEF = testUtils.generateRandomNodeId(); + const nodeIdDEF = testNodesUtils.generateRandomNodeId(); const nodeIdDEFEncoded = nodesUtils.encodeNodeId(nodeIdDEF); - const nodeIdZZZ = testUtils.generateRandomNodeId(); + const nodeIdZZZ = testNodesUtils.generateRandomNodeId(); const nodeIdZZZEncoded = nodesUtils.encodeNodeId(nodeIdZZZ); let dataDir: string; diff --git a/tests/grpc/GRPCClient.test.ts b/tests/grpc/GRPCClient.test.ts index a4f83a1e0..929b8f933 100644 --- a/tests/grpc/GRPCClient.test.ts +++ b/tests/grpc/GRPCClient.test.ts @@ -10,13 +10,15 @@ import path from 'path'; import fs from 'fs'; import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; -import { utils as keysUtils } from '@/keys'; -import { Session, SessionManager } from '@/sessions'; -import { errors as grpcErrors } from '@/grpc'; +import Session from '@/sessions/Session'; +import SessionManager from '@/sessions/SessionManager'; +import * as keysUtils from '@/keys/utils'; +import * as grpcErrors from '@/grpc/errors'; import * as clientUtils from '@/client/utils'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; +import { timerStart } from '@/utils'; import * as utils from './utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('GRPCClient', () => { const logger = new Logger('GRPCClient Test', LogLevel.WARN, [ @@ -60,7 +62,7 @@ describe('GRPCClient', () => { }, }); const keyManager = { - getNodeId: () => testUtils.generateRandomNodeId(), + getNodeId: () => testNodesUtils.generateRandomNodeId(), } as KeyManager; // Cheeky mocking. 
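// The cast above is a minimal hand-rolled mock: only `getNodeId` exists on the
// object, so it is safe solely because SessionManager touches no other
// KeyManager member. A hypothetical sketch of the same pattern:
//   const keyManager = { getNodeId: () => someNodeId } as KeyManager;
// Any other KeyManager method would be undefined at runtime.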
sessionManager = await SessionManager.createSessionManager({ db, @@ -108,7 +110,7 @@ keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey), certChainPem: keysUtils.certToPem(clientCert), }, - timeout: 1000, + timer: timerStart(1000), logger, }); await client.destroy(); @@ -122,7 +124,7 @@ keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey), certChainPem: keysUtils.certToPem(clientCert), }, - timeout: 1000, + timer: timerStart(1000), logger, }); const m = new utilsPB.EchoMessage(); @@ -155,7 +157,7 @@ certChainPem: keysUtils.certToPem(clientCert), }, session, - timeout: 1000, + timer: timerStart(1000), logger, }); let pCall: PromiseUnaryCall<utilsPB.EchoMessage>; @@ -191,7 +193,7 @@ keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey), certChainPem: keysUtils.certToPem(clientCert), }, - timeout: 1000, + timer: timerStart(1000), logger, }); const challenge = 'f9s8d7f4'; @@ -234,7 +236,7 @@ certChainPem: keysUtils.certToPem(clientCert), }, session, - timeout: 1000, + timer: timerStart(1000), logger, }); const challenge = 'f9s8d7f4'; @@ -259,7 +261,7 @@ keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey), certChainPem: keysUtils.certToPem(clientCert), }, - timeout: 1000, + timer: timerStart(1000), logger, }); const [stream, response] = client.clientStream(); @@ -297,7 +299,7 @@ certChainPem: keysUtils.certToPem(clientCert), }, session, - timeout: 1000, + timer: timerStart(1000), logger, }); const [stream] = client.clientStream(); @@ -320,7 +322,7 @@ keyPrivatePem: keysUtils.privateKeyToPem(clientKeyPair.privateKey), certChainPem: keysUtils.certToPem(clientCert), }, - timeout: 1000, + timer: timerStart(1000), logger, }); const stream = client.duplexStream(); @@ -355,7 +357,7 @@ certChainPem: keysUtils.certToPem(clientCert), }, session, - timeout: 1000, + timer: timerStart(1000), logger, }); const stream = client.duplexStream(); diff --git a/tests/grpc/utils/GRPCClientTest.ts b/tests/grpc/utils/GRPCClientTest.ts index c4b55b1d1..a68527c83 100644 --- a/tests/grpc/utils/GRPCClientTest.ts +++ b/tests/grpc/utils/GRPCClientTest.ts @@ -5,6 +5,7 @@ import type { Host, Port, TLSConfig, ProxyConfig } from '@/network/types'; import type * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; import type { ClientReadableStream } from '@grpc/grpc-js/build/src/call'; import type { AsyncGeneratorReadableStreamClient } from '@/grpc/types'; +import type { Timer } from '@/types'; import Logger from '@matrixai/logger'; import { CreateDestroy, ready } from '@matrixai/async-init/dist/CreateDestroy'; import { GRPCClient, utils as grpcUtils } from '@/grpc'; @@ -21,7 +22,7 @@ class GRPCClientTest extends GRPCClient { tlsConfig, proxyConfig, session, - timeout = Infinity, + timer, destroyCallback, logger = new Logger(this.name), }: { @@ -31,7 +32,7 @@ class GRPCClientTest extends GRPCClient { tlsConfig?: TLSConfig; proxyConfig?: ProxyConfig; session?: Session; - timeout?: number; + timer?: Timer; destroyCallback?: () => Promise<void>; logger?: Logger; }): Promise<GRPCClientTest> { @@ -47,7 +48,7 @@ class GRPCClientTest extends GRPCClient { port, tlsConfig, proxyConfig, - timeout, + timer, interceptors, logger, }); diff --git a/tests/identities/IdentitiesManager.test.ts b/tests/identities/IdentitiesManager.test.ts index
b7ca969b0..23000440b 100644 --- a/tests/identities/IdentitiesManager.test.ts +++ b/tests/identities/IdentitiesManager.test.ts @@ -17,7 +17,7 @@ import * as identitiesErrors from '@/identities/errors'; import * as keysUtils from '@/keys/utils'; import * as nodesUtils from '@/nodes/utils'; import TestProvider from './TestProvider'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('IdentitiesManager', () => { const logger = new Logger('IdentitiesManager Test', LogLevel.WARN, [ @@ -219,7 +219,7 @@ describe('IdentitiesManager', () => { expect(identityDatas).toHaveLength(1); expect(identityDatas).not.toContainEqual(identityData); // Now publish a claim - const nodeIdSome = testUtils.generateRandomNodeId(); + const nodeIdSome = testNodesUtils.generateRandomNodeId(); const nodeIdSomeEncoded = nodesUtils.encodeNodeId(nodeIdSome); const signatures: Record = {}; signatures[nodeIdSome] = { diff --git a/tests/keys/KeyManager.test.ts b/tests/keys/KeyManager.test.ts index 773b5d3eb..05840fd76 100644 --- a/tests/keys/KeyManager.test.ts +++ b/tests/keys/KeyManager.test.ts @@ -88,9 +88,9 @@ describe('KeyManager', () => { expect(keysPathContents).toContain('root_certs'); expect(keysPathContents).toContain('db.key'); expect(keyManager.dbKey.toString()).toBeTruthy(); - const rootKeyPairPem = await keyManager.getRootKeyPairPem(); + const rootKeyPairPem = keyManager.getRootKeyPairPem(); expect(rootKeyPairPem).not.toBeUndefined(); - const rootCertPem = await keyManager.getRootCertPem(); + const rootCertPem = keyManager.getRootCertPem(); expect(rootCertPem).not.toBeUndefined(); const rootCertPems = await keyManager.getRootCertChainPems(); expect(rootCertPems.length).toBe(1); diff --git a/tests/network/Proxy.test.ts b/tests/network/Proxy.test.ts index 4393c69b9..fafa5f927 100644 --- a/tests/network/Proxy.test.ts +++ b/tests/network/Proxy.test.ts @@ -1,6 +1,6 @@ import type { Socket, AddressInfo } from 'net'; import type { KeyPairPem } from '@/keys/types'; -import type { Host, Port } from '@/network/types'; +import type { ConnectionData, Host, Port } from '@/network/types'; import http from 'http'; import net from 'net'; import tls from 'tls'; @@ -13,8 +13,9 @@ import { } from '@/network'; import * as keysUtils from '@/keys/utils'; import { promisify, promise, timerStart, timerStop, poll } from '@/utils'; -import { utils as nodesUtils } from '@/nodes'; +import * as nodesUtils from '@/nodes/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; /** * Mock HTTP Connect Request @@ -113,11 +114,11 @@ describe(Proxy.name, () => { const logger = new Logger(`${Proxy.name} test`, LogLevel.WARN, [ new StreamHandler(), ]); - const nodeIdABC = testUtils.generateRandomNodeId(); + const nodeIdABC = testNodesUtils.generateRandomNodeId(); const nodeIdABCEncoded = nodesUtils.encodeNodeId(nodeIdABC); - const nodeIdSome = testUtils.generateRandomNodeId(); + const nodeIdSome = testNodesUtils.generateRandomNodeId(); const nodeIdSomeEncoded = nodesUtils.encodeNodeId(nodeIdSome); - const nodeIdRandom = testUtils.generateRandomNodeId(); + const nodeIdRandom = testNodesUtils.generateRandomNodeId(); const authToken = 'abc123'; let keyPairPem: KeyPairPem; let certPem: string; @@ -2935,4 +2936,120 @@ describe(Proxy.name, () => { utpSocket.unref(); await serverClose(); }); + test('connectionEstablishedCallback is called when a ReverseConnection is established', async () => { + const clientKeyPair = await keysUtils.generateKeyPair(1024); + const 
clientKeyPairPem = keysUtils.keyPairToPem(clientKeyPair); + const clientCert = keysUtils.generateCertificate( + clientKeyPair.publicKey, + clientKeyPair.privateKey, + clientKeyPair.privateKey, + 86400, + ); + const clientCertPem = keysUtils.certToPem(clientCert); + const { + serverListen, + serverClose, + serverConnP, + serverConnEndP, + serverConnClosedP, + serverHost, + serverPort, + } = tcpServer(); + await serverListen(0, '127.0.0.1'); + const clientNodeId = keysUtils.certNodeId(clientCert)!; + let callbackData: ConnectionData | undefined; + const proxy = new Proxy({ + logger: logger, + authToken: '', + connectionEstablishedCallback: (data) => { + callbackData = data; + }, + }); + await proxy.start({ + serverHost: serverHost(), + serverPort: serverPort(), + proxyHost: localHost, + tlsConfig: { + keyPrivatePem: keyPairPem.privateKey, + certChainPem: certPem, + }, + }); + + const proxyHost = proxy.getProxyHost(); + const proxyPort = proxy.getProxyPort(); + const { p: clientReadyP, resolveP: resolveClientReadyP } = promise(); + const { p: clientSecureConnectP, resolveP: resolveClientSecureConnectP } = + promise(); + const { p: clientCloseP, resolveP: resolveClientCloseP } = promise(); + const utpSocket = UTP({ allowHalfOpen: true }); + const utpSocketBind = promisify(utpSocket.bind).bind(utpSocket); + const handleMessage = async (data: Buffer) => { + const msg = networkUtils.unserializeNetworkMessage(data); + if (msg.type === 'ping') { + resolveClientReadyP(); + await send(networkUtils.pongBuffer); + } + }; + utpSocket.on('message', handleMessage); + const send = async (data: Buffer) => { + const utpSocketSend = promisify(utpSocket.send).bind(utpSocket); + await utpSocketSend(data, 0, data.byteLength, proxyPort, proxyHost); + }; + await utpSocketBind(0, '127.0.0.1'); + const utpSocketPort = utpSocket.address().port; + await proxy.openConnectionReverse( + '127.0.0.1' as Host, + utpSocketPort as Port, + ); + const utpConn = utpSocket.connect(proxyPort, proxyHost); + const tlsSocket = tls.connect( + { + key: Buffer.from(clientKeyPairPem.privateKey, 'ascii'), + cert: Buffer.from(clientCertPem, 'ascii'), + socket: utpConn, + rejectUnauthorized: false, + }, + () => { + resolveClientSecureConnectP(); + }, + ); + let tlsSocketEnded = false; + tlsSocket.on('end', () => { + tlsSocketEnded = true; + if (utpConn.destroyed) { + tlsSocket.destroy(); + } else { + tlsSocket.end(); + tlsSocket.destroy(); + } + }); + tlsSocket.on('close', () => { + resolveClientCloseP(); + }); + await send(networkUtils.pingBuffer); + expect(proxy.getConnectionReverseCount()).toBe(1); + await clientReadyP; + await clientSecureConnectP; + await serverConnP; + await proxy.closeConnectionReverse( + '127.0.0.1' as Host, + utpSocketPort as Port, + ); + expect(proxy.getConnectionReverseCount()).toBe(0); + await clientCloseP; + await serverConnEndP; + await serverConnClosedP; + expect(tlsSocketEnded).toBe(true); + utpSocket.off('message', handleMessage); + utpSocket.close(); + utpSocket.unref(); + await proxy.stop(); + await serverClose(); + + // Checking callback data + expect(callbackData?.remoteNodeId.equals(clientNodeId)).toBe(true); + expect(callbackData?.remoteHost).toEqual('127.0.0.1'); + expect(callbackData?.remotePort).toEqual(utpSocketPort); + expect(callbackData?.type).toEqual('reverse'); + }); }); diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index 52d1ce674..1814c43b2 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -35,8 +35,11 @@ 
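// The reverse-connection test above asserts on four fields of the callback
// payload; a sketch of `ConnectionData` inferred from those assertions (the
// authoritative type lives in '@/network/types'):
type ConnectionData = {
  remoteNodeId: NodeId;
  remoteHost: Host;
  remotePort: Port;
  type: 'forward' | 'reverse'; // only 'reverse' is exercised in this test
};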
import * as GRPCErrors from '@/grpc/errors'; import * as nodesUtils from '@/nodes/utils'; import * as agentErrors from '@/agent/errors'; import * as grpcUtils from '@/grpc/utils'; +import { timerStart } from '@/utils'; +import Queue from '@/nodes/Queue'; +import * as testNodesUtils from './utils'; import * as testUtils from '../utils'; -import * as grpcTestUtils from '../grpc/utils'; +import * as testGrpcUtils from '../grpc/utils'; const destroyCallback = async () => {}; @@ -73,7 +76,7 @@ describe(`${NodeConnection.name} test`, () => { const password = 'password'; const node: NodeInfo = { - id: nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()), + id: nodesUtils.encodeNodeId(testNodesUtils.generateRandomNodeId()), chain: {}, }; @@ -83,6 +86,7 @@ describe(`${NodeConnection.name} test`, () => { let serverKeyManager: KeyManager; let serverVaultManager: VaultManager; let serverNodeGraph: NodeGraph; + let serverQueue: Queue; let serverNodeConnectionManager: NodeConnectionManager; let serverNodeManager: NodeManager; let serverSigchain: Sigchain; @@ -229,22 +233,26 @@ describe(`${NodeConnection.name} test`, () => { logger, }); + serverQueue = new Queue({ logger }); serverNodeConnectionManager = new NodeConnectionManager({ keyManager: serverKeyManager, nodeGraph: serverNodeGraph, proxy: serverProxy, + queue: serverQueue, logger, }); - await serverNodeConnectionManager.start(); - serverNodeManager = new NodeManager({ db: serverDb, sigchain: serverSigchain, keyManager: serverKeyManager, nodeGraph: serverNodeGraph, nodeConnectionManager: serverNodeConnectionManager, + queue: serverQueue, logger: logger, }); + await serverQueue.start(); + await serverNodeManager.start(); + await serverNodeConnectionManager.start({ nodeManager: serverNodeManager }); serverVaultManager = await VaultManager.createVaultManager({ keyManager: serverKeyManager, vaultsPath: serverVaultsPath, @@ -358,6 +366,8 @@ describe(`${NodeConnection.name} test`, () => { await serverNodeGraph.stop(); await serverNodeGraph.destroy(); await serverNodeConnectionManager.stop(); + await serverNodeManager.stop(); + await serverQueue.stop(); await serverNotificationsManager.stop(); await serverNotificationsManager.destroy(); await agentServer.stop(); @@ -489,7 +499,7 @@ describe(`${NodeConnection.name} test`, () => { // Have a nodeConnection try to connect to it const killSelf = jest.fn(); nodeConnection = await NodeConnection.createNodeConnection({ - connConnectTime: 500, + timer: timerStart(500), proxy: clientproxy, keyManager: clientKeyManager, logger: logger, @@ -524,7 +534,7 @@ describe(`${NodeConnection.name} test`, () => { targetNodeId: targetNodeId, targetHost: '128.0.0.1' as Host, targetPort: 12345 as Port, - connConnectTime: 300, + timer: timerStart(300), proxy: clientproxy, keyManager: clientKeyManager, nodeConnectionManager: dummyNodeConnectionManager, @@ -599,7 +609,7 @@ describe(`${NodeConnection.name} test`, () => { // Have a nodeConnection try to connect to it const killSelf = jest.fn(); const nodeConnectionP = NodeConnection.createNodeConnection({ - connConnectTime: 500, + timer: timerStart(500), proxy: clientproxy, keyManager: clientKeyManager, logger: logger, @@ -642,7 +652,7 @@ describe(`${NodeConnection.name} test`, () => { // Have a nodeConnection try to connect to it const killSelf = jest.fn(); const nodeConnectionP = NodeConnection.createNodeConnection({ - connConnectTime: 500, + timer: timerStart(500), proxy: clientproxy, keyManager: clientKeyManager, logger: logger, @@ -680,7 +690,7 @@ 
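// The setup above threads one shared Queue through both managers and makes
// the start order explicit; condensed from the hunks (teardown mirrors it in
// reverse):
const serverQueue = new Queue({ logger });
const serverNodeConnectionManager = new NodeConnectionManager({
  keyManager: serverKeyManager,
  nodeGraph: serverNodeGraph,
  proxy: serverProxy,
  queue: serverQueue,
  logger,
});
const serverNodeManager = new NodeManager({
  db: serverDb,
  sigchain: serverSigchain,
  keyManager: serverKeyManager,
  nodeGraph: serverNodeGraph,
  nodeConnectionManager: serverNodeConnectionManager,
  queue: serverQueue,
  logger,
});
await serverQueue.start();
await serverNodeManager.start();
await serverNodeConnectionManager.start({ nodeManager: serverNodeManager });
// ...and on teardown:
await serverNodeConnectionManager.stop();
await serverNodeManager.stop();
await serverQueue.stop();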
describe(`${NodeConnection.name} test`, () => { // Have a nodeConnection try to connect to it const killSelf = jest.fn(); nodeConnection = await NodeConnection.createNodeConnection({ - connConnectTime: 500, + timer: timerStart(500), proxy: clientproxy, keyManager: clientKeyManager, logger: logger, @@ -710,7 +720,7 @@ describe(`${NodeConnection.name} test`, () => { "should call `killSelf and throw if the server %s's during testUnaryFail", async (option) => { let nodeConnection: - | NodeConnection + | NodeConnection | undefined; let testProxy: Proxy | undefined; let testProcess: child_process.ChildProcessWithoutNullStreams | undefined; @@ -742,7 +752,7 @@ describe(`${NodeConnection.name} test`, () => { const killSelfCheck = jest.fn(); const killSelfP = promise(); nodeConnection = await NodeConnection.createNodeConnection({ - connConnectTime: 2000, + timer: timerStart(2000), proxy: clientproxy, keyManager: clientKeyManager, logger: logger, @@ -755,7 +765,7 @@ describe(`${NodeConnection.name} test`, () => { targetHost: testProxy.getProxyHost(), targetPort: testProxy.getProxyPort(), clientFactory: (args) => - grpcTestUtils.GRPCClientTest.createGRPCClientTest(args), + testGrpcUtils.GRPCClientTest.createGRPCClientTest(args), }); const client = nodeConnection.getClient(); @@ -779,7 +789,7 @@ describe(`${NodeConnection.name} test`, () => { "should call `killSelf and throw if the server %s's during testStreamFail", async (option) => { let nodeConnection: - | NodeConnection + | NodeConnection | undefined; let testProxy: Proxy | undefined; let testProcess: child_process.ChildProcessWithoutNullStreams | undefined; @@ -811,7 +821,7 @@ describe(`${NodeConnection.name} test`, () => { const killSelfCheck = jest.fn(); const killSelfP = promise(); nodeConnection = await NodeConnection.createNodeConnection({ - connConnectTime: 2000, + timer: timerStart(2000), proxy: clientproxy, keyManager: clientKeyManager, logger: logger, @@ -824,7 +834,7 @@ describe(`${NodeConnection.name} test`, () => { targetHost: testProxy.getProxyHost(), targetPort: testProxy.getProxyPort(), clientFactory: (args) => - grpcTestUtils.GRPCClientTest.createGRPCClientTest(args), + testGrpcUtils.GRPCClientTest.createGRPCClientTest(args), }); const client = nodeConnection.getClient(); diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index a6c3638cb..f0fe65d4e 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -1,11 +1,13 @@ -import type { NodeAddress, NodeData, NodeId, SeedNodes } from '@/nodes/types'; +import type { NodeAddress, NodeBucket, NodeId, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -14,13 +16,11 @@ import Proxy from '@/network/Proxy'; import GRPCClientAgent from '@/agent/GRPCClientAgent'; import * as nodesUtils from '@/nodes/utils'; -import * as nodesErrors from '@/nodes/errors'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; import * as nodesPB from '@/proto/js/polykey/v1/nodes/nodes_pb'; 
import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; -import * as nodesTestUtils from './utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from './utils'; describe(`${NodeConnectionManager.name} general test`, () => { const logger = new Logger( @@ -75,8 +75,8 @@ describe(`${NodeConnectionManager.name} general test`, () => { let keyManager: KeyManager; let db: DB; let proxy: Proxy; - let nodeGraph: NodeGraph; + let queue: Queue; let remoteNode1: PolykeyAgent; let remoteNode2: PolykeyAgent; @@ -126,6 +126,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -191,6 +192,10 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); + await queue.start(); const tlsConfig = { keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, certChainPem: keysUtils.certToPem(keyManager.getRootCert()), @@ -216,6 +221,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { }); afterEach(async () => { + await queue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await db.stop(); @@ -232,9 +238,10 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); try { // Case 1: node already exists in the local node graph (no contact required) const nodeId = nodeId1; @@ -259,9 +266,10 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); try { // Case 2: node can be found on the remote node const nodeId = nodeId1; @@ -300,9 +308,10 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); try { // Case 3: node exhausts all contacts and cannot find node const nodeId = nodeId1; @@ -326,9 +335,9 @@ describe(`${NodeConnectionManager.name} general test`, () => { port: 22222 as Port, } as NodeAddress); // Un-findable Node cannot be found - await expect(() => - nodeConnectionManager.findNode(nodeId), - ).rejects.toThrowError(nodesErrors.ErrorNodeGraphNodeIdNotFound); + await expect(nodeConnectionManager.findNode(nodeId)).resolves.toEqual( + undefined, + ); await server.stop(); } finally { @@ -337,129 +346,6 @@ describe(`${NodeConnectionManager.name} general test`, () => { }, global.failedConnectionTimeout * 2, ); - test('finds a single closest node', async () => { - // NodeConnectionManager under test - const nodeConnectionManager = new NodeConnectionManager({ - keyManager, - nodeGraph, - proxy, - logger: nodeConnectionManagerLogger, - }); - await nodeConnectionManager.start(); - try { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Find the closest nodes to some node, NODEID3 - 
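// Per the hunk above, findNode no longer rejects with
// ErrorNodeGraphNodeIdNotFound when the search is exhausted; callers branch
// on an undefined result instead of catching:
const address = await nodeConnectionManager.findNode(nodeId);
if (address == null) {
  // all contacts exhausted; treat the node as unlocatable
}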
const closest = await nodeConnectionManager.getClosestLocalNodes(nodeId3); - expect(closest).toContainEqual({ - id: newNode2Id, - distance: 121n, - address: { host: '227.1.1.1', port: 4567 }, - }); - } finally { - await nodeConnectionManager.stop(); - } - }); - test('finds 3 closest nodes', async () => { - const nodeConnectionManager = new NodeConnectionManager({ - keyManager, - nodeGraph, - proxy, - logger: nodeConnectionManagerLogger, - }); - await nodeConnectionManager.start(); - try { - // Add 3 nodes - await nodeGraph.setNode(nodeId1, { - host: '2.2.2.2', - port: 2222, - } as NodeAddress); - await nodeGraph.setNode(nodeId2, { - host: '3.3.3.3', - port: 3333, - } as NodeAddress); - await nodeGraph.setNode(nodeId3, { - host: '4.4.4.4', - port: 4444, - } as NodeAddress); - - // Find the closest nodes to some node, NODEID4 - const closest = await nodeConnectionManager.getClosestLocalNodes(nodeId3); - expect(closest.length).toBe(5); - expect(closest).toContainEqual({ - id: nodeId3, - distance: 0n, - address: { host: '4.4.4.4', port: 4444 }, - }); - expect(closest).toContainEqual({ - id: nodeId2, - distance: 116n, - address: { host: '3.3.3.3', port: 3333 }, - }); - expect(closest).toContainEqual({ - id: nodeId1, - distance: 121n, - address: { host: '2.2.2.2', port: 2222 }, - }); - } finally { - await nodeConnectionManager.stop(); - } - }); - test('finds the 20 closest nodes', async () => { - const nodeConnectionManager = new NodeConnectionManager({ - keyManager, - nodeGraph, - proxy, - logger: nodeConnectionManagerLogger, - }); - await nodeConnectionManager.start(); - try { - // Generate the node ID to find the closest nodes to (in bucket 100) - const nodeId = keyManager.getNodeId(); - const nodeIdToFind = nodesTestUtils.generateNodeIdForBucket(nodeId, 100); - // Now generate and add 20 nodes that will be close to this node ID - const addedClosestNodes: NodeData[] = []; - for (let i = 1; i < 101; i += 5) { - const closeNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeIdToFind, - i, - ); - const nodeAddress = { - host: (i + '.' + i + '.' + i + '.' 
+ i) as Host, - port: i as Port, - }; - await nodeGraph.setNode(closeNodeId, nodeAddress); - addedClosestNodes.push({ - id: closeNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance(nodeIdToFind, closeNodeId), - }); - } - // Now create and add 10 more nodes that are far away from this node - for (let i = 1; i <= 10; i++) { - const farNodeId = nodeIdGenerator(i); - const nodeAddress = { - host: `${i}.${i}.${i}.${i}` as Host, - port: i as Port, - }; - await nodeGraph.setNode(farNodeId, nodeAddress); - } - - // Find the closest nodes to the original generated node ID - const closest = await nodeConnectionManager.getClosestLocalNodes( - nodeIdToFind, - ); - // We should always only receive k nodes - expect(closest.length).toBe(nodeGraph.maxNodesPerBucket); - // Retrieved closest nodes should be exactly the same as the ones we added - expect(closest).toEqual(addedClosestNodes); - } finally { - await nodeConnectionManager.stop(); - } - }); test('receives 20 closest local nodes from connected target', async () => { let serverPKAgent: PolykeyAgent | undefined; let nodeConnectionManager: NodeConnectionManager | undefined; @@ -476,10 +362,11 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + queue, logger: logger.getChild('NodeConnectionManager'), }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); const targetNodeId = serverPKAgent.keyManager.getNodeId(); await nodeGraph.setNode(targetNodeId, { host: serverPKAgent.proxy.getProxyHost(), @@ -487,9 +374,9 @@ describe(`${NodeConnectionManager.name} general test`, () => { }); // Now generate and add 20 nodes that will be close to this node ID - const addedClosestNodes: NodeData[] = []; + const addedClosestNodes: NodeBucket = []; for (let i = 1; i < 101; i += 5) { - const closeNodeId = nodesTestUtils.generateNodeIdForBucket( + const closeNodeId = testNodesUtils.generateNodeIdForBucket( targetNodeId, i, ); @@ -498,11 +385,13 @@ describe(`${NodeConnectionManager.name} general test`, () => { port: i as Port, }; await serverPKAgent.nodeGraph.setNode(closeNodeId, nodeAddress); - addedClosestNodes.push({ - id: closeNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance(targetNodeId, closeNodeId), - }); + addedClosestNodes.push([ + closeNodeId, + { + address: nodeAddress, + lastUpdated: 0, + }, + ]); } // Now create and add 10 more nodes that are far away from this node for (let i = 1; i <= 10; i++) { @@ -521,7 +410,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { ); // Sort the received nodes on distance such that we can check its equality // with addedClosestNodes - closest.sort(nodesUtils.sortByDistance); + nodesUtils.bucketSortByDistance(closest, targetNodeId); expect(closest.length).toBe(20); expect(closest).toEqual(addedClosestNodes); } finally { @@ -545,14 +434,15 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // To test this we need to... // 2. call relayHolePunchMessage // 3. check that the relevant call was made. 
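// NodeBucket entries are now [NodeId, NodeData] pairs and distance is derived
// at sort time rather than stored; the shapes as used by the updated test
// (NodeData's exact definition lives in '@/nodes/types'):
type NodeData = { address: NodeAddress; lastUpdated: number };
type NodeBucket = Array<[NodeId, NodeData]>;

const bucket: NodeBucket = [];
bucket.push([closeNodeId, { address: nodeAddress, lastUpdated: 0 }]);
// In-place sort keyed on XOR distance to a reference node ID:
nodesUtils.bucketSortByDistance(bucket, targetNodeId);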
- const sourceNodeId = testUtils.generateRandomNodeId(); - const targetNodeId = testUtils.generateRandomNodeId(); + const sourceNodeId = testNodesUtils.generateRandomNodeId(); + const targetNodeId = testNodesUtils.generateRandomNodeId(); await nodeConnectionManager.sendHolePunchMessage( remoteNodeId1, sourceNodeId, @@ -582,13 +472,14 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // To test this we need to... // 2. call relayHolePunchMessage // 3. check that the relevant call was made. - const sourceNodeId = testUtils.generateRandomNodeId(); + const sourceNodeId = testNodesUtils.generateRandomNodeId(); const relayMessage = new nodesPB.Relay(); relayMessage.setSrcId(nodesUtils.encodeNodeId(sourceNodeId)); relayMessage.setTargetId(nodesUtils.encodeNodeId(remoteNodeId1)); diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts b/tests/nodes/NodeConnectionManager.lifecycle.test.ts index 7bb154f36..82ca1e20c 100644 --- a/tests/nodes/NodeConnectionManager.lifecycle.test.ts +++ b/tests/nodes/NodeConnectionManager.lifecycle.test.ts @@ -1,11 +1,13 @@ import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; +import type NodeManager from 'nodes/NodeManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -16,7 +18,7 @@ import * as nodesUtils from '@/nodes/utils'; import * as nodesErrors from '@/nodes/errors'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; -import { withF } from '@/utils'; +import { withF, timerStart } from '@/utils'; describe(`${NodeConnectionManager.name} lifecycle test`, () => { const logger = new Logger( @@ -74,6 +76,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { let proxy: Proxy; let nodeGraph: NodeGraph; + let queue: Queue; let remoteNode1: PolykeyAgent; let remoteNode2: PolykeyAgent; @@ -84,6 +87,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -98,7 +102,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { password, nodePath: path.join(dataDir2, 'remoteNode1'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: serverHost, }, logger: logger.getChild('remoteNode1'), }); @@ -107,7 +111,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { password, nodePath: path.join(dataDir2, 'remoteNode2'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: serverHost, }, logger: logger.getChild('remoteNode2'), }); @@ -149,6 +153,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, logger: logger.getChild('NodeGraph'), }); + queue = new Queue({ + logger: logger.getChild('queue'), + }); + await queue.start(); const tlsConfig = { keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, certChainPem: 
keysUtils.certToPem(keyManager.getRootCert()), @@ -174,6 +182,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { }); afterEach(async () => { + await queue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await db.stop(); @@ -192,9 +201,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; const initialConnLock = connections.get( @@ -219,9 +229,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; const initialConnLock = connections.get( @@ -262,9 +273,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; const initialConnLock = connections.get( @@ -296,9 +308,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; @@ -314,7 +327,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { }; // Creating the generator - const gen = await nodeConnectionManager.withConnG( + const gen = nodeConnectionManager.withConnG( remoteNodeId1, async function* () { yield* testGenerator(); @@ -360,10 +373,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + queue, connConnectTime: 500, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Add the dummy node await nodeGraph.setNode(dummyNodeId, { host: '125.0.0.1' as Host, @@ -397,9 +411,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore accessing protected NodeConnectionMap const connections = nodeConnectionManager.connections; expect(connections.size).toBe(0); @@ -425,9 +440,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore accessing protected NodeConnectionMap const connections = nodeConnectionManager.connections; expect(connections.size).toBe(0); @@ -460,9 +476,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await 
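// withConnG now returns the async generator directly instead of a Promise of
// one, so construction is synchronous and only iteration is awaited; a sketch
// of the consuming side (testGenerator as defined in the test above):
const gen = nodeConnectionManager.withConnG(remoteNodeId1, async function* () {
  yield* testGenerator();
});
for await (const _ of gen) {
  // the underlying connection is held for the duration of iteration
}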
nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; const initialConnLock = connections.get( @@ -497,9 +514,10 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, + queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Do testing // set up connections await nodeConnectionManager.withConnF(remoteNodeId1, nop); @@ -527,4 +545,87 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { await nodeConnectionManager?.stop(); } }); + + // New ping tests + test('should ping node with address', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + queue, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await nodeConnectionManager.pingNode( + remoteNodeId1, + remoteNode1.proxy.getProxyHost(), + remoteNode1.proxy.getProxyPort(), + ); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should fail to ping non existent node', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + queue, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + + // Pinging node + expect( + await nodeConnectionManager.pingNode( + remoteNodeId1, + '127.1.2.3' as Host, + 55555 as Port, + timerStart(1000), + ), + ).toEqual(false); + } finally { + await nodeConnectionManager?.stop(); + } + }); + test('should fail to ping node if NodeId does not match', async () => { + // NodeConnectionManager under test + let nodeConnectionManager: NodeConnectionManager | undefined; + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + queue, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + + expect( + await nodeConnectionManager.pingNode( + remoteNodeId1, + remoteNode2.proxy.getProxyHost(), + remoteNode2.proxy.getProxyPort(), + timerStart(1000), + ), + ).toEqual(false); + + expect( + await nodeConnectionManager.pingNode( + remoteNodeId2, + remoteNode1.proxy.getProxyHost(), + remoteNode1.proxy.getProxyPort(), + timerStart(1000), + ), + ).toEqual(false); + } finally { + await nodeConnectionManager?.stop(); + } + }); }); diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts index b5ecf3e3c..b63a4ae54 100644 --- a/tests/nodes/NodeConnectionManager.seednodes.test.ts +++ b/tests/nodes/NodeConnectionManager.seednodes.test.ts @@ -1,11 +1,13 @@ -import type { NodeId, SeedNodes } from '@/nodes/types'; +import type { NodeId, NodeIdEncoded, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; +import type { Sigchain } from '@/sigchain'; import fs from 'fs'; import path from 'path'; import os from 'os'; import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; +import NodeManager from '@/nodes/NodeManager'; import PolykeyAgent from 
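// The new ping tests above treat pingNode as a boolean probe with an optional
// Timer bounding the attempt, rather than an error-throwing call:
const alive = await nodeConnectionManager.pingNode(
  remoteNodeId1,
  remoteNode1.proxy.getProxyHost(),
  remoteNode1.proxy.getProxyPort(),
  timerStart(1000),
);
// `alive` is false when the host is unreachable or when the responding
// node's NodeId does not match the one we dialled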
'@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -15,6 +17,7 @@ import Proxy from '@/network/Proxy'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; +import Queue from '@/nodes/Queue'; describe(`${NodeConnectionManager.name} seed nodes test`, () => { const logger = new Logger( @@ -77,6 +80,10 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { + setNode: jest.fn(), + refreshBucketQueueAdd: jest.fn(), + } as unknown as NodeManager; beforeAll(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -116,6 +123,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { }); beforeEach(async () => { + // Clearing nodes from graphs + for await (const [nodeId] of remoteNode1.nodeGraph.getNodes()) { + await remoteNode1.nodeGraph.unsetNode(nodeId); + } + for await (const [nodeId] of remoteNode2.nodeGraph.getNodes()) { + await remoteNode2.nodeGraph.unsetNode(nodeId); + } dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -184,10 +198,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keyManager, nodeGraph, proxy, + queue: new Queue({ + logger: logger.getChild('queue'), + }), seedNodes: dummySeedNodes, logger: logger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); const seedNodes = nodeConnectionManager.getSeedNodes(); expect(seedNodes).toContainEqual(nodeId1); expect(seedNodes).toContainEqual(nodeId2); @@ -207,10 +224,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keyManager, nodeGraph, proxy, + queue: new Queue({ + logger: logger.getChild('queue'), + }), seedNodes: dummySeedNodes, logger: logger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); try { const seedNodes = nodeConnectionManager.getSeedNodes(); expect(seedNodes).toHaveLength(3); @@ -223,6 +243,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { }); test('should synchronise nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; + let queue: Queue | undefined; + const mockedRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + mockedRefreshBucket.mockImplementation(async () => {}); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -233,13 +260,26 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: remoteNode2.proxy.getProxyHost(), port: remoteNode2.proxy.getProxyPort(), }; + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, seedNodes, logger: logger, }); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + queue, + sigchain: {} as Sigchain, + }); + await queue.start(); + await nodeManager.start(); await remoteNode1.nodeGraph.setNode(nodeId1, { host: serverHost, port: serverPort, @@ -248,17 +288,85 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: serverHost, port: serverPort, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager }); await nodeConnectionManager.syncNodeGraph(); expect(await 
nodeGraph.getNode(nodeId1)).toBeDefined(); expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); expect(await nodeGraph.getNode(dummyNodeId)).toBeUndefined(); } finally { + mockedRefreshBucket.mockRestore(); + await nodeManager?.stop(); await nodeConnectionManager?.stop(); + await queue?.stop(); + } + }); + test('should call refreshBucket when syncing nodeGraph', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; + let queue: Queue | undefined; + const mockedRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + mockedRefreshBucket.mockImplementation(async () => {}); + try { + const seedNodes: SeedNodes = {}; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { + host: remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { + host: remoteNode2.proxy.getProxyHost(), + port: remoteNode2.proxy.getProxyPort(), + }; + queue = new Queue({ logger }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + queue, + seedNodes, + logger: logger, + }); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + sigchain: {} as Sigchain, + queue, + }); + await queue.start(); + await nodeManager.start(); + await remoteNode1.nodeGraph.setNode(nodeId1, { + host: serverHost, + port: serverPort, + }); + await remoteNode2.nodeGraph.setNode(nodeId2, { + host: serverHost, + port: serverPort, + }); + await nodeConnectionManager.start({ nodeManager }); + await nodeConnectionManager.syncNodeGraph(); + await nodeManager.refreshBucketQueueDrained(); + expect(mockedRefreshBucket).toHaveBeenCalled(); + } finally { + mockedRefreshBucket.mockRestore(); + await nodeManager?.stop(); + await nodeConnectionManager?.stop(); + await queue?.stop(); } }); test('should handle an offline seed node when synchronising nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; + let nodeManager: NodeManager | undefined; + let queue: Queue | undefined; + const mockedRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + mockedRefreshBucket.mockImplementation(async () => {}); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -282,22 +390,118 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: serverHost, port: serverPort, }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, + queue, seedNodes, connConnectTime: 500, logger: logger, }); - await nodeConnectionManager.start(); + nodeManager = new NodeManager({ + db, + keyManager, + logger, + nodeConnectionManager, + nodeGraph, + sigchain: {} as Sigchain, + queue, + }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // This should complete without error await nodeConnectionManager.syncNodeGraph(); // Information on remotes are found expect(await nodeGraph.getNode(nodeId1)).toBeDefined(); expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); } finally { + mockedRefreshBucket.mockRestore(); await nodeConnectionManager?.stop(); + await nodeManager?.stop(); + await queue?.stop(); + } + }); + test('should expand the network when nodes enter', async () => { + // Using a single seed node we need to check that each entering node adds itself to the seed node. 
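// Pattern repeated across the seed-node tests above: stub out refreshBucket
// so syncNodeGraph stays deterministic, then restore it in the finally block:
const mockedRefreshBucket = jest.spyOn(NodeManager.prototype, 'refreshBucket');
mockedRefreshBucket.mockImplementation(async () => {});
try {
  await nodeConnectionManager.start({ nodeManager });
  await nodeConnectionManager.syncNodeGraph();
  await nodeManager.refreshBucketQueueDrained(); // wait for queued refreshes
  expect(mockedRefreshBucket).toHaveBeenCalled();
} finally {
  mockedRefreshBucket.mockRestore();
}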
+ // Also need to check that the new nodes can be seen in the network. + let node1: PolykeyAgent | undefined; + let node2: PolykeyAgent | undefined; + const seedNodes: SeedNodes = {}; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { + host: remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { + host: remoteNode2.proxy.getProxyHost(), + port: remoteNode2.proxy.getProxyPort(), + }; + try { + logger.setLevel(LogLevel.WARN); + node1 = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node1'), + password: 'password', + networkConfig: { + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, + }, + seedNodes, + logger, + }); + node2 = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node2'), + password: 'password', + networkConfig: { + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, + }, + seedNodes, + logger, + }); + + await node1.queue.drained(); + await node1.nodeManager.refreshBucketQueueDrained(); + await node2.queue.drained(); + await node2.nodeManager.refreshBucketQueueDrained(); + + const getAllNodes = async (node: PolykeyAgent) => { + const nodes: Array = []; + for await (const [nodeId] of node.nodeGraph.getNodes()) { + nodes.push(nodesUtils.encodeNodeId(nodeId)); + } + return nodes; + }; + const rNode1Nodes = await getAllNodes(remoteNode1); + const rNode2Nodes = await getAllNodes(remoteNode2); + const node1Nodes = await getAllNodes(node1); + const node2Nodes = await getAllNodes(node2); + + const nodeIdR1 = nodesUtils.encodeNodeId(remoteNodeId1); + const nodeIdR2 = nodesUtils.encodeNodeId(remoteNodeId2); + const nodeId1 = nodesUtils.encodeNodeId(node1.keyManager.getNodeId()); + const nodeId2 = nodesUtils.encodeNodeId(node2.keyManager.getNodeId()); + expect(rNode1Nodes).toContain(nodeId1); + expect(rNode1Nodes).toContain(nodeId2); + expect(rNode2Nodes).toContain(nodeId1); + expect(rNode2Nodes).toContain(nodeId2); + expect(node1Nodes).toContain(nodeIdR1); + expect(node1Nodes).toContain(nodeIdR2); + expect(node1Nodes).toContain(nodeId2); + expect(node2Nodes).toContain(nodeIdR1); + expect(node2Nodes).toContain(nodeIdR2); + expect(node2Nodes).toContain(nodeId1); + } finally { + logger.setLevel(LogLevel.WARN); + await node1?.stop(); + await node1?.destroy(); + await node2?.stop(); + await node2?.destroy(); } }); }); diff --git a/tests/nodes/NodeConnectionManager.termination.test.ts b/tests/nodes/NodeConnectionManager.termination.test.ts index 7cc443d07..0222d45ae 100644 --- a/tests/nodes/NodeConnectionManager.termination.test.ts +++ b/tests/nodes/NodeConnectionManager.termination.test.ts @@ -1,6 +1,8 @@ import type { AddressInfo } from 'net'; import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port, TLSConfig } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; +import type Queue from '@/nodes/Queue'; import net from 'net'; import fs from 'fs'; import path from 'path'; @@ -85,6 +87,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeEach(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -244,10 +247,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, 
logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Attempt a connection await expect( @@ -284,10 +288,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Attempt a connection const resultP = nodeConnectionManager.withConnF(dummyNodeId, async () => { @@ -327,10 +332,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // Attempt a connection const connectionAttemptP = nodeConnectionManager.withConnF( @@ -370,10 +376,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -426,10 +433,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -504,10 +512,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -575,10 +584,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -594,7 +604,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { const firstConnection = firstConnAndLock?.connection; // Resolves if the shutdownCallback was called - const gen = await nodeConnectionManager.withConnG( + const gen = nodeConnectionManager.withConnG( agentNodeId, async function* (): AsyncGenerator { // Throw an error here @@ -651,10 +661,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; @@ -727,10 +738,11 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, + queue: {} as Queue, logger: logger, connConnectTime: 2000, }); - 
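// Where the queue plays no part in the behaviour under test, the termination
// and timeout suites above inject a bare cast rather than a live instance:
const nodeConnectionManager = new NodeConnectionManager({
  keyManager,
  nodeGraph,
  proxy: defaultProxy,
  queue: {} as Queue, // placeholder; never started and never drained
  logger,
  connConnectTime: 2000,
});
await nodeConnectionManager.start({ nodeManager: dummyNodeManager });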
await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnapping connection map const connections = nodeConnectionManager.connections; diff --git a/tests/nodes/NodeConnectionManager.timeout.test.ts b/tests/nodes/NodeConnectionManager.timeout.test.ts index 5e48eaaaf..018e6efac 100644 --- a/tests/nodes/NodeConnectionManager.timeout.test.ts +++ b/tests/nodes/NodeConnectionManager.timeout.test.ts @@ -1,5 +1,7 @@ import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; +import type NodeManager from 'nodes/NodeManager'; +import type Queue from '@/nodes/Queue'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -78,6 +80,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keysUtils, 'generateDeterministicKeyPair', ); + const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; beforeAll(async () => { mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { @@ -186,10 +189,11 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, connTimeoutTime: 500, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; await nodeConnectionManager.withConnF(remoteNodeId1, nop); @@ -223,10 +227,11 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, connTimeoutTime: 1000, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; await nodeConnectionManager.withConnF(remoteNodeId1, nop); @@ -276,9 +281,10 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, logger: nodeConnectionManagerLogger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; await nodeConnectionManager.withConnF(remoteNodeId1, nop); diff --git a/tests/nodes/NodeGraph.test.ts b/tests/nodes/NodeGraph.test.ts index 6b9eec700..66b958716 100644 --- a/tests/nodes/NodeGraph.test.ts +++ b/tests/nodes/NodeGraph.test.ts @@ -1,59 +1,46 @@ -import type { Host, Port } from '@/network/types'; -import type { NodeAddress, NodeData, NodeId } from '@/nodes/types'; +import type { + NodeId, + NodeData, + NodeAddress, + NodeBucket, + NodeBucketIndex, +} from '@/nodes/types'; import os from 'os'; import path from 'path'; import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; -import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; -import * as nodesErrors from '@/nodes/errors'; import KeyManager from '@/keys/KeyManager'; import * as keysUtils from '@/keys/utils'; -import Proxy from '@/network/Proxy'; import * as nodesUtils from '@/nodes/utils'; -import Sigchain from '@/sigchain/Sigchain'; -import * as nodesTestUtils from './utils'; +import * as nodesErrors from '@/nodes/errors'; +import * as utils from 
'@/utils'; +import * as testNodesUtils from './utils'; +import * as testUtils from '../utils'; describe(`${NodeGraph.name} test`, () => { - const localHost = '127.0.0.1' as Host; - const port = 0 as Port; const password = 'password'; - let nodeGraph: NodeGraph; - let nodeId: NodeId; - - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, - ]); - const dummyNode = nodesUtils.decodeNodeId( - 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', - )!; - - const logger = new Logger(`${NodeGraph.name} test`, LogLevel.ERROR, [ + const logger = new Logger(`${NodeGraph.name} test`, LogLevel.WARN, [ new StreamHandler(), ]); - let proxy: Proxy; + let mockedGenerateKeyPair: jest.SpyInstance; + let mockedGenerateDeterministicKeyPair: jest.SpyInstance; let dataDir: string; let keyManager: KeyManager; + let dbKey: Buffer; + let dbPath: string; let db: DB; - let nodeConnectionManager: NodeConnectionManager; - let sigchain: Sigchain; - - const hostGen = (i: number) => `${i}.${i}.${i}.${i}` as Host; - - const mockedGenerateDeterministicKeyPair = jest.spyOn( - keysUtils, - 'generateDeterministicKeyPair', - ); - - beforeEach(async () => { - mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { - return keysUtils.generateKeyPair(bits); - }); - + beforeAll(async () => { + const globalKeyPair = await testUtils.setupGlobalKeypair(); + mockedGenerateKeyPair = jest + .spyOn(keysUtils, 'generateKeyPair') + .mockResolvedValue(globalKeyPair); + mockedGenerateDeterministicKeyPair = jest + .spyOn(keysUtils, 'generateDeterministicKeyPair') + .mockResolvedValue(globalKeyPair); dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -63,559 +50,1002 @@ describe(`${NodeGraph.name} test`, () => { keysPath, logger, }); - proxy = new Proxy({ - authToken: 'auth', - logger: logger, - }); - await proxy.start({ - serverHost: localHost, - serverPort: port, - tlsConfig: { - keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, - certChainPem: await keyManager.getRootCertChainPem(), - }, + dbKey = await keysUtils.generateKey(); + dbPath = `${dataDir}/db`; + }); + afterAll(async () => { + await keyManager.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, }); - const dbPath = `${dataDir}/db`; + mockedGenerateKeyPair.mockRestore(); + mockedGenerateDeterministicKeyPair.mockRestore(); + }); + beforeEach(async () => { db = await DB.createDB({ dbPath, logger, crypto: { - key: keyManager.dbKey, + key: dbKey, ops: { encrypt: keysUtils.encryptWithKey, decrypt: keysUtils.decryptWithKey, }, }, }); - sigchain = await Sigchain.createSigchain({ - keyManager: keyManager, - db: db, - logger: logger, - }); - nodeGraph = await NodeGraph.createNodeGraph({ + }); + afterEach(async () => { + await db.stop(); + await db.destroy(); + }); + test('get, set and unset node IDs', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, logger, }); - nodeConnectionManager = new NodeConnectionManager({ - keyManager: keyManager, - nodeGraph: nodeGraph, - proxy: proxy, - logger: logger, - }); - await nodeConnectionManager.start(); - // Retrieve the NodeGraph reference from NodeManager - nodeId = keyManager.getNodeId(); - }); + let nodeId1: NodeId; + do { + nodeId1 = testNodesUtils.generateRandomNodeId(); + } while (nodeId1.equals(keyManager.getNodeId())); + let nodeId2: NodeId; + do { + nodeId2 = testNodesUtils.generateRandomNodeId(); + } while 
(nodeId2.equals(keyManager.getNodeId())); - afterEach(async () => { - await db.stop(); - await sigchain.stop(); - await nodeConnectionManager.stop(); - await nodeGraph.stop(); - await keyManager.stop(); - await proxy.stop(); - await fs.promises.rm(dataDir, { - force: true, - recursive: true, + await nodeGraph.setNode(nodeId1, { + host: '10.0.0.1', + port: 1234, + } as NodeAddress); + const nodeData1 = await nodeGraph.getNode(nodeId1); + expect(nodeData1).toStrictEqual({ + address: { + host: '10.0.0.1', + port: 1234, + }, + lastUpdated: expect.any(Number), }); + await utils.sleep(1000); + await nodeGraph.setNode(nodeId2, { + host: 'abc.com', + port: 8978, + } as NodeAddress); + const nodeData2 = await nodeGraph.getNode(nodeId2); + expect(nodeData2).toStrictEqual({ + address: { + host: 'abc.com', + port: 8978, + }, + lastUpdated: expect.any(Number), + }); + expect(nodeData2!.lastUpdated > nodeData1!.lastUpdated).toBe(true); + const nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); + expect(nodes).toHaveLength(2); + expect(nodes).toContainEqual([ + nodeId1, + { + address: { + host: '10.0.0.1', + port: 1234, + }, + lastUpdated: expect.any(Number), + }, + ]); + expect(nodes).toContainEqual([ + nodeId2, + { + address: { + host: 'abc.com', + port: 8978, + }, + lastUpdated: expect.any(Number), + }, + ]); + await nodeGraph.unsetNode(nodeId1); + expect(await nodeGraph.getNode(nodeId1)).toBeUndefined(); + expect(await utils.asyncIterableArray(nodeGraph.getNodes())).toStrictEqual([ + [ + nodeId2, + { + address: { + host: 'abc.com', + port: 8978, + }, + lastUpdated: expect.any(Number), + }, + ], + ]); + await nodeGraph.unsetNode(nodeId2); + await nodeGraph.stop(); }); - - test('NodeGraph readiness', async () => { - const nodeGraph2 = await NodeGraph.createNodeGraph({ + test('get all nodes', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, logger, }); - // @ts-ignore - await expect(nodeGraph2.destroy()).rejects.toThrow( - nodesErrors.ErrorNodeGraphRunning, - ); - // Should be a noop - await nodeGraph2.start(); - await nodeGraph2.stop(); - await nodeGraph2.destroy(); - await expect(async () => { - await nodeGraph2.start(); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphDestroyed); - await expect(async () => { - await nodeGraph2.getBucket(0); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - await expect(async () => { - await nodeGraph2.getBucket(0); - }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); - }); - test('knows node (true and false case)', async () => { - // Known node - const nodeAddress1: NodeAddress = { - host: '127.0.0.1' as Host, - port: 11111 as Port, - }; - await nodeGraph.setNode(nodeId1, nodeAddress1); - expect(await nodeGraph.knowsNode(nodeId1)).toBeTruthy(); - - // Unknown node - expect(await nodeGraph.knowsNode(dummyNode)).toBeFalsy(); - }); - test('finds correct node address', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Get node address - const foundAddress = await nodeGraph.getNode(newNode2Id); - expect(foundAddress).toEqual({ host: '227.1.1.1', port: 4567 }); - }); - test('unable to find node address', async () => { - // New node added - const newNode2Id = nodeId1; - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Get node address (of non-existent node) - const 
foundAddress = await nodeGraph.getNode(dummyNode); - expect(foundAddress).toBeUndefined(); - }); - test('adds a single node into a bucket', async () => { - // New node added - const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); - const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - // Check new node is in retrieved bucket from database - // bucketIndex = 1 as "NODEID1" XOR "NODEID2" = 3 - const bucket = await nodeGraph.getBucket(1); - expect(bucket).toBeDefined(); - expect(bucket![newNode2Id]).toEqual({ - address: { host: '227.1.1.1', port: 4567 }, - lastUpdated: expect.any(Date), + let nodeIds = Array.from({ length: 25 }, () => { + return testNodesUtils.generateRandomNodeId(); }); - }); - test('adds multiple nodes into the same bucket', async () => { - // Add 3 new nodes into bucket 4 - const bucketIndex = 4; - const newNode1Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 0, + nodeIds = nodeIds.filter( + (nodeId) => !nodeId.equals(keyManager.getNodeId()), ); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - const newNode2Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 1, + let bucketIndexes: Array; + let nodes: NodeBucket; + nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); + expect(nodes).toHaveLength(0); + for (const nodeId of nodeIds) { + await utils.sleep(100); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: 55555, + } as NodeAddress); + } + nodes = await utils.asyncIterableArray(nodeGraph.getNodes()); + expect(nodes).toHaveLength(25); + // Sorted by bucket indexes ascending + bucketIndexes = nodes.map(([nodeId]) => + nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId), ); - const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const newNode3Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 2, + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] <= bucketIndex; + }), + ).toBe(true); + // Sorted by bucket indexes ascending explicitly + nodes = await utils.asyncIterableArray(nodeGraph.getNodes('asc')); + bucketIndexes = nodes.map(([nodeId]) => + nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId), + ); + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] <= bucketIndex; + }), + ).toBe(true); + nodes = await utils.asyncIterableArray(nodeGraph.getNodes('desc')); + expect(nodes).toHaveLength(25); + // Sorted by bucket indexes descending + bucketIndexes = nodes.map(([nodeId]) => + nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId), ); - const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; - await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4 - const bucket = await nodeGraph.getBucket(4); - expect(bucket).toBeDefined(); - if (!bucket) fail('bucket should be defined, letting TS know'); - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), + expect( + bucketIndexes.slice(1).every((bucketIndex, i) => { + return bucketIndexes[i] >= bucketIndex; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('setting same node ID throws error', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + 
keyManager, + logger, }); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), + await expect( + nodeGraph.setNode(keyManager.getNodeId(), { + host: '127.0.0.1', + port: 55555, + } as NodeAddress), + ).rejects.toThrow(nodesErrors.ErrorNodeGraphSameNodeId); + await nodeGraph.stop(); + }); + test('get bucket with 1 node', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), + let nodeId: NodeId; + do { + nodeId = testNodesUtils.generateRandomNodeId(); + } while (nodeId.equals(keyManager.getNodeId())); + // Set one node + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: 55555, + } as NodeAddress); + const bucketIndex = nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId); + const bucket = await nodeGraph.getBucket(bucketIndex); + expect(bucket).toHaveLength(1); + expect(bucket[0]).toStrictEqual([ + nodeId, + { + address: { + host: '127.0.0.1', + port: 55555, + }, + lastUpdated: expect.any(Number), + }, + ]); + expect(await nodeGraph.getBucketMeta(bucketIndex)).toStrictEqual({ + count: 1, }); - }); - test('adds a single node into different buckets', async () => { - // New node for bucket 3 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 3); - const newNode1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - // New node for bucket 255 (the highest possible bucket) - const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); - const newNode2Address = { host: '2.2.2.2', port: 2222 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const bucket3 = await nodeGraph.getBucket(3); - const bucket351 = await nodeGraph.getBucket(255); - if (bucket3 && bucket351) { - expect(bucket3[newNode1Id]).toEqual({ - address: { host: '1.1.1.1', port: 1111 }, - lastUpdated: expect.any(Date), - }); - expect(bucket351[newNode2Id]).toEqual({ - address: { host: '2.2.2.2', port: 2222 }, - lastUpdated: expect.any(Date), - }); + // Adjacent bucket should be empty + let bucketIndex_: number; + if (bucketIndex >= nodeId.length * 8 - 1) { + bucketIndex_ = bucketIndex - 1; + } else if (bucketIndex === 0) { + bucketIndex_ = bucketIndex + 1; } else { - // Should be unreachable - fail('Bucket undefined'); + bucketIndex_ = bucketIndex + 1; } + expect(await nodeGraph.getBucket(bucketIndex_)).toHaveLength(0); + expect(await nodeGraph.getBucketMeta(bucketIndex_)).toStrictEqual({ + count: 0, + }); + await nodeGraph.stop(); }); - test('deletes a single node (and removes bucket)', async () => { - // New node for bucket 2 - const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - // Check the bucket is there first - const bucket = await nodeGraph.getBucket(2); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); - } - - // Delete the node - await nodeGraph.unsetNode(newNode1Id); - // Check bucket no longer exists - const newBucket = await nodeGraph.getBucket(2); - expect(newBucket).toBeUndefined(); - }); - test('deletes a single node (and retains remainder of bucket)', async () => { - 
// Add 3 new nodes into bucket 4 - const bucketIndex = 4; - const newNode1Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 0, - ); - const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; - await nodeGraph.setNode(newNode1Id, newNode1Address); - - const newNode2Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 1, + test('get bucket with multiple nodes', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + // Contiguous node IDs starting from 0 + let nodeIds = Array.from({ length: 25 }, (_, i) => + IdInternal.create( + utils.bigInt2Bytes(BigInt(i), keyManager.getNodeId().byteLength), + ), ); - const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; - await nodeGraph.setNode(newNode2Id, newNode2Address); - - const newNode3Id = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - 2, + nodeIds = nodeIds.filter( + (nodeId) => !nodeId.equals(keyManager.getNodeId()), ); - const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; - await nodeGraph.setNode(newNode3Id, newNode3Address); - // Based on XOR values, all 3 nodes should appear in bucket 4 - const bucket = await nodeGraph.getBucket(bucketIndex); - if (bucket) { - expect(bucket[newNode1Id]).toEqual({ - address: { host: '4.4.4.4', port: 4444 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); + for (const nodeId of nodeIds) { + await utils.sleep(100); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: 55555, + } as NodeAddress); } - - // Delete the node - await nodeGraph.unsetNode(newNode1Id); - // Check node no longer exists in the bucket - const newBucket = await nodeGraph.getBucket(bucketIndex); - if (newBucket) { - expect(newBucket[newNode1Id]).toBeUndefined(); - expect(bucket[newNode2Id]).toEqual({ - address: { host: '5.5.5.5', port: 5555 }, - lastUpdated: expect.any(Date), - }); - expect(bucket[newNode3Id]).toEqual({ - address: { host: '6.6.6.6', port: 6666 }, - lastUpdated: expect.any(Date), - }); + // Use first and last buckets because node IDs may be split between buckets + const bucketIndexFirst = nodesUtils.bucketIndex( + keyManager.getNodeId(), + nodeIds[0], + ); + const bucketIndexLast = nodesUtils.bucketIndex( + keyManager.getNodeId(), + nodeIds[nodeIds.length - 1], + ); + const bucketFirst = await nodeGraph.getBucket(bucketIndexFirst); + const bucketLast = await nodeGraph.getBucket(bucketIndexLast); + let bucket: NodeBucket; + let bucketIndex: NodeBucketIndex; + if (bucketFirst.length >= bucketLast.length) { + bucket = bucketFirst; + bucketIndex = bucketIndexFirst; } else { - // Should be unreachable - fail('New bucket undefined'); + bucket = bucketLast; + bucketIndex = bucketIndexLast; } - }); - test('enforces k-bucket size, removing least active node when a new node is discovered', async () => { - // Add k nodes to the database (importantly, they all go into the same bucket) - const bucketIndex = 59; - // Keep a record of the first node ID that we added - const firstNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, + expect(bucket.length > 1).toBe(true); + let bucketNodeIds = bucket.map(([nodeId]) => nodeId); + // The node IDs must be 
sorted lexicographically + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) < 1; + }), + ).toBe(true); + // Sort by node ID asc + bucket = await nodeGraph.getBucket(bucketIndex, 'nodeId', 'asc'); + bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) < 0; + }), + ).toBe(true); + // Sort by node ID desc + bucket = await nodeGraph.getBucket(bucketIndex, 'nodeId', 'desc'); + bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) > 0; + }), + ).toBe(true); + // Sort by distance asc + bucket = await nodeGraph.getBucket(bucketIndex, 'distance', 'asc'); + let bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), + ); + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] <= distance; + }), + ).toBe(true); + // Sort by distance desc + bucket = await nodeGraph.getBucket(bucketIndex, 'distance', 'desc'); + bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), ); - for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { - // Add the current node ID - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode( - nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, i), - nodeAddress, + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] >= distance; + }), + ).toBe(true); + // Sort by lastUpdated asc + bucket = await nodeGraph.getBucket(bucketIndex, 'lastUpdated', 'asc'); + let bucketLastUpdateds = bucket.map(([, nodeData]) => nodeData.lastUpdated); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] <= lastUpdated; + }), + ).toBe(true); + bucket = await nodeGraph.getBucket(bucketIndex, 'lastUpdated', 'desc'); + bucketLastUpdateds = bucket.map(([, nodeData]) => nodeData.lastUpdated); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] >= lastUpdated; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get all buckets', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 50; i++) { + await utils.sleep(50); + await nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + let bucketIndex_ = -1; + // Ascending order + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'nodeId', + 'asc', + )) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) < 0; + }), + ).toBe(true); + } + // There must have been 
at least 1 bucket + expect(bucketIndex_).not.toBe(-1); + // Descending order + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'nodeId', + 'desc', + )) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketNodeIds = bucket.map(([nodeId]) => nodeId); + expect( + bucketNodeIds.slice(1).every((nodeId, i) => { + return Buffer.compare(bucketNodeIds[i], nodeId) > 0; + }), + ).toBe(true); + } + expect(bucketIndex_).not.toBe(keyManager.getNodeId().length * 8); + // Distance ascending order + // Lower distance buckets first + bucketIndex_ = -1; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'distance', + 'asc', + )) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), ); - // Increment the current node ID + // It's the LAST bucket that fails this + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] <= distance; + }), + ).toBe(true); } - // All of these nodes are in bucket 59 - const originalBucket = await nodeGraph.getBucket(bucketIndex); - if (originalBucket) { - expect(Object.keys(originalBucket).length).toBe( - nodeGraph.maxNodesPerBucket, + // Distance descending order + // Higher distance buckets first + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'distance', + 'desc', + )) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketDistances = bucket.map(([nodeId]) => + nodesUtils.nodeDistance(keyManager.getNodeId(), nodeId), ); - } else { - // Should be unreachable - fail('Bucket undefined'); + expect( + bucketDistances.slice(1).every((distance, i) => { + return bucketDistances[i] >= distance; + }), + ).toBe(true); } - - // Attempt to add a new node into this full bucket (increment the last node - // ID that was added) - const newNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - nodeGraph.maxNodesPerBucket + 1, - ); - const 
newNodeAddress = { host: '0.0.0.1' as Host, port: 1234 as Port }; - await nodeGraph.setNode(newNodeId, newNodeAddress); - - const finalBucket = await nodeGraph.getBucket(bucketIndex); - if (finalBucket) { - // We should still have a full bucket (but no more) - expect(Object.keys(finalBucket).length).toEqual( - nodeGraph.maxNodesPerBucket, + // Last updated ascending order + // Bucket index is ascending + bucketIndex_ = -1; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'lastUpdated', + 'asc', + )) { + expect(bucketIndex > bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketLastUpdateds = bucket.map( + ([, nodeData]) => nodeData.lastUpdated, ); - // Ensure that this new node is in the bucket - expect(finalBucket[newNodeId]).toEqual({ - address: newNodeAddress, - lastUpdated: expect.any(Date), - }); - // NODEID1 should have been removed from this bucket (as this was the least active) - // The first node added should have been removed from this bucket (as this - // was the least active, purely because it was inserted first) - expect(finalBucket[firstNodeId]).toBeUndefined(); - } else { - // Should be unreachable - fail('Bucket undefined'); + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return bucketLastUpdateds[i] <= lastUpdated; + }), + ).toBe(true); } - }); - test('enforces k-bucket size, retaining all nodes if adding a pre-existing node', async () => { - // Add k nodes to the database (importantly, they all go into the same bucket) - const bucketIndex = 59; - const currNodeId = nodesTestUtils.generateNodeIdForBucket( - nodeId, - bucketIndex, - ); - // Keep a record of the first node ID that we added - // const firstNodeId = currNodeId; - let increment = 1; - for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { - // Add the current node ID - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode( - nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, increment), - nodeAddress, + // Last updated descending order + // Bucket index is descending + bucketIndex_ = keyManager.getNodeId().length * 8; + for await (const [bucketIndex, bucket] of nodeGraph.getBuckets( + 'lastUpdated', + 'desc', + )) { + expect(bucketIndex < bucketIndex_).toBe(true); + bucketIndex_ = bucketIndex; + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(keyManager.getNodeId(), nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + const bucketLastUpdateds = bucket.map( + ([, nodeData]) => nodeData.lastUpdated, ); - // Increment the current node ID - skip for the last one to keep currNodeId - // as the last added node ID - if (i !== nodeGraph.maxNodesPerBucket) { - increment++; + expect( + bucketLastUpdateds.slice(1).every((lastUpdated, i) => { + return 
bucketLastUpdateds[i] >= lastUpdated; + }), + ).toBe(true); + } + await nodeGraph.stop(); + }); + test('reset buckets', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 100; i++) { + await nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const buckets0 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + // Reset the buckets according to the new node ID + // Note that this should normally be only executed when the key manager NodeID changes + // This means methods that use the KeyManager's node ID cannot be used here in this test + const nodeIdNew1 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew1); + const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets1.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets1) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); } } - // All of these nodes are in bucket 59 - const originalBucket = await nodeGraph.getBucket(bucketIndex); - if (originalBucket) { - expect(Object.keys(originalBucket).length).toBe( - nodeGraph.maxNodesPerBucket, + expect(buckets1).not.toStrictEqual(buckets0); + // Resetting again should change the space + const nodeIdNew2 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew2); + const buckets2 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets2.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets2) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew2, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + expect(buckets2).not.toStrictEqual(buckets1); + // Resetting to the same NodeId results in the same bucket structure + await nodeGraph.resetBuckets(nodeIdNew2); + const buckets3 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets3).toStrictEqual(buckets2); + // Resetting to an existing NodeId + const nodeIdExisting = buckets3[0][1][0][0]; + let nodeIdExistingFound = false; + await nodeGraph.resetBuckets(nodeIdExisting); + const buckets4 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets4.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets4) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + if (nodeId.equals(nodeIdExisting)) { + nodeIdExistingFound = true; + } + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdExisting, nodeId)).toBe( + bucketIndex, + ); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + 
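+ // Each re-bucketed entry should still carry a sane lastUpdated
+ // timestamp, set no earlier than the initial setNode calls above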
expect(nodeData.lastUpdated >= now).toBe(true); + } + } + expect(buckets4).not.toStrictEqual(buckets3); + // The existing node ID should not be put into the NodeGraph + expect(nodeIdExistingFound).toBe(false); + await nodeGraph.stop(); + }); + test('reset buckets is persistent', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const now = utils.getUnixtime(); + for (let i = 0; i < 100; i++) { + await nodeGraph.setNode(testNodesUtils.generateRandomNodeId(), { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const nodeIdNew1 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew1); + await nodeGraph.stop(); + await nodeGraph.start(); + const buckets1 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets1.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets1) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew1, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + const nodeIdNew2 = testNodesUtils.generateRandomNodeId(); + await nodeGraph.resetBuckets(nodeIdNew2); + await nodeGraph.stop(); + await nodeGraph.start(); + const buckets2 = await utils.asyncIterableArray(nodeGraph.getBuckets()); + expect(buckets2.length > 0).toBe(true); + for (const [bucketIndex, bucket] of buckets2) { + expect(bucket.length > 0).toBe(true); + for (const [nodeId, nodeData] of bucket) { + expect(nodeId.byteLength).toBe(32); + expect(nodesUtils.bucketIndex(nodeIdNew2, nodeId)).toBe(bucketIndex); + expect(nodeData.address.host).toBe('127.0.0.1'); + // Port of 0 is not allowed + expect(nodeData.address.port > 0).toBe(true); + expect(nodeData.address.port < 2 ** 16).toBe(true); + expect(nodeData.lastUpdated >= now).toBe(true); + } + } + expect(buckets2).not.toStrictEqual(buckets1); + await nodeGraph.stop(); + }); + test('get closest nodes, 40 nodes lower than target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 50 + i, + i, ); - } else { - // Should be unreachable - fail('Bucket undefined'); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId, 20); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // If we tried to re-add the first node, it would simply remove the original - // first node, as this is the "least active" - // We instead want to check that we don't mistakenly delete a node if we're - // updating an existing one - // So, re-add the last node - const 
newLastAddress: NodeAddress = { - host: '30.30.30.30' as Host, - port: 30 as Port, - }; - await nodeGraph.setNode(currNodeId, newLastAddress); - - const finalBucket = await nodeGraph.getBucket(bucketIndex); - if (finalBucket) { - // We should still have a full bucket - expect(Object.keys(finalBucket).length).toEqual( - nodeGraph.maxNodesPerBucket, + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 15 nodes lower than target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 50 + i, + i, ); - // Ensure that this new node is in the bucket - expect(finalBucket[currNodeId]).toEqual({ - address: newLastAddress, - lastUpdated: expect.any(Date), - }); - } else { - // Should be unreachable - fail('Bucket undefined'); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); } - }); - test('retrieves all buckets (in expected lexicographic order)', async () => { - // Bucket 0 is expected to never have any nodes (as nodeId XOR 0 = nodeId) - // Bucket 1 (minimum): - - const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); - const node1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; - await nodeGraph.setNode(node1Id, node1Address); - - // Bucket 4 (multiple nodes in 1 bucket): - const node41Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4); - const node41Address = { host: '41.41.41.41', port: 4141 } as NodeAddress; - await nodeGraph.setNode(node41Id, node41Address); - const node42Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4, 1); - const node42Address = { host: '42.42.42.42', port: 4242 } as NodeAddress; - await nodeGraph.setNode(node42Id, node42Address); - - // Bucket 10 (lexicographic ordering - should appear after 2): - const node10Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 10); - const node10Address = { host: '10.10.10.10', port: 1010 } as NodeAddress; - await nodeGraph.setNode(node10Id, node10Address); + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // Bucket 255 (maximum): - const node255Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); - const node255Address = { - host: '255.255.255.255', - port: 255, - } as NodeAddress; - await nodeGraph.setNode(node255Id, node255Address); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + 
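+ // A note on the distance assertions used across these closest-nodes
+ // tests: nodesUtils.nodeDistance is, in the usual Kademlia formulation,
+ // the XOR of the two node IDs read as an unsigned big-endian integer. A
+ // rough sketch of that metric (an assumption, not the verbatim
+ // implementation):
+ //
+ //   function nodeDistance(a: Uint8Array, b: Uint8Array): bigint {
+ //     let distance = 0n;
+ //     for (let i = 0; i < a.length; i++) {
+ //       // Shift in one XOR'd byte at a time, most significant byte first
+ //       distance = (distance << 8n) | BigInt(a[i] ^ b[i]);
+ //     }
+ //     return distance;
+ //   }
+ //
+ // nodesUtils.bucketSortByDistance then sorts [nodeId, nodeData] entries
+ // by this metric relative to a target node ID, which is what makes the
+ // prefix comparison against the getClosestNodes results meaningful.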
test('get closest nodes, 10 nodes lower than target, 30 nodes above, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 90 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - const buckets = await nodeGraph.getAllBuckets(); - expect(buckets.length).toBe(4); - // Buckets should be returned in lexicographic ordering (using hex keys to - ensure the bucket indexes are in numberical order) - expect(buckets).toEqual([ - { - [node1Id]: { - address: { host: '1.1.1.1', port: 1111 }, - lastUpdated: expect.any(String), - }, - }, - { - [node41Id]: { - address: { host: '41.41.41.41', port: 4141 }, - lastUpdated: expect.any(String), - }, - [node42Id]: { - address: { host: '42.42.42.42', port: 4242 }, - lastUpdated: expect.any(String), - }, - }, - { - [node10Id]: { - address: { host: '10.10.10.10', port: 1010 }, - lastUpdated: expect.any(String), - }, - }, - { - [node255Id]: { - address: { host: '255.255.255.255', port: 255 }, - lastUpdated: expect.any(String), - }, - }, - ]); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); }); - test( - 'refreshes buckets', - async () => { - const initialNodes: Record<NodeId, NodeData> = {}; - // Generate and add some nodes - for (let i = 1; i < 255; i += 20) { - const newNodeId = nodesTestUtils.generateNodeIdForBucket( - keyManager.getNodeId(), - i, - ); - const nodeAddress = { - host: hostGen(i), - port: i as Port, - }; - await nodeGraph.setNode(newNodeId, nodeAddress); - initialNodes[newNodeId] = { - id: newNodeId, - address: nodeAddress, - distance: nodesUtils.calculateDistance( - keyManager.getNodeId(), - newNodeId, - ), - }; - } + test('get closest nodes, 10 nodes lower than target, 30 nodes above, take 5', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 90 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId, 5); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = 
result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // Renew the keypair - await keyManager.renewRootKeyPair('newPassword'); - // Reset the test's node ID state - nodeId = keyManager.getNodeId(); - // Refresh the buckets - await nodeGraph.refreshBuckets(); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 5 nodes lower than target, 10 nodes above, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 95 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // Get all the new buckets, and expect that each node is in the correct bucket - const newBuckets = await nodeGraph.getAllBuckets(); - let nodeCount = 0; - for (const b of newBuckets) { - for (const n of Object.keys(b)) { - const nodeId = IdInternal.fromString(n); - // Check that it was a node in the original DB - expect(initialNodes[nodeId]).toBeDefined(); - // Check it's in the correct bucket - const expectedIndex = nodesUtils.calculateBucketIndex( - keyManager.getNodeId(), - nodeId, - ); - const expectedBucket = await nodeGraph.getBucket(expectedIndex); - expect(expectedBucket).toBeDefined(); - expect(expectedBucket![nodeId]).toBeDefined(); - // Check it has the correct address - expect(b[nodeId].address).toEqual(initialNodes[nodeId].address); - nodeCount++; - } - } - // We had less than k (20) nodes, so we expect that all nodes will be re-added - // If we had more than k nodes, we may lose some of them (because the nodes - // may be re-added to newly full buckets) - expect(Object.keys(initialNodes).length).toEqual(nodeCount); - }, - global.defaultTimeout * 4, - ); - test('updates node', async () => { - // New node added - const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); - const node1Address = { host: '1.1.1.1', port: 1 } as NodeAddress; - await nodeGraph.setNode(node1Id, node1Address); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 40 nodes above target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); 
+ const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 40; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 101 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // Check new node is in retrieved bucket from database - const bucket = await nodeGraph.getBucket(2); - const time1 = bucket![node1Id].lastUpdated; + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, 15 nodes above target, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + // Add 1 node to each bucket + for (let i = 0; i < 15; i++) { + const nodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 101 + i, + i, + ); + nodeIds.push([nodeId, {} as NodeData]); + await nodeGraph.setNode(nodeId, { + host: '127.0.0.1', + port: utils.getRandomInt(0, 2 ** 16), + } as NodeAddress); + } + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - // Update node and check that time is later - const newNode1Address = { host: '2.2.2.2', port: 2 } as NodeAddress; - await nodeGraph.updateNode(node1Id, newNode1Address); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); + }); + test('get closest nodes, no nodes, take 20', async () => { + const nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + const baseNodeId = keyManager.getNodeId(); + const nodeIds: NodeBucket = []; + const targetNodeId = testNodesUtils.generateNodeIdForBucket( + baseNodeId, + 100, + 2, + ); + const result = await nodeGraph.getClosestNodes(targetNodeId); + nodesUtils.bucketSortByDistance(nodeIds, targetNodeId); + const a = nodeIds.map((a) => nodesUtils.encodeNodeId(a[0])); + const b = result.map((a) => nodesUtils.encodeNodeId(a[0])); + // Are the closest nodes out of all of the nodes + expect(a.slice(0, b.length)).toEqual(b); - const bucket2 = await nodeGraph.getBucket(2); - const time2 = bucket2![node1Id].lastUpdated; - 
expect(bucket2![node1Id].address).toEqual(newNode1Address); - expect(time1 < time2).toBeTruthy(); + // Check that the list is strictly ascending + const closestNodeDistances = result.map(([nodeId]) => + nodesUtils.nodeDistance(targetNodeId, nodeId), + ); + expect( + closestNodeDistances.slice(1).every((distance, i) => { + return closestNodeDistances[i] < distance; + }), + ).toBe(true); + await nodeGraph.stop(); }); }); diff --git a/tests/nodes/NodeGraph.test.ts.old b/tests/nodes/NodeGraph.test.ts.old new file mode 100644 index 000000000..1960c02d3 --- /dev/null +++ b/tests/nodes/NodeGraph.test.ts.old @@ -0,0 +1,624 @@ +import type { Host, Port } from '@/network/types'; +import type { NodeAddress, NodeData, NodeId } from '@/nodes/types'; +import os from 'os'; +import path from 'path'; +import fs from 'fs'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import { DB } from '@matrixai/db'; +import { IdInternal } from '@matrixai/id'; +import NodeConnectionManager from '@/nodes/NodeConnectionManager'; +import NodeGraph from '@/nodes/NodeGraph'; +import * as nodesErrors from '@/nodes/errors'; +import KeyManager from '@/keys/KeyManager'; +import * as keysUtils from '@/keys/utils'; +import ForwardProxy from '@/network/ForwardProxy'; +import ReverseProxy from '@/network/ReverseProxy'; +import * as nodesUtils from '@/nodes/utils'; +import Sigchain from '@/sigchain/Sigchain'; +import * as nodesTestUtils from './utils'; + +describe(`${NodeGraph.name} test`, () => { + const password = 'password'; + let nodeGraph: NodeGraph; + let nodeId: NodeId; + + const nodeId1 = IdInternal.create([ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 5, + ]); + const dummyNode = nodesUtils.decodeNodeId( + 'vi3et1hrpv2m2lrplcm7cu913kr45v51cak54vm68anlbvuf83ra0', + )!; + + const logger = new Logger(`${NodeGraph.name} test`, LogLevel.ERROR, [ + new StreamHandler(), + ]); + let fwdProxy: ForwardProxy; + let revProxy: ReverseProxy; + let dataDir: string; + let keyManager: KeyManager; + let db: DB; + let nodeConnectionManager: NodeConnectionManager; + let sigchain: Sigchain; + + const hostGen = (i: number) => `${i}.${i}.${i}.${i}` as Host; + + const mockedGenerateDeterministicKeyPair = jest.spyOn( + keysUtils, + 'generateDeterministicKeyPair', + ); + + beforeEach(async () => { + mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { + return keysUtils.generateKeyPair(bits); + }); + + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const keysPath = `${dataDir}/keys`; + keyManager = await KeyManager.createKeyManager({ + password, + keysPath, + logger, + }); + fwdProxy = new ForwardProxy({ + authToken: 'auth', + logger: logger, + }); + + revProxy = new ReverseProxy({ + logger: logger, + }); + + await fwdProxy.start({ + tlsConfig: { + keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, + certChainPem: await keyManager.getRootCertChainPem(), + }, + }); + const dbPath = `${dataDir}/db`; + db = await DB.createDB({ + dbPath, + logger, + crypto: { + key: keyManager.dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); + sigchain = await Sigchain.createSigchain({ + keyManager: keyManager, + db: db, + logger: logger, + }); + nodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + nodeConnectionManager = new NodeConnectionManager({ + keyManager: keyManager, + nodeGraph: nodeGraph, + fwdProxy: fwdProxy, + revProxy: revProxy, + 
logger: logger, + }); + await nodeConnectionManager.start(); + // Retrieve the NodeGraph reference from NodeManager + nodeId = keyManager.getNodeId(); + }); + + afterEach(async () => { + await db.stop(); + await sigchain.stop(); + await nodeConnectionManager.stop(); + await nodeGraph.stop(); + await keyManager.stop(); + await fwdProxy.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); + }); + + test('NodeGraph readiness', async () => { + const nodeGraph2 = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + // @ts-ignore + await expect(nodeGraph2.destroy()).rejects.toThrow( + nodesErrors.ErrorNodeGraphRunning, + ); + // Should be a noop + await nodeGraph2.start(); + await nodeGraph2.stop(); + await nodeGraph2.destroy(); + await expect(async () => { + await nodeGraph2.start(); + }).rejects.toThrow(nodesErrors.ErrorNodeGraphDestroyed); + await expect(async () => { + await nodeGraph2.getBucket(0); + }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); + await expect(async () => { + await nodeGraph2.getBucket(0); + }).rejects.toThrow(nodesErrors.ErrorNodeGraphNotRunning); + }); + test('knows node (true and false case)', async () => { + // Known node + const nodeAddress1: NodeAddress = { + host: '127.0.0.1' as Host, + port: 11111 as Port, + }; + await nodeGraph.setNode(nodeId1, nodeAddress1); + expect(await nodeGraph.knowsNode(nodeId1)).toBeTruthy(); + + // Unknown node + expect(await nodeGraph.knowsNode(dummyNode)).toBeFalsy(); + }); + test('finds correct node address', async () => { + // New node added + const newNode2Id = nodeId1; + const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + // Get node address + const foundAddress = await nodeGraph.getNode(newNode2Id); + expect(foundAddress).toEqual({ host: '227.1.1.1', port: 4567 }); + }); + test('unable to find node address', async () => { + // New node added + const newNode2Id = nodeId1; + const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + // Get node address (of non-existent node) + const foundAddress = await nodeGraph.getNode(dummyNode); + expect(foundAddress).toBeUndefined(); + }); + test('adds a single node into a bucket', async () => { + // New node added + const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); + const newNode2Address = { host: '227.1.1.1', port: 4567 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + // Check new node is in retrieved bucket from database + // bucketIndex = 1 as "NODEID1" XOR "NODEID2" = 3 + const bucket = await nodeGraph.getBucket(1); + expect(bucket).toBeDefined(); + expect(bucket![newNode2Id]).toEqual({ + address: { host: '227.1.1.1', port: 4567 }, + lastUpdated: expect.any(Date), + }); + }); + test('adds multiple nodes into the same bucket', async () => { + // Add 3 new nodes into bucket 4 + const bucketIndex = 4; + const newNode1Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 0, + ); + const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + + const newNode2Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 1, + ); + const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + const newNode3Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, 
+ 2, + ); + const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; + await nodeGraph.setNode(newNode3Id, newNode3Address); + // Based on XOR values, all 3 nodes should appear in bucket 4 + const bucket = await nodeGraph.getBucket(4); + expect(bucket).toBeDefined(); + if (!bucket) fail('bucket should be defined, letting TS know'); + expect(bucket[newNode1Id]).toEqual({ + address: { host: '4.4.4.4', port: 4444 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode2Id]).toEqual({ + address: { host: '5.5.5.5', port: 5555 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode3Id]).toEqual({ + address: { host: '6.6.6.6', port: 6666 }, + lastUpdated: expect.any(Date), + }); + }); + test('adds a single node into different buckets', async () => { + // New node for bucket 3 + const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 3); + const newNode1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + // New node for bucket 255 (the highest possible bucket) + const newNode2Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); + const newNode2Address = { host: '2.2.2.2', port: 2222 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + const bucket3 = await nodeGraph.getBucket(3); + const bucket351 = await nodeGraph.getBucket(255); + if (bucket3 && bucket351) { + expect(bucket3[newNode1Id]).toEqual({ + address: { host: '1.1.1.1', port: 1111 }, + lastUpdated: expect.any(Date), + }); + expect(bucket351[newNode2Id]).toEqual({ + address: { host: '2.2.2.2', port: 2222 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + }); + test('deletes a single node (and removes bucket)', async () => { + // New node for bucket 2 + const newNode1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); + const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + + // Check the bucket is there first + const bucket = await nodeGraph.getBucket(2); + if (bucket) { + expect(bucket[newNode1Id]).toEqual({ + address: { host: '4.4.4.4', port: 4444 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // Delete the node + await nodeGraph.unsetNode(newNode1Id); + // Check bucket no longer exists + const newBucket = await nodeGraph.getBucket(2); + expect(newBucket).toBeUndefined(); + }); + test('deletes a single node (and retains remainder of bucket)', async () => { + // Add 3 new nodes into bucket 4 + const bucketIndex = 4; + const newNode1Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 0, + ); + const newNode1Address = { host: '4.4.4.4', port: 4444 } as NodeAddress; + await nodeGraph.setNode(newNode1Id, newNode1Address); + + const newNode2Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 1, + ); + const newNode2Address = { host: '5.5.5.5', port: 5555 } as NodeAddress; + await nodeGraph.setNode(newNode2Id, newNode2Address); + + const newNode3Id = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + 2, + ); + const newNode3Address = { host: '6.6.6.6', port: 6666 } as NodeAddress; + await nodeGraph.setNode(newNode3Id, newNode3Address); + // Based on XOR values, all 3 nodes should appear in bucket 4 + const bucket = await nodeGraph.getBucket(bucketIndex); + if (bucket) { + expect(bucket[newNode1Id]).toEqual({ + address: { host: '4.4.4.4', port: 
4444 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode2Id]).toEqual({ + address: { host: '5.5.5.5', port: 5555 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode3Id]).toEqual({ + address: { host: '6.6.6.6', port: 6666 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // Delete the node + await nodeGraph.unsetNode(newNode1Id); + // Check node no longer exists in the bucket + const newBucket = await nodeGraph.getBucket(bucketIndex); + if (newBucket) { + expect(newBucket[newNode1Id]).toBeUndefined(); + expect(bucket[newNode2Id]).toEqual({ + address: { host: '5.5.5.5', port: 5555 }, + lastUpdated: expect.any(Date), + }); + expect(bucket[newNode3Id]).toEqual({ + address: { host: '6.6.6.6', port: 6666 }, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('New bucket undefined'); + } + }); + test('enforces k-bucket size, removing least active node when a new node is discovered', async () => { + // Add k nodes to the database (importantly, they all go into the same bucket) + const bucketIndex = 59; + // Keep a record of the first node ID that we added + const firstNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + ); + for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { + // Add the current node ID + const nodeAddress = { + host: hostGen(i), + port: i as Port, + }; + await nodeGraph.setNode( + nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, i), + nodeAddress, + ); + // Increment the current node ID + } + // All of these nodes are in bucket 59 + const originalBucket = await nodeGraph.getBucket(bucketIndex); + if (originalBucket) { + expect(Object.keys(originalBucket).length).toBe( + nodeGraph.maxNodesPerBucket, + ); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // Attempt to add a new node into this full bucket (increment the last node + // ID that was added) + const newNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + nodeGraph.maxNodesPerBucket + 1, + ); + const newNodeAddress = { host: '0.0.0.1' as Host, port: 1234 as Port }; + await nodeGraph.setNode(newNodeId, newNodeAddress); + + const finalBucket = await nodeGraph.getBucket(bucketIndex); + if (finalBucket) { + // We should still have a full bucket (but no more) + expect(Object.keys(finalBucket).length).toEqual( + nodeGraph.maxNodesPerBucket, + ); + // Ensure that this new node is in the bucket + expect(finalBucket[newNodeId]).toEqual({ + address: newNodeAddress, + lastUpdated: expect.any(Date), + }); + // NODEID1 should have been removed from this bucket (as this was the least active) + // The first node added should have been removed from this bucket (as this + // was the least active, purely because it was inserted first) + expect(finalBucket[firstNodeId]).toBeUndefined(); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + }); + test('enforces k-bucket size, retaining all nodes if adding a pre-existing node', async () => { + // Add k nodes to the database (importantly, they all go into the same bucket) + const bucketIndex = 59; + const currNodeId = nodesTestUtils.generateNodeIdForBucket( + nodeId, + bucketIndex, + ); + // Keep a record of the first node ID that we added + // const firstNodeId = currNodeId; + let increment = 1; + for (let i = 1; i <= nodeGraph.maxNodesPerBucket; i++) { + // Add the current node ID + const nodeAddress = { + host: hostGen(i), + port: i as Port, + }; + await 
nodeGraph.setNode( + nodesTestUtils.generateNodeIdForBucket(nodeId, bucketIndex, increment), + nodeAddress, + ); + // Increment the current node ID - skip for the last one to keep currNodeId + // as the last added node ID + if (i !== nodeGraph.maxNodesPerBucket) { + increment++; + } + } + // All of these nodes are in bucket 59 + const originalBucket = await nodeGraph.getBucket(bucketIndex); + if (originalBucket) { + expect(Object.keys(originalBucket).length).toBe( + nodeGraph.maxNodesPerBucket, + ); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + + // If we tried to re-add the first node, it would simply remove the original + // first node, as this is the "least active" + // We instead want to check that we don't mistakenly delete a node if we're + // updating an existing one + // So, re-add the last node + const newLastAddress: NodeAddress = { + host: '30.30.30.30' as Host, + port: 30 as Port, + }; + await nodeGraph.setNode(currNodeId, newLastAddress); + + const finalBucket = await nodeGraph.getBucket(bucketIndex); + if (finalBucket) { + // We should still have a full bucket + expect(Object.keys(finalBucket).length).toEqual( + nodeGraph.maxNodesPerBucket, + ); + // Ensure that this new node is in the bucket + expect(finalBucket[currNodeId]).toEqual({ + address: newLastAddress, + lastUpdated: expect.any(Date), + }); + } else { + // Should be unreachable + fail('Bucket undefined'); + } + }); + test('retrieves all buckets (in expected lexicographic order)', async () => { + // Bucket 0 is expected to never have any nodes (as nodeId XOR 0 = nodeId) + // Bucket 1 (minimum): + + const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 1); + const node1Address = { host: '1.1.1.1', port: 1111 } as NodeAddress; + await nodeGraph.setNode(node1Id, node1Address); + + // Bucket 4 (multiple nodes in 1 bucket): + const node41Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4); + const node41Address = { host: '41.41.41.41', port: 4141 } as NodeAddress; + await nodeGraph.setNode(node41Id, node41Address); + const node42Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 4, 1); + const node42Address = { host: '42.42.42.42', port: 4242 } as NodeAddress; + await nodeGraph.setNode(node42Id, node42Address); + + // Bucket 10 (lexicographic ordering - should appear after 2): + const node10Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 10); + const node10Address = { host: '10.10.10.10', port: 1010 } as NodeAddress; + await nodeGraph.setNode(node10Id, node10Address); + + // Bucket 255 (maximum): + const node255Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 255); + const node255Address = { + host: '255.255.255.255', + port: 255, + } as NodeAddress; + await nodeGraph.setNode(node255Id, node255Address); + + const buckets = await nodeGraph.getAllBuckets(); + expect(buckets.length).toBe(4); + // Buckets should be returned in lexicographic ordering (using hex keys to + // ensure the bucket indexes are in numberical order) + expect(buckets).toEqual([ + { + [node1Id]: { + address: { host: '1.1.1.1', port: 1111 }, + lastUpdated: expect.any(String), + }, + }, + { + [node41Id]: { + address: { host: '41.41.41.41', port: 4141 }, + lastUpdated: expect.any(String), + }, + [node42Id]: { + address: { host: '42.42.42.42', port: 4242 }, + lastUpdated: expect.any(String), + }, + }, + { + [node10Id]: { + address: { host: '10.10.10.10', port: 1010 }, + lastUpdated: expect.any(String), + }, + }, + { + [node255Id]: { + address: { host: '255.255.255.255', port: 255 }, + lastUpdated: 
expect.any(String), + }, + }, + ]); + }); + test( + 'refreshes buckets', + async () => { + const initialNodes: Record<NodeId, NodeData> = {}; + // Generate and add some nodes + for (let i = 1; i < 255; i += 20) { + const newNodeId = nodesTestUtils.generateNodeIdForBucket( + keyManager.getNodeId(), + i, + ); + const nodeAddress = { + host: hostGen(i), + port: i as Port, + }; + await nodeGraph.setNode(newNodeId, nodeAddress); + initialNodes[newNodeId] = { + id: newNodeId, + address: nodeAddress, + distance: nodesUtils.calculateDistance( + keyManager.getNodeId(), + newNodeId, + ), + }; + } + + // Renew the keypair + await keyManager.renewRootKeyPair('newPassword'); + // Reset the test's node ID state + nodeId = keyManager.getNodeId(); + // Refresh the buckets + await nodeGraph.refreshBuckets(); + + // Get all the new buckets, and expect that each node is in the correct bucket + const newBuckets = await nodeGraph.getAllBuckets(); + let nodeCount = 0; + for (const b of newBuckets) { + for (const n of Object.keys(b)) { + const nodeId = IdInternal.fromString(n); + // Check that it was a node in the original DB + expect(initialNodes[nodeId]).toBeDefined(); + // Check it's in the correct bucket + const expectedIndex = nodesUtils.calculateBucketIndex( + keyManager.getNodeId(), + nodeId, + ); + const expectedBucket = await nodeGraph.getBucket(expectedIndex); + expect(expectedBucket).toBeDefined(); + expect(expectedBucket![nodeId]).toBeDefined(); + // Check it has the correct address + expect(b[nodeId].address).toEqual(initialNodes[nodeId].address); + nodeCount++; + } + } + // We had less than k (20) nodes, so we expect that all nodes will be re-added + // If we had more than k nodes, we may lose some of them (because the nodes + // may be re-added to newly full buckets) + expect(Object.keys(initialNodes).length).toEqual(nodeCount); + }, + global.defaultTimeout * 4, + ); + test('updates node', async () => { + // New node added + const node1Id = nodesTestUtils.generateNodeIdForBucket(nodeId, 2); + const node1Address = { host: '1.1.1.1', port: 1 } as NodeAddress; + await nodeGraph.setNode(node1Id, node1Address); + + // Check new node is in retrieved bucket from database + const bucket = await nodeGraph.getBucket(2); + const time1 = bucket![node1Id].lastUpdated; + + // Update node and check that time is later + const newNode1Address = { host: '2.2.2.2', port: 2 } as NodeAddress; + await nodeGraph.updateNode(node1Id, newNode1Address); + + const bucket2 = await nodeGraph.getBucket(2); + const time2 = bucket2![node1Id].lastUpdated; + expect(bucket2![node1Id].address).toEqual(newNode1Address); + expect(time1 < time2).toBeTruthy(); + }); +}); diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index f3de57cd8..b4e382109 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -7,6 +7,7 @@ import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import UTP from 'utp-native'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import * as keysUtils from '@/keys/utils'; @@ -16,8 +17,12 @@ import NodeManager from '@/nodes/NodeManager'; import Proxy from '@/network/Proxy'; import Sigchain from '@/sigchain/Sigchain'; import * as claimsUtils from '@/claims/utils'; -import { promisify, sleep } from '@/utils'; +import { promise, promisify, sleep } from '@/utils'; import * as nodesUtils from '@/nodes/utils'; +import * as utilsPB from 
'@/proto/js/polykey/v1/utils/utils_pb'; +import * as nodesErrors from '@/nodes/errors'; +import * as nodesTestUtils from './utils'; +import { generateNodeIdForBucket } from './utils'; describe(`${NodeManager.name} test`, () => { const password = 'password'; @@ -26,6 +31,7 @@ describe(`${NodeManager.name} test`, () => { ]); let dataDir: string; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let proxy: Proxy; let keyManager: KeyManager; @@ -37,14 +43,22 @@ describe(`${NodeManager.name} test`, () => { const serverHost = '::1' as Host; const externalHost = '127.0.0.1' as Host; + const localhost = '127.0.0.1' as Host; + const port = 55556 as Port; const serverPort = 0 as Port; const externalPort = 0 as Port; const mockedGenerateDeterministicKeyPair = jest.spyOn( keysUtils, 'generateDeterministicKeyPair', ); + const mockedPingNode = jest.fn(); // Jest.spyOn(NodeManager.prototype, 'pingNode'); + const dummyNodeConnectionManager = { + pingNode: mockedPingNode, + } as unknown as NodeConnectionManager; beforeEach(async () => { + mockedPingNode.mockClear(); + mockedPingNode.mockImplementation(async (_) => true); mockedGenerateDeterministicKeyPair.mockImplementation((bits, _) => { return keysUtils.generateKeyPair(bits); }); @@ -99,16 +113,20 @@ describe(`${NodeManager.name} test`, () => { keyManager, logger, }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, + queue, proxy, logger, }); - await nodeConnectionManager.start(); }); afterEach(async () => { + mockedPingNode.mockClear(); + mockedPingNode.mockImplementation(async (_) => true); await nodeConnectionManager.stop(); + await queue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await sigchain.stop(); @@ -129,6 +147,7 @@ describe(`${NodeManager.name} test`, () => { 'pings node', async () => { let server: PolykeyAgent | undefined; + let nodeManager: NodeManager | undefined; try { server = await PolykeyAgent.createPolykeyAgent({ password: 'password', @@ -148,14 +167,17 @@ describe(`${NodeManager.name} test`, () => { }; await nodeGraph.setNode(serverNodeId, serverNodeAddress); - const nodeManager = new NodeManager({ + nodeManager = new NodeManager({ db, sigchain, keyManager, nodeGraph, nodeConnectionManager, + queue, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // Set server node offline await server.stop(); @@ -192,6 +214,7 @@ describe(`${NodeManager.name} test`, () => { expect(active3).toBe(false); } finally { // Clean up + await nodeManager?.stop(); await server?.stop(); await server?.destroy(); } @@ -200,6 +223,7 @@ describe(`${NodeManager.name} test`, () => { ); // Ping needs to timeout (takes 20 seconds + setup + pulldown) test('getPublicKey', async () => { let server: PolykeyAgent | undefined; + let nodeManager: NodeManager | undefined; try { server = await PolykeyAgent.createPolykeyAgent({ password: 'password', @@ -219,14 +243,17 @@ describe(`${NodeManager.name} test`, () => { }; await nodeGraph.setNode(serverNodeId, serverNodeAddress); - const nodeManager = new NodeManager({ + nodeManager = new NodeManager({ db, sigchain, keyManager, nodeGraph, nodeConnectionManager, + queue, logger, }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // We want to get the public key of the server const key = await nodeManager.getPublicKey(serverNodeId); @@ -234,6 +261,7 @@ describe(`${NodeManager.name} test`, () => { expect(key).toEqual(expectedKey); } 
finally { // Clean up + await nodeManager?.stop(); await server?.stop(); await server?.destroy(); } @@ -403,26 +431,661 @@ describe(`${NodeManager.name} test`, () => { } }); test('can request chain data', async () => { - // Cross signing claims - await y.nodeManager.claimNode(xNodeId); + let nodeManager: NodeManager | undefined; + try { + // Cross signing claims + await y.nodeManager.claimNode(xNodeId); - const nodeManager = new NodeManager({ - db, - sigchain, - keyManager, - nodeGraph, - nodeConnectionManager, - logger, + nodeManager = new NodeManager({ + db, + sigchain, + keyManager, + nodeGraph, + nodeConnectionManager, + queue, + logger, + }); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + + await nodeGraph.setNode(xNodeId, xNodeAddress); + + // We want to request the chain data of the server + const chainData = JSON.stringify( + await nodeManager.requestChainData(xNodeId), + ); + expect(chainData).toContain(nodesUtils.encodeNodeId(xNodeId)); + expect(chainData).toContain(nodesUtils.encodeNodeId(yNodeId)); + } finally { + await nodeManager?.stop(); + } + }); + }); + test('should add a node when bucket has room', async () => { + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + await nodeManager.setNode(nodeId, {} as NodeAddress); + + // Checking bucket + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(1); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should update a node if node exists', async () => { + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + await nodeManager.setNode(nodeId, { + host: '' as Host, + port: 11111 as Port, }); - await nodeGraph.setNode(xNodeId, xNodeAddress); + const nodeData = (await nodeGraph.getNode(nodeId))!; + await sleep(1100); - // We want to get the public key of the server - const chainData = JSON.stringify( - await nodeManager.requestChainData(xNodeId), + // Should update the node + await nodeManager.setNode(nodeId, { + host: '' as Host, + port: 22222 as Port, + }); + + const newNodeData = (await nodeGraph.getNode(nodeId))!; + expect(newNodeData.address.port).not.toEqual(nodeData.address.port); + expect(newNodeData.lastUpdated).not.toEqual(nodeData.lastUpdated); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should not add node if bucket is full and old node is alive', async () => { + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); +
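Every NodeManager test in this hunk repeats the same start/stop choreography: construct a Queue and a NodeManager, start the queue, then the NodeManager, then start the NodeConnectionManager with the NodeManager handed in, and tear down in reverse inside finally. A minimal sketch of a helper that could factor that out, reusing only constructor shapes visible in this patch; withNodeManager is hypothetical and the import paths are assumed from the aliases used here:

import type { DB } from '@matrixai/db';
import type Logger from '@matrixai/logger';
import type KeyManager from '@/keys/KeyManager';
import type NodeGraph from '@/nodes/NodeGraph';
import type NodeConnectionManager from '@/nodes/NodeConnectionManager';
import type Sigchain from '@/sigchain/Sigchain';
import Queue from '@/nodes/Queue';
import NodeManager from '@/nodes/NodeManager';

// Hypothetical helper: run `f` against a fully started NodeManager and tear
// everything down in reverse order, mirroring the try/finally blocks here.
async function withNodeManager(
  deps: {
    db: DB;
    keyManager: KeyManager;
    nodeGraph: NodeGraph;
    nodeConnectionManager: NodeConnectionManager;
    logger: Logger;
  },
  f: (nodeManager: NodeManager) => Promise<void>,
): Promise<void> {
  const queue = new Queue({ logger: deps.logger });
  const nodeManager = new NodeManager({
    db: deps.db,
    sigchain: {} as Sigchain, // stubbed exactly as these tests stub it
    keyManager: deps.keyManager,
    nodeGraph: deps.nodeGraph,
    nodeConnectionManager: deps.nodeConnectionManager,
    queue,
    logger: deps.logger,
  });
  try {
    await queue.start();
    await nodeManager.start();
    await deps.nodeConnectionManager.start({ nodeManager });
    await f(nodeManager);
  } finally {
    await nodeManager.stop();
    await queue.stop();
  }
}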
await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(true); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); + const oldestNode = await nodeGraph.getNode(oldestNodeId!); + // Waiting for a second to tick over + await sleep(1500); + // Adding a new node with bucket full + await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); + // Bucket still contains max nodes + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); + // New node was not added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeUndefined(); + // Oldest node was updated + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew!.lastUpdated).not.toEqual(oldestNode!.lastUpdated); + nodeManagerPingMock.mockRestore(); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should add node if bucket is full, old node is alive and force is set', async () => { + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(true); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); + // Adding a new node with bucket full + await nodeManager.setNode( + nodeId, + { port: 55555 } as NodeAddress, + false, + true, + ); + // Bucket still contains max nodes + const bucket = await nodeManager.getBucket(bucketIndex); + expect(bucket).toHaveLength(nodeGraph.nodeBucketLimit); + // New node was added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeDefined(); + // Oldest node was removed + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew).toBeUndefined(); + nodeManagerPingMock.mockRestore(); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should add node if bucket is full and old node is dead', async () => { + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ 
nodeManager }); + const localNodeId = keyManager.getNodeId(); + const bucketIndex = 100; + // Creating 20 nodes in bucket + for (let i = 1; i <= 20; i++) { + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + i, + ); + await nodeManager.setNode(nodeId, { port: i } as NodeAddress); + } + const nodeId = nodesTestUtils.generateNodeIdForBucket( + localNodeId, + bucketIndex, + ); + // Mocking ping + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); + nodeManagerPingMock.mockResolvedValue(false); + const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); + // Adding a new node with bucket full + await nodeManager.setNode(nodeId, { port: 55555 } as NodeAddress, true); + // New node was added + const node = await nodeGraph.getNode(nodeId); + expect(node).toBeDefined(); + // Oldest node was removed + const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); + expect(oldestNodeNew).toBeUndefined(); + nodeManagerPingMock.mockRestore(); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should add node when an incoming connection is established', async () => { + let server: PolykeyAgent | undefined; + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: {} as NodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + server = await PolykeyAgent.createPolykeyAgent({ + password: 'password', + nodePath: path.join(dataDir, 'server'), + keysConfig: { + rootKeyPairBits: 2048, + }, + networkConfig: { + proxyHost: localhost, + }, + logger: logger, + }); + const serverNodeId = server.keyManager.getNodeId(); + const serverNodeAddress: NodeAddress = { + host: server.proxy.getProxyHost(), + port: server.proxy.getProxyPort(), + }; + await nodeGraph.setNode(serverNodeId, serverNodeAddress); + + const expectedHost = proxy.getProxyHost(); + const expectedPort = proxy.getProxyPort(); + const expectedNodeId = keyManager.getNodeId(); + + const nodeData = await server.nodeGraph.getNode(expectedNodeId); + expect(nodeData).toBeUndefined(); + + // Now we want to connect to the server by making an echo request. 
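The echo call below goes through withConnF, which this patch threads through all the connection tests. As far as these tests rely on it, its contract is acquire-use-release; a hedged sketch of that shape (ours, not the real implementation; Conn stands in for the actual connection type):

// Sketch of the acquire/use/release contract withConnF appears to provide:
// the callback gets a live connection, and the connection reference is
// released afterwards even if the callback throws.
async function withConnFSketch<Conn, T>(
  acquire: () => Promise<{ conn: Conn; release: () => Promise<void> }>,
  f: (conn: Conn) => Promise<T>,
): Promise<T> {
  const { conn, release } = await acquire();
  try {
    return await f(conn);
  } finally {
    await release();
  }
}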
+ await nodeConnectionManager.withConnF(serverNodeId, async (conn) => { + const client = conn.getClient(); + await client.echo(new utilsPB.EchoMessage().setChallenge('hello')); + }); + + const nodeData2 = await server.nodeGraph.getNode(expectedNodeId); + expect(nodeData2).toBeDefined(); + expect(nodeData2?.address.host).toEqual(expectedHost); + expect(nodeData2?.address.port).toEqual(expectedPort); + } finally { + // Clean up + await server?.stop(); + await server?.destroy(); + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should not add nodes to full bucket if pings succeed', async () => { + mockedPingNode.mockImplementation(async (_) => true); + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + logger, + }); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Helpers + const listBucket = async (bucketIndex: number) => { + const bucket = await nodeManager.getBucket(bucketIndex); + return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); + }; + + // Pings succeed, node not added + mockedPingNode.mockImplementation(async (_) => true); + const newNode = generateNodeIdForBucket(nodeId, 100, 21); + await nodeManager.setNode(newNode, address); + expect(await listBucket(100)).not.toContain( + nodesUtils.encodeNodeId(newNode), + ); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should add nodes to full bucket if pings fail', async () => { + mockedPingNode.mockImplementation(async (_) => true); + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + logger, + }); + await queue.start(); + await nodeManager.start(); + try { + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Helpers + const listBucket = async (bucketIndex: number) => { + const bucket = await nodeManager.getBucket(bucketIndex); + return bucket?.map(([nodeId]) => nodesUtils.encodeNodeId(nodeId)); + }; + + // Pings fail, new nodes get added + mockedPingNode.mockImplementation(async (_) => false); + const newNode1 = generateNodeIdForBucket(nodeId, 100, 22); + const newNode2 = generateNodeIdForBucket(nodeId, 100, 23); + const newNode3 = generateNodeIdForBucket(nodeId, 100, 24); + await nodeManager.setNode(newNode1, address); + await nodeManager.setNode(newNode2, address); + await nodeManager.setNode(newNode3, address); + await queue.drained(); + const list = await listBucket(100); + expect(list).toContain(nodesUtils.encodeNodeId(newNode1)); + expect(list).toContain(nodesUtils.encodeNodeId(newNode2)); + expect(list).toContain(nodesUtils.encodeNodeId(newNode3)); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should not block
when bucket is full', async () => { + const tempNodeGraph = await NodeGraph.createNodeGraph({ + db, + keyManager, + logger, + }); + mockedPingNode.mockImplementation(async (_) => true); + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph: tempNodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + logger, + }); + await queue.start(); + await nodeManager.start(); + try { + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Set node does not block + const delayPing = promise(); + mockedPingNode.mockImplementation(async (_) => { + await delayPing.p; + return true; + }); + const newNode4 = generateNodeIdForBucket(nodeId, 100, 25); + // Set manually to non-blocking + await expect( + nodeManager.setNode(newNode4, address, false), + ).resolves.toBeUndefined(); + delayPing.resolveP(); + await queue.drained(); + } finally { + await nodeManager.stop(); + await queue.stop(); + await tempNodeGraph.stop(); + await tempNodeGraph.destroy(); + } + }); + test('should block when blocking is set to true', async () => { + mockedPingNode.mockImplementation(async (_) => true); + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + logger, + }); + await queue.start(); + await nodeManager.start(); + try { + await nodeConnectionManager.start({ nodeManager }); + const nodeId = keyManager.getNodeId(); + const address = { host: localhost, port }; + // Let's fill a bucket + for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { + const newNode = generateNodeIdForBucket(nodeId, 100, i); + await nodeManager.setNode(newNode, address); + } + + // Set node can block + mockedPingNode.mockClear(); + mockedPingNode.mockImplementation(async () => true); + const newNode5 = generateNodeIdForBucket(nodeId, 100, 25); + await expect( + nodeManager.setNode(newNode5, address, true), + ).resolves.toBeUndefined(); + expect(mockedPingNode).toBeCalled(); + } finally { + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should update deadline when updating a bucket', async () => { + const refreshBucketTimeout = 100000; + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, + }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + try { + mockRefreshBucket.mockImplementation(async () => {}); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + // @ts-ignore: kidnap map + const deadlineMap = nodeManager.refreshBucketDeadlineMap; + // Getting starting value + const bucket = 0; + const startingDeadline = deadlineMap.get(bucket); + const nodeId = nodesTestUtils.generateNodeIdForBucket( + keyManager.getNodeId(), + bucket, ); - expect(chainData).toContain(nodesUtils.encodeNodeId(xNodeId)); - expect(chainData).toContain(nodesUtils.encodeNodeId(yNodeId)); + await sleep(1000); + await 
nodeManager.setNode(nodeId, {} as NodeAddress); + // Deadline should be updated + const newDeadline = deadlineMap.get(bucket); + expect(newDeadline).not.toEqual(startingDeadline); + } finally { + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should add buckets to the queue when exceeding deadline', async () => { + const refreshBucketTimeout = 100; + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + const mockRefreshBucketQueueAdd = jest.spyOn( + NodeManager.prototype, + 'refreshBucketQueueAdd', + ); + try { + mockRefreshBucket.mockImplementation(async () => {}); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + // Getting starting value + expect(mockRefreshBucketQueueAdd).toHaveBeenCalledTimes(0); + await sleep(200); + expect(mockRefreshBucketQueueAdd).toHaveBeenCalledTimes(256); + } finally { + mockRefreshBucketQueueAdd.mockRestore(); + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should digest queue to refresh buckets', async () => { + const refreshBucketTimeout = 1000000; + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, + }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + mockRefreshBucket.mockImplementation(async () => {}); + nodeManager.refreshBucketQueueAdd(1); + nodeManager.refreshBucketQueueAdd(2); + nodeManager.refreshBucketQueueAdd(3); + nodeManager.refreshBucketQueueAdd(4); + nodeManager.refreshBucketQueueAdd(5); + await nodeManager.refreshBucketQueueDrained(); + expect(mockRefreshBucket).toHaveBeenCalledTimes(5); + + // Add buckets to queue + // check if refresh buckets was called + } finally { + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + await queue.stop(); + } + }); + test('should abort refreshBucket queue when stopping', async () => { + const refreshBucketTimeout = 1000000; + const queue = new Queue({ logger }); + const nodeManager = new NodeManager({ + db, + sigchain: {} as Sigchain, + keyManager, + nodeGraph, + nodeConnectionManager: dummyNodeConnectionManager, + queue, + refreshBucketTimerDefault: refreshBucketTimeout, + logger, + }); + const mockRefreshBucket = jest.spyOn( + NodeManager.prototype, + 'refreshBucket', + ); + try { + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + mockRefreshBucket.mockImplementation( + async (bucket, options: { signal?: AbortSignal } = {}) => { + const { signal } = { ...options }; + const prom = promise(); + signal?.addEventListener('abort', () => + prom.rejectP(new nodesErrors.ErrorNodeAborted()), + ); + await prom.p; + }, + ); + nodeManager.refreshBucketQueueAdd(1); + nodeManager.refreshBucketQueueAdd(2); + nodeManager.refreshBucketQueueAdd(3); + nodeManager.refreshBucketQueueAdd(4); + nodeManager.refreshBucketQueueAdd(5); + await 
nodeManager.stop(); + } finally { + mockRefreshBucket.mockRestore(); + await nodeManager.stop(); + await queue.stop(); + } }); }); diff --git a/tests/nodes/utils.test.ts b/tests/nodes/utils.test.ts index ee1aeadc4..c87a82f26 100644 --- a/tests/nodes/utils.test.ts +++ b/tests/nodes/utils.test.ts @@ -1,48 +1,69 @@ import type { NodeId } from '@/nodes/types'; +import os from 'os'; +import path from 'path'; +import fs from 'fs'; +import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; +import lexi from 'lexicographic-integer'; import { IdInternal } from '@matrixai/id'; +import { DB } from '@matrixai/db'; import * as nodesUtils from '@/nodes/utils'; +import * as keysUtils from '@/keys/utils'; +import * as utils from '@/utils'; +import * as testNodesUtils from './utils'; -describe('Nodes utils', () => { - test('basic distance calculation', async () => { - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 5, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 23, 0, - 0, 0, 0, 0, 0, 0, 0, 1, - ]); - - const distance = nodesUtils.calculateDistance(nodeId1, nodeId2); - expect(distance).toEqual(316912758671486456376015716356n); +describe('nodes/utils', () => { + const logger = new Logger(`nodes/utils test`, LogLevel.WARN, [ + new StreamHandler(), + ]); + let dataDir: string; + let db: DB; + beforeEach(async () => { + dataDir = await fs.promises.mkdtemp( + path.join(os.tmpdir(), 'polykey-test-'), + ); + const dbKey = await keysUtils.generateKey(); + const dbPath = `${dataDir}/db`; + db = await DB.createDB({ + dbPath, + logger, + crypto: { + key: dbKey, + ops: { + encrypt: keysUtils.encryptWithKey, + decrypt: keysUtils.decryptWithKey, + }, + }, + }); }); - test('calculates correct first bucket (bucket 0)', async () => { - // "1" XOR "0" = distance of 1 - // Therefore, bucket 0 - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); - expect(bucketIndex).toBe(0); + afterEach(async () => { + await db.stop(); + await fs.promises.rm(dataDir, { + force: true, + recursive: true, + }); }); - test('calculates correct arbitrary bucket (bucket 63)', async () => { - const nodeId1 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 255, 0, 0, 0, 0, 0, 0, 0, - ]); - const nodeId2 = IdInternal.create([ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, - ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); - expect(bucketIndex).toBe(63); + test('calculating bucket index from the same node ID', () => { + const nodeId1 = IdInternal.create([0]); + const nodeId2 = IdInternal.create([0]); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + expect(distance).toBe(0n); + expect(() => nodesUtils.bucketIndex(nodeId1, nodeId2)).toThrow(RangeError); }); - test('calculates correct last bucket (bucket 255)', async () => { + test('calculating bucket index 0', () => { + // Distance is calculated based on XOR operation + // 1 ^ 0 == 1 + // Distance of 1 is bucket 0 + const nodeId1 = IdInternal.create([1]); + const nodeId2 = IdInternal.create([0]); + 
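The assertions that follow pin down how nodeDistance and bucketIndex relate: the distance is the XOR of the two IDs read as a big-endian bigint, and the bucket index is the position of its highest set bit, so 2^i <= distance < 2^(i + 1) holds for bucket i. A self-contained sketch of that relationship (ours, not the '@/nodes/utils' code):

// Distance: XOR the byte arrays and read the result as a big-endian bigint.
function nodeDistanceSketch(a: Uint8Array, b: Uint8Array): bigint {
  let distance = 0n;
  for (let i = 0; i < a.length; i++) {
    distance = (distance << 8n) | BigInt(a[i] ^ b[i]);
  }
  return distance;
}

// Bucket index: the highest set bit of the distance. Equal IDs have
// distance 0 and no bucket, matching the RangeError asserted just above.
function bucketIndexSketch(a: Uint8Array, b: Uint8Array): number {
  const distance = nodeDistanceSketch(a, b);
  if (distance === 0n) throw new RangeError();
  return distance.toString(2).length - 1;
}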
const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); + expect(distance).toBe(1n); + expect(bucketIndex).toBe(0); + // Bucket invariant: 2^i <= distance < 2^(i + 1) + expect(2 ** bucketIndex <= distance).toBe(true); + expect(distance < 2 ** (bucketIndex + 1)).toBe(true); + }); + test('calculating bucket index 255', () => { const nodeId1 = IdInternal.create([ 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); @@ -51,7 +72,121 @@ describe('Nodes utils', () => { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]); - const bucketIndex = nodesUtils.calculateBucketIndex(nodeId1, nodeId2); + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); expect(bucketIndex).toBe(255); + // Bucket invariant: 2^i <= distance < 2^(i + 1) + expect(2 ** bucketIndex <= distance).toBe(true); + expect(distance < 2 ** (bucketIndex + 1)).toBe(true); + }); + test('calculating bucket index randomly', () => { + for (let i = 0; i < 1000; i++) { + const nodeId1 = testNodesUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); + if (nodeId1.equals(nodeId2)) { + continue; + } + const distance = nodesUtils.nodeDistance(nodeId1, nodeId2); + const bucketIndex = nodesUtils.bucketIndex(nodeId1, nodeId2); + // Bucket invariant: 2^i <= distance < 2^(i + 1) + expect(2 ** bucketIndex <= distance).toBe(true); + expect(distance < 2 ** (bucketIndex + 1)).toBe(true); + } + }); + test('parse NodeGraph buckets db key', async () => { + const bucketsDb = await db.level('buckets'); + const data: Array<{ + bucketIndex: number; + bucketKey: string; + nodeId: NodeId; + key: Buffer; + }> = []; + for (let i = 0; i < 1000; i++) { + const bucketIndex = Math.floor(Math.random() * (255 + 1)); + const bucketKey = nodesUtils.bucketKey(bucketIndex); + const nodeId = testNodesUtils.generateRandomNodeId(); + data.push({ + bucketIndex, + bucketKey, + nodeId, + key: Buffer.concat([Buffer.from(bucketKey), nodeId]), + }); + const bucketDomain = ['buckets', bucketKey]; + await db.put(bucketDomain, nodesUtils.bucketDbKey(nodeId), null); + } + // LevelDB will store keys in lexicographic order + // Use the key property as a concatenated buffer of the bucket key and node ID + data.sort((a, b) => Buffer.compare(a.key, b.key)); + let i = 0; + for await (const key of bucketsDb.createKeyStream()) { + const { bucketIndex, bucketKey, nodeId } = nodesUtils.parseBucketsDbKey( + key as Buffer, + ); + expect(bucketIndex).toBe(data[i].bucketIndex); + expect(bucketKey).toBe(data[i].bucketKey); + expect(nodeId.equals(data[i].nodeId)).toBe(true); + i++; + } + }); + test('parse NodeGraph lastUpdated buckets db key', async () => { + const lastUpdatedDb = await db.level('lastUpdated'); + const data: Array<{ + bucketIndex: number; + bucketKey: string; + lastUpdated: number; + nodeId: NodeId; + key: Buffer; + }> = []; + for (let i = 0; i < 1000; i++) { + const bucketIndex = Math.floor(Math.random() * (255 + 1)); + const bucketKey = lexi.pack(bucketIndex, 'hex'); + const lastUpdated = utils.getUnixtime(); + const nodeId = testNodesUtils.generateRandomNodeId(); + const lastUpdatedKey = nodesUtils.lastUpdatedBucketDbKey( + lastUpdated, + nodeId, + ); + data.push({ + bucketIndex, + bucketKey, + lastUpdated, + nodeId, + key: Buffer.concat([Buffer.from(bucketKey), lastUpdatedKey]), + }); + const
lastUpdatedDomain = ['lastUpdated', bucketKey]; + await db.put(lastUpdatedDomain, lastUpdatedKey, null); + } + // LevelDB will store keys in lexicographic order + // Use the key property as a concatenated buffer of + // the bucket key and last updated and node ID + data.sort((a, b) => Buffer.compare(a.key, b.key)); + let i = 0; + for await (const key of lastUpdatedDb.createKeyStream()) { + const { bucketIndex, bucketKey, lastUpdated, nodeId } = + nodesUtils.parseLastUpdatedBucketsDbKey(key as Buffer); + expect(bucketIndex).toBe(data[i].bucketIndex); + expect(bucketKey).toBe(data[i].bucketKey); + expect(lastUpdated).toBe(data[i].lastUpdated); + expect(nodeId.equals(data[i].nodeId)).toBe(true); + i++; + } + }); + test('should generate random distance for a bucket', async () => { + // Const baseNodeId = testNodesUtils.generateRandomNodeId(); + const zeroNodeId = IdInternal.fromBuffer(Buffer.alloc(32, 0)); + for (let i = 0; i < 255; i++) { + const randomDistance = nodesUtils.generateRandomDistanceForBucket(i); + expect(nodesUtils.bucketIndex(zeroNodeId, randomDistance)).toEqual(i); + } + }); + test('should generate random NodeId for a bucket', async () => { + const baseNodeId = testNodesUtils.generateRandomNodeId(); + for (let i = 0; i < 255; i++) { + const randomDistance = nodesUtils.generateRandomNodeIdForBucket( + baseNodeId, + i, + ); + expect(nodesUtils.bucketIndex(baseNodeId, randomDistance)).toEqual(i); + } }); }); diff --git a/tests/nodes/utils.ts b/tests/nodes/utils.ts index fca9ad53b..e6c603e14 100644 --- a/tests/nodes/utils.ts +++ b/tests/nodes/utils.ts @@ -1,9 +1,27 @@ import type { NodeId, NodeAddress } from '@/nodes/types'; - import type PolykeyAgent from '@/PolykeyAgent'; import { IdInternal } from '@matrixai/id'; +import * as keysUtils from '@/keys/utils'; import { bigInt2Bytes } from '@/utils'; +/** + * Generate random `NodeId` + * If `readable` is `true`, then it will generate a `NodeId` where + * its binary string form will only contain hex characters + * However the `NodeId` will not be uniformly random as it will not cover + * the full space of possible node IDs + * Prefer to keep `readable` `false` if possible to ensure tests are robust + */ +function generateRandomNodeId(readable: boolean = false): NodeId { + if (readable) { + const random = keysUtils.getRandomBytesSync(16).toString('hex'); + return IdInternal.fromString(random); + } else { + const random = keysUtils.getRandomBytesSync(32); + return IdInternal.fromBuffer(random); + } +} + /** * Generate a deterministic NodeId for a specific bucket given an existing NodeId * This requires solving the bucket index (`i`) and distance equation: @@ -61,4 +79,4 @@ async function nodesConnect(localNode: PolykeyAgent, remoteNode: PolykeyAgent) { } as NodeAddress); } -export { generateNodeIdForBucket, nodesConnect }; +export { generateRandomNodeId, generateNodeIdForBucket, nodesConnect }; diff --git a/tests/notifications/NotificationsManager.test.ts b/tests/notifications/NotificationsManager.test.ts index cd3e1eaaa..d616fce2e 100644 --- a/tests/notifications/NotificationsManager.test.ts +++ b/tests/notifications/NotificationsManager.test.ts @@ -8,6 +8,7 @@ import path from 'path'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; +import Queue from '@/nodes/Queue'; import PolykeyAgent from '@/PolykeyAgent'; import ACL from '@/acl/ACL'; import Sigchain from '@/sigchain/Sigchain'; @@ -50,6 +51,7 @@ describe('NotificationsManager', () => 
{ let acl: ACL; let db: DB; let nodeGraph: NodeGraph; + let queue: Queue; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let keyManager: KeyManager; @@ -112,21 +114,26 @@ describe('NotificationsManager', () => { keyManager, logger, }); + queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ nodeGraph, keyManager, proxy, + queue, logger, }); - await nodeConnectionManager.start(); nodeManager = new NodeManager({ db, keyManager, sigchain, nodeConnectionManager, nodeGraph, + queue, logger, }); + await queue.start(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); // Set up node for receiving notifications receiver = await PolykeyAgent.createPolykeyAgent({ password: password, @@ -146,7 +153,9 @@ describe('NotificationsManager', () => { }, global.defaultTimeout); afterAll(async () => { await receiver.stop(); + await queue.stop(); await nodeConnectionManager.stop(); + await nodeManager.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); diff --git a/tests/notifications/utils.test.ts b/tests/notifications/utils.test.ts index 5a3b8a617..fa6373e38 100644 --- a/tests/notifications/utils.test.ts +++ b/tests/notifications/utils.test.ts @@ -2,16 +2,15 @@ import type { Notification, NotificationData } from '@/notifications/types'; import type { VaultActions, VaultName } from '@/vaults/types'; import { createPublicKey } from 'crypto'; import { EmbeddedJWK, jwtVerify, exportJWK } from 'jose'; - import * as keysUtils from '@/keys/utils'; import * as notificationsUtils from '@/notifications/utils'; import * as notificationsErrors from '@/notifications/errors'; import * as vaultsUtils from '@/vaults/utils'; import * as nodesUtils from '@/nodes/utils'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Notifications utils', () => { - const nodeId = testUtils.generateRandomNodeId(); + const nodeId = testNodesUtils.generateRandomNodeId(); const nodeIdEncoded = nodesUtils.encodeNodeId(nodeId); const vaultId = vaultsUtils.generateVaultId(); const vaultIdEncoded = vaultsUtils.encodeVaultId(vaultId); @@ -206,7 +205,7 @@ describe('Notifications utils', () => { }); test('validates correct notifications', async () => { - const nodeIdOther = testUtils.generateRandomNodeId(); + const nodeIdOther = testNodesUtils.generateRandomNodeId(); const nodeIdOtherEncoded = nodesUtils.encodeNodeId(nodeIdOther); const generalNotification: Notification = { data: { diff --git a/tests/sigchain/Sigchain.test.ts b/tests/sigchain/Sigchain.test.ts index b6ff170ef..47ccc1c62 100644 --- a/tests/sigchain/Sigchain.test.ts +++ b/tests/sigchain/Sigchain.test.ts @@ -10,8 +10,9 @@ import { KeyManager, utils as keysUtils } from '@/keys'; import { Sigchain } from '@/sigchain'; import * as claimsUtils from '@/claims/utils'; import * as sigchainErrors from '@/sigchain/errors'; -import { utils as nodesUtils } from '@/nodes'; +import * as nodesUtils from '@/nodes/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Sigchain', () => { const logger = new Logger('Sigchain Test', LogLevel.WARN, [ @@ -19,25 +20,25 @@ describe('Sigchain', () => { ]); const password = 'password'; const srcNodeIdEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeId2Encoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const 
nodeId3Encoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdAEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdBEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdCEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); const nodeIdDEncoded = nodesUtils.encodeNodeId( - testUtils.generateRandomNodeId(), + testNodesUtils.generateRandomNodeId(), ); let mockedGenerateKeyPair: jest.SpyInstance; @@ -235,7 +236,7 @@ describe('Sigchain', () => { expect(verified2).toBe(true); // Check the hash of the previous claim is correct - const verifiedHash = await claimsUtils.verifyHashOfClaim( + const verifiedHash = claimsUtils.verifyHashOfClaim( claim1, decoded2.payload.hPrev as string, ); @@ -325,7 +326,9 @@ describe('Sigchain', () => { // Add 10 claims for (let i = 1; i <= 5; i++) { - const node2 = nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()); + const node2 = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); node2s.push(node2); const nodeLink: ClaimData = { type: 'node', @@ -374,7 +377,9 @@ describe('Sigchain', () => { for (let i = 1; i <= 30; i++) { // If even, add a node link if (i % 2 === 0) { - const node2 = nodesUtils.encodeNodeId(testUtils.generateRandomNodeId()); + const node2 = nodesUtils.encodeNodeId( + testNodesUtils.generateRandomNodeId(), + ); nodes[i] = node2; const nodeLink: ClaimData = { type: 'node', diff --git a/tests/status/Status.test.ts b/tests/status/Status.test.ts index 311f89a11..0b0744002 100644 --- a/tests/status/Status.test.ts +++ b/tests/status/Status.test.ts @@ -6,15 +6,15 @@ import path from 'path'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import config from '@/config'; import { Status, errors as statusErrors } from '@/status'; -import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('Status', () => { const logger = new Logger(`${Status.name} Test`, LogLevel.WARN, [ new StreamHandler(), ]); - const nodeId1 = testUtils.generateRandomNodeId(); - const nodeId2 = testUtils.generateRandomNodeId(); - const nodeId3 = testUtils.generateRandomNodeId(); + const nodeId1 = testNodesUtils.generateRandomNodeId(); + const nodeId2 = testNodesUtils.generateRandomNodeId(); + const nodeId3 = testNodesUtils.generateRandomNodeId(); let dataDir: string; beforeEach(async () => { dataDir = await fs.promises.mkdtemp(path.join(os.tmpdir(), 'status-test-')); diff --git a/tests/utils.test.ts b/tests/utils.test.ts index 5f6ee891e..e210f34da 100644 --- a/tests/utils.test.ts +++ b/tests/utils.test.ts @@ -194,7 +194,7 @@ describe('utils', () => { expect(await g1.next()).toStrictEqual({ value: 'second', done: false }); expect(await g1.next()).toStrictEqual({ value: 'last', done: true }); // Noop resource - const g2 = await utils.withG( + const g2 = utils.withG( [ async () => { return [async () => {}]; @@ -258,4 +258,115 @@ describe('utils', () => { expect(acquireOrder).toStrictEqual([lock1, lock2]); expect(releaseOrder).toStrictEqual([lock2, lock1]); }); + test('splitting buffers', () => { + const s1 = ''; + expect(s1.split('')).toStrictEqual([]); + const b1 = Buffer.from(s1); + expect(utils.bufferSplit(b1)).toStrictEqual([]); + + const s2 = '!'; + expect(s2.split('!')).toStrictEqual(['', '']); + const b2 = Buffer.from(s2); + 
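The long run of cases around this point pins bufferSplit to the semantics of String.prototype.split, with two extensions visible in the final assertions: a limit, and a remaining flag that makes the element at the limit carry the unsplit remainder, separators included. A sketch of that contract, assuming the (input, sep?, limit?, remaining?) signature these tests use; not the actual '@/utils' implementation:

// String.split-like splitting over Buffers. Without a separator the input
// splits into single bytes; with `remaining` set, the slot at the limit
// keeps the rest of the input whole.
function bufferSplitSketch(
  input: Buffer,
  sep?: Buffer,
  limit?: number,
  remaining: boolean = false,
): Array<Buffer> {
  const out: Array<Buffer> = [];
  if (sep == null || sep.length === 0) {
    for (let i = 0; i < input.length; i++) {
      if (limit != null && out.length >= limit) break;
      out.push(input.subarray(i, i + 1));
    }
    return out;
  }
  let start = 0;
  while (limit == null || out.length < limit) {
    if (remaining && limit != null && out.length === limit - 1) {
      out.push(input.subarray(start)); // keep the remainder whole
      return out;
    }
    const idx = input.indexOf(sep, start);
    if (idx === -1) {
      out.push(input.subarray(start));
      return out;
    }
    out.push(input.subarray(start, idx));
    start = idx + sep.length;
  }
  return out;
}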
expect(utils.bufferSplit(b2, Buffer.from('!'))).toStrictEqual([ + Buffer.from(''), + Buffer.from(''), + ]); + + const s3 = '!a'; + expect(s3.split('!')).toStrictEqual(['', 'a']); + const b3 = Buffer.from(s3); + expect(utils.bufferSplit(b3, Buffer.from('!'))).toStrictEqual([ + Buffer.from(''), + Buffer.from('a'), + ]); + + const s4 = 'a!'; + expect(s4.split('!')).toStrictEqual(['a', '']); + const b4 = Buffer.from(s4); + expect(utils.bufferSplit(b4, Buffer.from('!'))).toStrictEqual([ + Buffer.from('a'), + Buffer.from(''), + ]); + + const s5 = 'a!b'; + expect(s5.split('!')).toStrictEqual(['a', 'b']); + const b5 = Buffer.from(s5); + expect(utils.bufferSplit(b5, Buffer.from('!'))).toStrictEqual([ + Buffer.from('a'), + Buffer.from('b'), + ]); + + const s6 = '!a!b'; + expect(s6.split('!')).toStrictEqual(['', 'a', 'b']); + const b6 = Buffer.from(s6); + expect(utils.bufferSplit(b6, Buffer.from('!'))).toStrictEqual([ + Buffer.from(''), + Buffer.from('a'), + Buffer.from('b'), + ]); + + const s7 = 'a!b!'; + expect(s7.split('!')).toStrictEqual(['a', 'b', '']); + const b7 = Buffer.from(s7); + expect(utils.bufferSplit(b7, Buffer.from('!'))).toStrictEqual([ + Buffer.from('a'), + Buffer.from('b'), + Buffer.from(''), + ]); + + const s8 = '!a!b!'; + expect(s8.split('!')).toStrictEqual(['', 'a', 'b', '']); + const b8 = Buffer.from(s8); + expect(utils.bufferSplit(b8, Buffer.from('!'))).toStrictEqual([ + Buffer.from(''), + Buffer.from('a'), + Buffer.from('b'), + Buffer.from(''), + ]); + + const s9 = '!a!b!'; + expect(s9.split('!', 2)).toStrictEqual(['', 'a']); + expect(s9.split('!', 3)).toStrictEqual(['', 'a', 'b']); + expect(s9.split('!', 4)).toStrictEqual(['', 'a', 'b', '']); + const b9 = Buffer.from(s9); + expect(utils.bufferSplit(b9, Buffer.from('!'), 2)).toStrictEqual([ + Buffer.from(''), + Buffer.from('a'), + ]); + expect(utils.bufferSplit(b9, Buffer.from('!'), 3)).toStrictEqual([ + Buffer.from(''), + Buffer.from('a'), + Buffer.from('b'), + ]); + expect(utils.bufferSplit(b9, Buffer.from('!'), 4)).toStrictEqual([ + Buffer.from(''), + Buffer.from('a'), + Buffer.from('b'), + Buffer.from(''), + ]); + + const s10 = 'abcd'; + expect(s10.split('')).toStrictEqual(['a', 'b', 'c', 'd']); + const b10 = Buffer.from(s10); + expect(utils.bufferSplit(b10)).toStrictEqual([ + Buffer.from('a'), + Buffer.from('b'), + Buffer.from('c'), + Buffer.from('d'), + ]); + + // Splitting while concatenating the remaining chunk + const b11 = Buffer.from('!a!b!'); + expect(utils.bufferSplit(b11, Buffer.from('!'), 3, true)).toStrictEqual([ + Buffer.from(''), + Buffer.from('a'), + Buffer.from('b!'), + ]); + const b12 = Buffer.from('!ab!cd!e!!!!'); + expect(utils.bufferSplit(b12, Buffer.from('!'), 3, true)).toStrictEqual([ + Buffer.from(''), + Buffer.from('ab'), + Buffer.from('cd!e!!!!'), + ]); + }); }); diff --git a/tests/utils.ts b/tests/utils.ts index 3f446b465..983714c19 100644 --- a/tests/utils.ts +++ b/tests/utils.ts @@ -1,11 +1,11 @@ -import type { StatusLive } from '@/status/types'; -import type { NodeId } from '@/nodes/types'; +// Import type { StatusLive } from '@/status/types'; +// import type { Host } from '@/network/types'; import type { Host } from '@/network/types'; +import type { StatusLive } from '@/status/types'; import path from 'path'; import fs from 'fs'; import lock from 'fd-lock'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; -import { IdInternal } from '@matrixai/id'; import PolykeyAgent from '@/PolykeyAgent'; import Status from '@/status/Status'; import GRPCClientClient from
'@/client/GRPCClientClient'; @@ -70,24 +70,25 @@ async function setupGlobalKeypair() { } } -/** - * Setup the global agent - * Use this in beforeAll, and use the closeGlobalAgent in afterAll - * This is expected to be executed by multiple worker processes - * Uses a references directory as a reference count - * Uses fd-lock to serialise access - * This means all test modules using this will be serialised - * Any beforeAll must use globalThis.maxTimeout - * Tips for usage: - * * Do not restart this global agent - * * Ensure client-side side-effects are removed at the end of each test - * * Ensure server-side side-effects are removed at the end of each test - */ +// FIXME: what is going on here? is this getting removed? +// /** +// * Setup the global agent +// * Use this in beforeAll, and use the closeGlobalAgent in afterAll +// * This is expected to be executed by multiple worker processes +// * Uses a references directory as a reference count +// * Uses fd-lock to serialise access +// * This means all test modules using this will be serialised +// * Any beforeAll must use globalThis.maxTimeout +// * Tips for usage: +// * * Do not restart this global agent +// * * Ensure client-side side-effects are removed at the end of each test +// * * Ensure server-side side-effects are removed at the end of each test +// */ async function setupGlobalAgent( logger: Logger = new Logger(setupGlobalAgent.name, LogLevel.WARN, [ new StreamHandler(), ]), -) { +): Promise { const globalAgentPassword = 'password'; const globalAgentDir = path.join(globalThis.dataDir, 'agent'); // The references directory will act like our reference count @@ -179,9 +180,4 @@ async function setupGlobalAgent( }; } -function generateRandomNodeId(): NodeId { - const random = keysUtils.getRandomBytesSync(16).toString('hex'); - return IdInternal.fromString(random); -} - -export { setupGlobalKeypair, setupGlobalAgent, generateRandomNodeId }; +export { setupGlobalKeypair, setupGlobalAgent }; diff --git a/tests/vaults/VaultInternal.test.ts b/tests/vaults/VaultInternal.test.ts index 34f03d70c..d5e06bd88 100644 --- a/tests/vaults/VaultInternal.test.ts +++ b/tests/vaults/VaultInternal.test.ts @@ -15,7 +15,7 @@ import * as vaultsErrors from '@/vaults/errors'; import { sleep } from '@/utils'; import * as keysUtils from '@/keys/utils'; import * as vaultsUtils from '@/vaults/utils'; -import * as testsUtils from '../utils'; +import * as nodeTestUtils from '../nodes/utils'; jest.mock('@/keys/utils', () => ({ ...jest.requireActual('@/keys/utils'), @@ -40,7 +40,7 @@ describe('VaultInternal', () => { const fakeKeyManager = { getNodeId: () => { - return testsUtils.generateRandomNodeId(); + return nodeTestUtils.generateRandomNodeId(); }, } as KeyManager; const secret1 = { name: 'secret-1', content: 'secret-content-1' }; @@ -665,7 +665,7 @@ describe('VaultInternal', () => { await efs.writeFile(secret2.name, secret2.content); }); const commit = (await vault.log())[0].commitId; - const gen = await vault.readG(async function* (efs): AsyncGenerator { + const gen = vault.readG(async function* (efs): AsyncGenerator { yield expect((await efs.readFile(secret1.name)).toString()).toEqual( secret1.content, ); diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index 2117ea7a8..5135a76b5 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -7,6 +7,8 @@ import type { } from '@/vaults/types'; import type NotificationsManager from '@/notifications/NotificationsManager'; import type { Host, Port, 
TLSConfig } from '@/network/types'; +import type NodeManager from '@/nodes/NodeManager'; +import type Queue from '@/nodes/Queue'; import fs from 'fs'; import os from 'os'; import path from 'path'; @@ -29,7 +31,7 @@ import * as vaultsUtils from '@/vaults/utils'; import * as keysUtils from '@/keys/utils'; import { sleep } from '@/utils'; import VaultInternal from '@/vaults/VaultInternal'; -import * as testsUtils from '../utils'; +import * as nodeTestUtils from '../nodes/utils'; const mockedGenerateDeterministicKeyPair = jest .spyOn(keysUtils, 'generateDeterministicKeyPair') @@ -63,7 +65,7 @@ describe('VaultManager', () => { let db: DB; // We only ever use this to get NodeId, No need to create a whole one - const nodeId = testsUtils.generateRandomNodeId(); + const nodeId = nodeTestUtils.generateRandomNodeId(); const dummyKeyManager = { getNodeId: () => nodeId, } as KeyManager; @@ -485,7 +487,7 @@ describe('VaultManager', () => { logger: logger.getChild('Remote Keynode 1'), nodePath: path.join(allDataDir, 'remoteKeynode1'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, }, }); remoteKeynode1Id = remoteKeynode1.keyManager.getNodeId(); @@ -495,7 +497,7 @@ describe('VaultManager', () => { logger: logger.getChild('Remote Keynode 2'), nodePath: path.join(allDataDir, 'remoteKeynode2'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, }, }); remoteKeynode2Id = remoteKeynode2.keyManager.getNodeId(); @@ -571,9 +573,12 @@ describe('VaultManager', () => { keyManager, nodeGraph, proxy, + queue: {} as Queue, logger, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ + nodeManager: { setNode: jest.fn() } as unknown as NodeManager, + }); await nodeGraph.setNode(remoteKeynode1Id, { host: remoteKeynode1.proxy.getProxyHost(), @@ -1310,7 +1315,7 @@ describe('VaultManager', () => { }); await sleep(200); expect(pullVaultMock).not.toHaveBeenCalled(); - await releaseWrite(); + releaseWrite(); await pullP; expect(pullVaultMock).toHaveBeenCalled(); pullVaultMock.mockClear(); @@ -1337,7 +1342,7 @@ describe('VaultManager', () => { }); await sleep(200); expect(gitPullMock).not.toHaveBeenCalled(); - await releaseVaultWrite(); + releaseVaultWrite(); await pullP2; expect(gitPullMock).toHaveBeenCalled(); } finally { @@ -1371,8 +1376,8 @@ describe('VaultManager', () => { }); try { // Setting up state - const nodeId1 = testsUtils.generateRandomNodeId(); - const nodeId2 = testsUtils.generateRandomNodeId(); + const nodeId1 = nodeTestUtils.generateRandomNodeId(); + const nodeId2 = nodeTestUtils.generateRandomNodeId(); await gestaltGraph.setNode({ id: nodesUtils.encodeNodeId(nodeId1), chain: {}, @@ -1431,7 +1436,7 @@ describe('VaultManager', () => { password: 'password', nodePath: path.join(dataDir, 'remoteNode'), networkConfig: { - proxyHost: '127.0.0.1' as Host, + proxyHost: localHost, }, logger, }); @@ -1471,9 +1476,12 @@ describe('VaultManager', () => { logger, nodeGraph, proxy, + queue: {} as Queue, connConnectTime: 1000, }); - await nodeConnectionManager.start(); + await nodeConnectionManager.start({ + nodeManager: { setNode: jest.fn() } as unknown as NodeManager, + }); const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyManager, diff --git a/tests/vaults/VaultOps.test.ts b/tests/vaults/VaultOps.test.ts index e376eb306..9c4a70e7e 100644 --- a/tests/vaults/VaultOps.test.ts +++ b/tests/vaults/VaultOps.test.ts @@ -14,6 +14,7 @@ import * as vaultOps from '@/vaults/VaultOps'; import * as vaultsUtils from '@/vaults/utils'; 
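A pattern worth noting, since it recurs through the vault tests in this patch: where only one or two members of a heavyweight dependency are exercised, the tests build just those members and cast the object to the dependency's type ({} as Queue, { setNode: jest.fn() } as unknown as NodeManager, and the dummyKeyManager below). A condensed sketch of the idiom, with names taken from this diff:

import type KeyManager from '@/keys/KeyManager';
import * as testNodesUtils from '../nodes/utils';

// Only getNodeId is ever called on the KeyManager here, so a one-member
// object asserted to the full type is enough; any other member that gets
// called would fail loudly at runtime rather than silently succeed.
const dummyKeyManager = {
  getNodeId: () => testNodesUtils.generateRandomNodeId(),
} as KeyManager;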
import * as keysUtils from '@/keys/utils'; import * as testUtils from '../utils'; +import * as testNodesUtils from '../nodes/utils'; describe('VaultOps', () => { const logger = new Logger('VaultOps', LogLevel.WARN, [new StreamHandler()]); @@ -28,7 +29,7 @@ describe('VaultOps', () => { let vaultsDbDomain: DBDomain; const dummyKeyManager = { getNodeId: () => { - return testUtils.generateRandomNodeId(); + return testNodesUtils.generateRandomNodeId(); }, } as KeyManager; @@ -357,7 +358,7 @@ describe('VaultOps', () => { expect( (await vaultOps.getSecret(vault, '.hidingSecret')).toString(), ).toStrictEqual('change_contents'); - await expect( + expect( ( await vaultOps.getSecret(vault, '.hidingDir/.hiddenInSecret') ).toString(),
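The changes above that drop await (from releaseWrite(), vault.readG(...), withG(...), and this getSecret assertion) all correct the same pattern: awaiting a value that is not a thenable is a no-op that only implies asynchrony where there is none. A minimal illustration, assuming (as the diff implies) that these calls are synchronous; the names here are hypothetical:

// `release` is synchronous; awaiting it merely wraps undefined in a
// resolved promise, so both calls below behave identically.
function release(): void {}

async function demo(): Promise<void> {
  release(); // what the patch changes these call sites to
  await release(); // legal, but misleading about asynchrony
}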