diff --git a/package-lock.json b/package-lock.json index d0558ba09..605305c01 100644 --- a/package-lock.json +++ b/package-lock.json @@ -12,7 +12,7 @@ "@grpc/grpc-js": "1.6.7", "@matrixai/async-cancellable": "^1.0.2", "@matrixai/async-init": "^1.8.2", - "@matrixai/async-locks": "^3.1.2", + "@matrixai/async-locks": "^3.2.0", "@matrixai/db": "^5.0.3", "@matrixai/errors": "^1.1.3", "@matrixai/id": "^3.3.3", @@ -2638,9 +2638,9 @@ } }, "node_modules/@matrixai/async-locks": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@matrixai/async-locks/-/async-locks-3.1.2.tgz", - "integrity": "sha512-rIA89EGBNlWV59pLVwx7aqlKWVJRCOsVi6evt8HoN6dyvyyns8//Q8PyBcg5ay0GjLkqsXKQjYXMRif5OB3VSg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@matrixai/async-locks/-/async-locks-3.2.0.tgz", + "integrity": "sha512-Gl919y3GK2lBCI7M3MabE2u0+XOhKqqgwFEGVaPSI2BrdSI+RY7K3+dzjTSUTujVZwiYskT611CBvlDm9fhsNg==", "dependencies": { "@matrixai/errors": "^1.1.3", "@matrixai/resources": "^1.1.4", @@ -13411,9 +13411,9 @@ } }, "@matrixai/async-locks": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/@matrixai/async-locks/-/async-locks-3.1.2.tgz", - "integrity": "sha512-rIA89EGBNlWV59pLVwx7aqlKWVJRCOsVi6evt8HoN6dyvyyns8//Q8PyBcg5ay0GjLkqsXKQjYXMRif5OB3VSg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@matrixai/async-locks/-/async-locks-3.2.0.tgz", + "integrity": "sha512-Gl919y3GK2lBCI7M3MabE2u0+XOhKqqgwFEGVaPSI2BrdSI+RY7K3+dzjTSUTujVZwiYskT611CBvlDm9fhsNg==", "requires": { "@matrixai/errors": "^1.1.3", "@matrixai/resources": "^1.1.4", diff --git a/package.json b/package.json index 7d5dfecef..ffd45a1cf 100644 --- a/package.json +++ b/package.json @@ -80,7 +80,7 @@ "@grpc/grpc-js": "1.6.7", "@matrixai/async-cancellable": "^1.0.2", "@matrixai/async-init": "^1.8.2", - "@matrixai/async-locks": "^3.1.2", + "@matrixai/async-locks": "^3.2.0", "@matrixai/db": "^5.0.3", "@matrixai/errors": "^1.1.3", "@matrixai/id": "^3.3.3", diff --git a/src/PolykeyAgent.ts b/src/PolykeyAgent.ts index 528a092b5..997010d21 100644 --- a/src/PolykeyAgent.ts +++ b/src/PolykeyAgent.ts @@ -8,7 +8,6 @@ import process from 'process'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { CreateDestroyStartStop } from '@matrixai/async-init/dist/CreateDestroyStartStop'; -import Queue from './nodes/Queue'; import * as networkUtils from './network/utils'; import KeyManager from './keys/KeyManager'; import Status from './status/Status'; @@ -35,6 +34,7 @@ import * as errors from './errors'; import * as utils from './utils'; import * as keysUtils from './keys/utils'; import * as nodesUtils from './nodes/utils'; +import TaskManager from './tasks/TaskManager'; type NetworkConfig = { forwardHost?: Host; @@ -87,8 +87,8 @@ class PolykeyAgent { acl, gestaltGraph, proxy, + taskManager, nodeGraph, - queue, nodeConnectionManager, nodeManager, discovery, @@ -134,8 +134,8 @@ class PolykeyAgent { acl?: ACL; gestaltGraph?: GestaltGraph; proxy?: Proxy; + taskManager?: TaskManager; nodeGraph?: NodeGraph; - queue?: Queue; nodeConnectionManager?: NodeConnectionManager; nodeManager?: NodeManager; discovery?: Discovery; @@ -285,18 +285,21 @@ class PolykeyAgent { keyManager, logger: logger.getChild(NodeGraph.name), })); - queue = - queue ?? - new Queue({ - logger: logger.getChild(Queue.name), - }); + taskManager = + taskManager ?? + (await TaskManager.createTaskManager({ + db, + fresh, + lazy: true, + logger, + })); nodeConnectionManager = nodeConnectionManager ?? 
new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, seedNodes, ...nodeConnectionManagerConfig_, logger: logger.getChild(NodeConnectionManager.name), @@ -309,7 +312,7 @@ class PolykeyAgent { keyManager, nodeGraph, nodeConnectionManager, - queue, + taskManager, logger: logger.getChild(NodeManager.name), }); await nodeManager.start(); @@ -373,6 +376,7 @@ class PolykeyAgent { await notificationsManager?.stop(); await vaultManager?.stop(); await discovery?.stop(); + await taskManager?.stop(); await proxy?.stop(); await gestaltGraph?.stop(); await acl?.stop(); @@ -396,7 +400,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, - queue, + taskManager, nodeConnectionManager, nodeManager, discovery, @@ -429,7 +433,7 @@ class PolykeyAgent { public readonly gestaltGraph: GestaltGraph; public readonly proxy: Proxy; public readonly nodeGraph: NodeGraph; - public readonly queue: Queue; + public readonly taskManager: TaskManager; public readonly nodeConnectionManager: NodeConnectionManager; public readonly nodeManager: NodeManager; public readonly discovery: Discovery; @@ -454,7 +458,7 @@ class PolykeyAgent { gestaltGraph, proxy, nodeGraph, - queue, + taskManager, nodeConnectionManager, nodeManager, discovery, @@ -478,7 +482,7 @@ class PolykeyAgent { gestaltGraph: GestaltGraph; proxy: Proxy; nodeGraph: NodeGraph; - queue: Queue; + taskManager: TaskManager; nodeConnectionManager: NodeConnectionManager; nodeManager: NodeManager; discovery: Discovery; @@ -504,7 +508,7 @@ class PolykeyAgent { this.proxy = proxy; this.discovery = discovery; this.nodeGraph = nodeGraph; - this.queue = queue; + this.taskManager = taskManager; this.nodeConnectionManager = nodeConnectionManager; this.nodeManager = nodeManager; this.vaultManager = vaultManager; @@ -578,14 +582,10 @@ class PolykeyAgent { ); // Reverse connection was established and authenticated, // add it to the node graph - await this.nodeManager.setNode( - data.remoteNodeId, - { - host: data.remoteHost, - port: data.remotePort, - }, - false, - ); + await this.nodeManager.setNode(data.remoteNodeId, { + host: data.remoteHost, + port: data.remotePort, + }); } }, ); @@ -667,15 +667,16 @@ class PolykeyAgent { proxyPort: networkConfig_.proxyPort, tlsConfig, }); - await this.queue.start(); + await this.taskManager.start({ fresh, lazy: true }); await this.nodeManager.start(); await this.nodeConnectionManager.start({ nodeManager: this.nodeManager }); await this.nodeGraph.start({ fresh }); - await this.nodeConnectionManager.syncNodeGraph(false); + await this.nodeManager.syncNodeGraph(false); await this.discovery.start({ fresh }); await this.vaultManager.start({ fresh }); await this.notificationsManager.start({ fresh }); await this.sessionManager.start({ fresh }); + await this.taskManager.startProcessing(); await this.status.finishStart({ pid: process.pid, nodeId: this.keyManager.getNodeId(), @@ -693,14 +694,16 @@ class PolykeyAgent { this.logger.warn(`Failed Starting ${this.constructor.name}`); this.events.removeAllListeners(); await this.status?.beginStop({ pid: process.pid }); + await this.taskManager?.stopProcessing(); + await this.taskManager?.stopTasks(); await this.sessionManager?.stop(); await this.notificationsManager?.stop(); await this.vaultManager?.stop(); await this.discovery?.stop(); - await this.queue?.stop(); await this.nodeGraph?.stop(); await this.nodeConnectionManager?.stop(); await this.nodeManager?.stop(); + await this.taskManager?.stop(); await this.proxy?.stop(); await this.grpcServerAgent?.stop(); await 
this.grpcServerClient?.stop(); @@ -723,6 +726,8 @@ class PolykeyAgent { this.logger.info(`Stopping ${this.constructor.name}`); this.events.removeAllListeners(); await this.status.beginStop({ pid: process.pid }); + await this.taskManager.stopProcessing(); + await this.taskManager.stopTasks(); await this.sessionManager.stop(); await this.notificationsManager.stop(); await this.vaultManager.stop(); @@ -730,7 +735,7 @@ class PolykeyAgent { await this.nodeConnectionManager.stop(); await this.nodeGraph.stop(); await this.nodeManager.stop(); - await this.queue.stop(); + await this.taskManager.stop(); await this.proxy.stop(); await this.grpcServerAgent.stop(); await this.grpcServerClient.stop(); @@ -755,6 +760,7 @@ class PolykeyAgent { await this.discovery.destroy(); await this.nodeGraph.destroy(); await this.gestaltGraph.destroy(); + await this.taskManager.destroy(); await this.acl.destroy(); await this.sigchain.destroy(); await this.identitiesManager.destroy(); diff --git a/src/bin/errors.ts b/src/bin/errors.ts index be6876a65..34e76e41d 100644 --- a/src/bin/errors.ts +++ b/src/bin/errors.ts @@ -1,7 +1,25 @@ import ErrorPolykey from '../ErrorPolykey'; import sysexits from '../utils/sysexits'; -class ErrorCLI extends ErrorPolykey {} +class ErrorBin extends ErrorPolykey {} + +class ErrorBinUncaughtException extends ErrorBin { + static description = ''; + exitCode = sysexits.SOFTWARE; +} + +class ErrorBinUnhandledRejection extends ErrorBin { + static description = ''; + exitCode = sysexits.SOFTWARE; +} + +class ErrorBinAsynchronousDeadlock extends ErrorBin { + static description = + 'PolykeyAgent process exited unexpectedly, likely due to promise deadlock'; + exitCode = sysexits.SOFTWARE; +} + +class ErrorCLI extends ErrorBin {} class ErrorCLINodePath extends ErrorCLI { static description = 'Cannot derive default node path from unknown platform'; @@ -49,17 +67,21 @@ class ErrorCLIPolykeyAgentProcess extends ErrorCLI { exitCode = sysexits.OSERR; } -class ErrorNodeFindFailed extends ErrorCLI { +class ErrorCLINodeFindFailed extends ErrorCLI { static description = 'Failed to find the node in the DHT'; exitCode = 1; } -class ErrorNodePingFailed extends ErrorCLI { +class ErrorCLINodePingFailed extends ErrorCLI { static description = 'Node was not online or not found.'; exitCode = 1; } export { + ErrorBin, + ErrorBinUncaughtException, + ErrorBinUnhandledRejection, + ErrorBinAsynchronousDeadlock, ErrorCLI, ErrorCLINodePath, ErrorCLIClientOptions, @@ -70,6 +92,6 @@ export { ErrorCLIFileRead, ErrorCLIPolykeyAgentStatus, ErrorCLIPolykeyAgentProcess, - ErrorNodeFindFailed, - ErrorNodePingFailed, + ErrorCLINodeFindFailed, + ErrorCLINodePingFailed, }; diff --git a/src/bin/nodes/CommandFind.ts b/src/bin/nodes/CommandFind.ts index 32169a968..92b2900c1 100644 --- a/src/bin/nodes/CommandFind.ts +++ b/src/bin/nodes/CommandFind.ts @@ -93,7 +93,7 @@ class CommandFind extends CommandPolykey { ); // Like ping it should error when failing to find node for automation reasons. if (!result.success) { - throw new binErrors.ErrorNodeFindFailed(result.message); + throw new binErrors.ErrorCLINodeFindFailed(result.message); } } finally { if (pkClient! 
!= null) await pkClient.stop(); diff --git a/src/bin/nodes/CommandPing.ts b/src/bin/nodes/CommandPing.ts index a15779c55..c9816ad18 100644 --- a/src/bin/nodes/CommandPing.ts +++ b/src/bin/nodes/CommandPing.ts @@ -56,7 +56,7 @@ class CommandPing extends CommandPolykey { ); } catch (err) { if (err.cause instanceof nodesErrors.ErrorNodeGraphNodeIdNotFound) { - error = new binErrors.ErrorNodePingFailed( + error = new binErrors.ErrorCLINodePingFailed( `Failed to resolve node ID ${nodesUtils.encodeNodeId( nodeId, )} to an address.`, @@ -69,7 +69,7 @@ class CommandPing extends CommandPolykey { const status = { success: false, message: '' }; status.success = statusMessage ? statusMessage.getSuccess() : false; if (!status.success && !error) { - error = new binErrors.ErrorNodePingFailed('No response received'); + error = new binErrors.ErrorCLINodePingFailed('No response received'); } if (status.success) status.message = 'Node is Active.'; else status.message = error.message; diff --git a/src/bin/utils/ExitHandlers.ts b/src/bin/utils/ExitHandlers.ts index 2fdd74f03..fbb1ee854 100644 --- a/src/bin/utils/ExitHandlers.ts +++ b/src/bin/utils/ExitHandlers.ts @@ -1,6 +1,7 @@ import process from 'process'; import * as binUtils from './utils'; import ErrorPolykey from '../../ErrorPolykey'; +import * as binErrors from '../errors'; class ExitHandlers { /** @@ -10,38 +11,7 @@ class ExitHandlers { public handlers: Array<(signal?: NodeJS.Signals) => Promise>; protected _exiting: boolean = false; protected _errFormat: 'json' | 'error'; - /** - * Handles synchronous and asynchronous exceptions - * This prints out appropriate error message on STDERR - * It sets the exit code according to the error - * 255 is set for unknown errors - */ - protected errorHandler = async (e: Error) => { - if (this._exiting) { - return; - } - this._exiting = true; - if (e instanceof ErrorPolykey) { - process.stderr.write( - binUtils.outputFormatter({ - type: this._errFormat, - data: e, - }), - ); - process.exitCode = e.exitCode; - } else { - // Unknown error, this should not happen - process.stderr.write( - binUtils.outputFormatter({ - type: this._errFormat, - data: e, - }), - ); - process.exitCode = 255; - } - // Fail fast pattern - process.exit(); - }; + /** * Handles termination signals * This is idempotent @@ -84,6 +54,67 @@ class ExitHandlers { } }; + /** + * Handles asynchronous exceptions + * This prints out appropriate error message on STDERR + * It sets the exit code to SOFTWARE + */ + protected unhandledRejectionHandler = async (e: Error) => { + if (this._exiting) { + return; + } + this._exiting = true; + const error = new binErrors.ErrorBinUnhandledRejection(undefined, { + cause: e, + }); + process.stderr.write( + binUtils.outputFormatter({ + type: this._errFormat, + data: e, + }), + ); + process.exitCode = error.exitCode; + // Fail fast pattern + process.exit(); + }; + + /** + * Handles synchronous exceptions + * This prints out appropriate error message on STDERR + * It sets the exit code to SOFTWARE + */ + protected uncaughtExceptionHandler = async (e: Error) => { + if (this._exiting) { + return; + } + this._exiting = true; + const error = new binErrors.ErrorBinUncaughtException(undefined, { + cause: e, + }); + process.stderr.write( + binUtils.outputFormatter({ + type: this._errFormat, + data: e, + }), + ); + process.exitCode = error.exitCode; + // Fail fast pattern + process.exit(); + }; + + protected deadlockHandler = async () => { + if (process.exitCode == null) { + const e = new 
binErrors.ErrorBinAsynchronousDeadlock(); + process.stderr.write( + binUtils.outputFormatter({ + type: this._errFormat, + data: e, + }), + ); + process.exitCode = e.exitCode; + } + }; + /** * Automatically installs all handlers */ @@ -108,8 +139,9 @@ class ExitHandlers { process.on('SIGQUIT', this.signalHandler); process.on('SIGHUP', this.signalHandler); // Both synchronous and asynchronous errors are handled - process.once('unhandledRejection', this.errorHandler); - process.once('uncaughtException', this.errorHandler); + process.once('unhandledRejection', this.unhandledRejectionHandler); + process.once('uncaughtException', this.uncaughtExceptionHandler); + process.once('beforeExit', this.deadlockHandler); } public uninstall() { @@ -117,8 +149,12 @@ class ExitHandlers { process.removeListener('SIGTERM', this.signalHandler); process.removeListener('SIGQUIT', this.signalHandler); process.removeListener('SIGHUP', this.signalHandler); - process.removeListener('unhandledRejection', this.errorHandler); - process.removeListener('uncaughtException', this.errorHandler); + process.removeListener( + 'unhandledRejection', + this.unhandledRejectionHandler, + ); + process.removeListener('uncaughtException', this.uncaughtExceptionHandler); + process.removeListener('beforeExit', this.deadlockHandler); } /** diff --git a/src/bootstrap/utils.ts b/src/bootstrap/utils.ts index 9eece1244..72c06de83 100644 --- a/src/bootstrap/utils.ts +++ b/src/bootstrap/utils.ts @@ -4,7 +4,7 @@ import path from 'path'; import Logger from '@matrixai/logger'; import { DB } from '@matrixai/db'; import * as bootstrapErrors from './errors'; -import Queue from '../nodes/Queue'; +import TaskManager from '../tasks/TaskManager'; import { IdentitiesManager } from '../identities'; import { SessionManager } from '../sessions'; import { Status } from '../status'; @@ -143,12 +143,16 @@ async function bootstrapState({ keyManager, logger: logger.getChild(NodeGraph.name), }); - const queue = new Queue({ logger }); + const taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, + }); const nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, logger: logger.getChild(NodeConnectionManager.name), }); const nodeManager = new NodeManager({ @@ -157,7 +161,7 @@ async function bootstrapState({ nodeGraph, nodeConnectionManager, sigchain, - queue, + taskManager, logger: logger.getChild(NodeManager.name), }); const notificationsManager = @@ -196,6 +200,7 @@ async function bootstrapState({ await acl.stop(); await sigchain.stop(); await identitiesManager.stop(); + await taskManager.stop(); await db.stop(); await keyManager.stop(); await schema.stop(); diff --git a/src/client/service/nodesAdd.ts b/src/client/service/nodesAdd.ts index 87b356b7f..90ecebb10 100644 --- a/src/client/service/nodesAdd.ts +++ b/src/client/service/nodesAdd.ts @@ -81,6 +81,7 @@ function nodesAdd({ } as NodeAddress, true, request.getForce(), + 1500, undefined, tran, ), diff --git a/src/errors.ts b/src/errors.ts index 3f6aba171..e2114cf55 100644 --- a/src/errors.ts +++ b/src/errors.ts @@ -41,10 +41,6 @@ class ErrorPolykeyClientDestroyed extends ErrorPolykey { exitCode = sysexits.USAGE; } -class ErrorInvalidId extends ErrorPolykey {} - -class ErrorInvalidConfigEnvironment extends ErrorPolykey {} - export { sysexits, ErrorPolykey, @@ -56,8 +52,6 @@ export { ErrorPolykeyClientRunning, ErrorPolykeyClientNotRunning, ErrorPolykeyClientDestroyed, - ErrorInvalidId, - ErrorInvalidConfigEnvironment, }; /** diff --git 
a/src/nodes/NodeConnectionManager.ts b/src/nodes/NodeConnectionManager.ts index c1f5c1a85..9861e2445 100644 --- a/src/nodes/NodeConnectionManager.ts +++ b/src/nodes/NodeConnectionManager.ts @@ -4,7 +4,7 @@ import type Proxy from '../network/Proxy'; import type { Host, Hostname, Port } from '../network/types'; import type { Timer } from '../types'; import type NodeGraph from './NodeGraph'; -import type Queue from './Queue'; +import type TaskManager from '../tasks/TaskManager'; import type { NodeAddress, NodeData, @@ -13,6 +13,8 @@ import type { SeedNodes, } from './types'; import type NodeManager from './NodeManager'; +import type { ContextTimed } from 'contexts/types'; +import type { PromiseCancellable } from '@matrixai/async-cancellable'; import { withF } from '@matrixai/resources'; import Logger from '@matrixai/logger'; import { ready, StartStop } from '@matrixai/async-init/dist/StartStop'; @@ -22,13 +24,12 @@ import { LockBox, RWLockWriter } from '@matrixai/async-locks'; import NodeConnection from './NodeConnection'; import * as nodesUtils from './utils'; import * as nodesErrors from './errors'; +import { context, timedCancellable } from '../contexts'; import GRPCClientAgent from '../agent/GRPCClientAgent'; import * as validationUtils from '../validation/utils'; import * as networkUtils from '../network/utils'; -import * as agentErrors from '../agent/errors'; -import * as grpcErrors from '../grpc/errors'; import * as nodesPB from '../proto/js/polykey/v1/nodes/nodes_pb'; -import { timerStart } from '../utils'; +import { timerStart, never } from '../utils'; type ConnectionAndTimer = { connection: NodeConnection<GRPCClientAgent>; @@ -57,7 +58,7 @@ class NodeConnectionManager { protected nodeGraph: NodeGraph; protected keyManager: KeyManager; protected proxy: Proxy; - protected queue: Queue; + protected taskManager: TaskManager; // NodeManager has to be passed in during start to allow co-dependency protected nodeManager: NodeManager | undefined; protected seedNodes: SeedNodes; @@ -73,12 +74,19 @@ */ protected connections: Map<NodeIdString, ConnectionAndTimer> = new Map(); protected connectionLocks: LockBox<RWLockWriter> = new LockBox(); + // Tracks the backoff period for offline nodes + protected nodesBackoffMap: Map< string, { lastAttempt: number; delay: number } > = new Map(); + protected backoffDefault: number = 300000; // 5 min + protected backoffMultiplier: number = 2; // Doubles every failure public constructor({ keyManager, nodeGraph, proxy, - queue, + taskManager, seedNodes = {}, initialClosestNodes = 3, connConnectTime = 20000, @@ -88,7 +96,7 @@ nodeGraph: NodeGraph; keyManager: KeyManager; proxy: Proxy; - queue: Queue; + taskManager: TaskManager; seedNodes?: SeedNodes; initialClosestNodes?: number; connConnectTime?: number; @@ -99,7 +107,7 @@ this.keyManager = keyManager; this.nodeGraph = nodeGraph; this.proxy = proxy; - this.queue = queue; + this.taskManager = taskManager; this.seedNodes = seedNodes; this.initialClosestNodes = initialClosestNodes; this.connConnectTime = connConnectTime; @@ -109,13 +117,14 @@ public async start({ nodeManager }: { nodeManager: NodeManager }) { this.logger.info(`Starting ${this.constructor.name}`); this.nodeManager = nodeManager; + // Adding seed nodes for (const nodeIdEncoded in this.seedNodes) { - const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded)!; + const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); + if (nodeId == null) never(); await this.nodeManager.setNode( nodeId, 
this.seedNodes[nodeIdEncoded], true, - true, ); } this.logger.info(`Started ${this.constructor.name}`); @@ -164,11 +173,7 @@ class NodeConnectionManager { return [ async (e) => { await release(); - if ( - e instanceof nodesErrors.ErrorNodeConnectionDestroyed || - e instanceof grpcErrors.ErrorGRPC || - e instanceof agentErrors.ErrorAgentClientDestroyed - ) { + if (nodesUtils.isConnectionError(e)) { // Error with connection, shutting connection down await this.destroyConnection(targetNodeId); } @@ -195,14 +200,7 @@ class NodeConnectionManager { ): Promise { return await withF( [await this.acquireConnection(targetNodeId, timer)], - async ([conn]) => { - this.logger.info( - `withConnF calling function with connection to ${nodesUtils.encodeNodeId( - targetNodeId, - )}`, - ); - return await f(conn); - }, + async ([conn]) => await f(conn), ); } @@ -227,7 +225,8 @@ class NodeConnectionManager { const [release, conn] = await acquire(); let caughtError; try { - return yield* g(conn!); + if (conn == null) never(); + return yield* g(conn); } catch (e) { caughtError = e; throw e; @@ -248,25 +247,12 @@ class NodeConnectionManager { targetNodeId: NodeId, timer?: Timer, ): Promise { - this.logger.info( - `Getting connection to ${nodesUtils.encodeNodeId(targetNodeId)}`, - ); const targetNodeIdString = targetNodeId.toString() as NodeIdString; return await this.connectionLocks.withF( [targetNodeIdString, RWLockWriter, 'write'], async () => { const connAndTimer = this.connections.get(targetNodeIdString); - if (connAndTimer != null) { - this.logger.info( - `existing entry found for ${nodesUtils.encodeNodeId(targetNodeId)}`, - ); - return connAndTimer; - } - this.logger.info( - `no existing entry, creating connection to ${nodesUtils.encodeNodeId( - targetNodeId, - )}`, - ); + if (connAndTimer != null) return connAndTimer; // Creating the connection and set in map const targetAddress = await this.findNode(targetNodeId); if (targetAddress == null) { @@ -311,7 +297,7 @@ class NodeConnectionManager { }); // We can assume connection was established and destination was valid, // we can add the target to the nodeGraph - await this.nodeManager?.setNode(targetNodeId, targetAddress, false); + await this.nodeManager?.setNode(targetNodeId, targetAddress); // Creating TTL timeout const timeToLiveTimer = setTimeout(async () => { await this.destroyConnection(targetNodeId); @@ -379,14 +365,18 @@ class NodeConnectionManager { * @param nodeId Node ID of the node we are connecting to * @param proxyHost Proxy host of the reverse proxy * @param proxyPort Proxy port of the reverse proxy - * @param timer Connection timeout timer + * @param ctx */ public async holePunchForward( nodeId: NodeId, proxyHost: Host, proxyPort: Port, - timer?: Timer, + ctx?: ContextTimed, ): Promise { + const timer = + ctx?.timer.getTimeout() != null + ? timerStart(ctx.timer.getTimeout()) + : undefined; await this.proxy.openConnectionForward(nodeId, proxyHost, proxyPort, timer); } @@ -394,22 +384,31 @@ class NodeConnectionManager { * Retrieves the node address. If an entry doesn't exist in the db, then * proceeds to locate it using Kademlia. 
* @param targetNodeId Id of the node we are tying to find - * @param options + * @param ignoreRecentOffline skips nodes that are within their backoff period + * @param ctx */ + public findNode( + targetNodeId: NodeId, + ignoreRecentOffline?: boolean, + ctx?: Partial, + ): PromiseCancellable; @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + @timedCancellable(true, 20000) public async findNode( targetNodeId: NodeId, - options: { signal?: AbortSignal } = {}, + ignoreRecentOffline: boolean = false, + @context ctx: ContextTimed, ): Promise { - const { signal } = { ...options }; // First check if we already have an existing ID -> address record let address = (await this.nodeGraph.getNode(targetNodeId))?.address; // Otherwise, attempt to locate it by contacting network address = address ?? - (await this.getClosestGlobalNodes(targetNodeId, undefined, { - signal, - })); + (await this.getClosestGlobalNodes( + targetNodeId, + ignoreRecentOffline, + ctx, + )); // TODO: This currently just does one iteration return address; } @@ -426,24 +425,26 @@ class NodeConnectionManager { * port). * @param targetNodeId ID of the node attempting to be found (i.e. attempting * to find its IP address and port) - * @param timer Connection timeout timer - * @param options + * @param ignoreRecentOffline skips nodes that are within their backoff period + * @param ctx * @returns whether the target node was located in the process */ + public getClosestGlobalNodes( + targetNodeId: NodeId, + ignoreRecentOffline?: boolean, + ctx?: Partial, + ): PromiseCancellable; @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + @timedCancellable(true, 20000) public async getClosestGlobalNodes( targetNodeId: NodeId, - timer?: Timer, - options: { signal?: AbortSignal } = {}, + ignoreRecentOffline: boolean = false, + @context ctx: ContextTimed, ): Promise { const localNodeId = this.keyManager.getNodeId(); - const { signal } = { ...options }; // Let foundTarget: boolean = false; let foundAddress: NodeAddress | undefined = undefined; // Get the closest alpha nodes to the target node (set as shortlist) - // FIXME? this is an array. Shouldn't it be a set? - // It's possible for this to grow faster than we can consume it, - // doubly so if we allow duplicates const shortlist = await this.nodeGraph.getClosestNodes( targetNodeId, this.initialClosestNodes, @@ -458,11 +459,10 @@ class NodeConnectionManager { // Not sufficient to simply check if there's already a pre-existing connection // in nodeConnections - what if there's been more than 1 invocation of // getClosestGlobalNodes()? 
- const contacted: { [nodeId: string]: boolean } = {}; + const contacted: Set<string> = new Set(); // Iterate until we've found and contacted k nodes - while (Object.keys(contacted).length <= this.nodeGraph.nodeBucketLimit) { - if (signal?.aborted) throw new nodesErrors.ErrorNodeAborted(); - // While (!foundTarget) { + while (contacted.size <= this.nodeGraph.nodeBucketLimit) { + if (ctx.signal?.aborted) return; // Remove the node from the front of the array const nextNode = shortlist.shift(); // If we have no nodes left in the shortlist, then stop if (nextNode == null) { break; } const [nextNodeId, nextNodeAddress] = nextNode; // Skip if the node has already been contacted - if (contacted[nextNodeId]) { - continue; - } + if (contacted.has(nextNodeId.toString())) continue; + if (ignoreRecentOffline && this.hasBackoff(nextNodeId)) continue; // Connect to the node (check if pre-existing connection exists, otherwise // create a new one) if ( nextNodeId, nextNodeAddress.address.host, nextNodeAddress.address.port, + ctx, ) ) { await this.nodeManager!.setNode(nextNodeId, nextNodeAddress.address); + this.removeBackoff(nextNodeId); } else { + this.increaseBackoff(nextNodeId); continue; } contacted.add(nextNodeId.toString()); // Ask the node to get their own closest nodes to the target - const foundClosest = await this.getRemoteNodeClosestNodes( - nextNodeId, - targetNodeId, - timer, - ); + let foundClosest: Array<[NodeId, NodeData]>; + try { + foundClosest = await this.getRemoteNodeClosestNodes( + nextNodeId, + targetNodeId, + ctx, + ); + } catch (e) { + if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) return; + throw e; + } + if (foundClosest.length === 0) continue; // Check to see if any of these are the target node. At the same time, add // them to the shortlist for (const [nodeId, nodeData] of foundClosest) { - if (signal?.aborted) throw new nodesErrors.ErrorNodeAborted(); + if (ctx.signal?.aborted) return; // Ignore any nodes that have been contacted or our own node if (contacted.has(nodeId.toString()) || localNodeId.equals(nodeId)) { continue; } nodeId, nodeData.address.host, nodeData.address.port, + ctx, )) ) { await this.nodeManager!.setNode(nodeId, nodeData.address); } @@ -535,6 +545,22 @@ } }); } + // If the found nodes are less than nodeBucketLimit then + // we expect that refresh buckets won't find anything new + if (contacted.size < this.nodeGraph.nodeBucketLimit) { + // Reset the delay on all refresh bucket tasks + for ( + let bucketIndex = 0; + bucketIndex < this.nodeGraph.nodeIdBits; + bucketIndex++ + ) { + await this.nodeManager?.updateRefreshBucketDelay( + bucketIndex, + undefined, + true, + ); + } + } return foundAddress; } /** * Retrieves the closest nodes to the given * target node ID. 
* @param nodeId the node ID to search on * @param targetNodeId the node ID to find other nodes closest to it - * @param timer Connection timeout timer - * @returns list of nodes and their IP/port that are closest to the target + * @param ctx */ + public getRemoteNodeClosestNodes( + nodeId: NodeId, + targetNodeId: NodeId, + ctx?: Partial, + ): PromiseCancellable>; @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + @timedCancellable(true, 20000) public async getRemoteNodeClosestNodes( nodeId: NodeId, targetNodeId: NodeId, - timer?: Timer, + @context ctx: ContextTimed, ): Promise> { // Construct the message const nodeIdMessage = new nodesPB.Node(); nodeIdMessage.setNodeId(nodesUtils.encodeNodeId(targetNodeId)); - // Send through client - return this.withConnF( - nodeId, - async (connection) => { - const client = connection.getClient(); - const response = await client.nodesClosestLocalNodesGet(nodeIdMessage); - const nodes: Array<[NodeId, NodeData]> = []; - // Loop over each map element (from the returned response) and populate nodes - response.getNodeTableMap().forEach((address, nodeIdString: string) => { - const nodeId = nodesUtils.decodeNodeId(nodeIdString); - // If the nodeId is not valid we don't add it to the list of nodes - if (nodeId != null) { - nodes.push([ - nodeId, - { - address: { - host: address.getHost() as Host | Hostname, - port: address.getPort() as Port, - }, - // Not really needed - // But if it's needed then we need to add the information to the proto definition - lastUpdated: 0, - }, - ]); - } - }); - return nodes; - }, - timer, - ); - } - - /** - * Perform an initial database synchronisation: get k of the closest nodes - * from each seed node and add them to this database - * Establish a proxy connection to each node before adding it - * By default this operation is blocking, set `block` to false to make it - * non-blocking - */ - @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) - public async syncNodeGraph(block: boolean = true, timer?: Timer) { - this.logger.info('Syncing nodeGraph'); - for (const seedNodeId of this.getSeedNodes()) { - // Check if the connection is viable - try { - await this.getConnection(seedNodeId, timer); - } catch (e) { - if (e instanceof nodesErrors.ErrorNodeConnectionTimeout) continue; - throw e; - } - const nodes = await this.getRemoteNodeClosestNodes( - seedNodeId, - this.keyManager.getNodeId(), - timer, + try { + // Send through client + const timeout = ctx.timer.getTimeout(); + const response = await this.withConnF( + nodeId, + async (connection) => { + const client = connection.getClient(); + return await client.nodesClosestLocalNodesGet(nodeIdMessage); + }, + timeout === Infinity ? 
undefined : timerStart(timeout), ); - for (const [nodeId, nodeData] of nodes) { - if (!nodeId.equals(this.keyManager.getNodeId())) { - const pingAndAddNode = async () => { - const port = nodeData.address.port; - const host = await networkUtils.resolveHost(nodeData.address.host); - if (await this.pingNode(nodeId, host, port)) { - await this.nodeManager!.setNode(nodeId, nodeData.address, true); - } - }; - - if (!block) { - this.queue.push(pingAndAddNode); - } else { - try { - await pingAndAddNode(); - } catch (e) { - if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; - } - } - } - } - // Refreshing every bucket above the closest node - const refreshBuckets = async () => { - const [closestNode] = ( - await this.nodeGraph.getClosestNodes(this.keyManager.getNodeId(), 1) - ).pop()!; - const [bucketIndex] = this.nodeGraph.bucketIndex(closestNode); - for (let i = bucketIndex; i < this.nodeGraph.nodeIdBits; i++) { - this.nodeManager?.refreshBucketQueueAdd(i); + const nodes: Array<[NodeId, NodeData]> = []; + // Loop over each map element (from the returned response) and populate nodes + response.getNodeTableMap().forEach((address, nodeIdString: string) => { + const nodeId = nodesUtils.decodeNodeId(nodeIdString); + // If the nodeId is not valid we don't add it to the list of nodes + if (nodeId != null) { + nodes.push([ + nodeId, + { + address: { + host: address.getHost() as Host | Hostname, + port: address.getPort() as Port, + }, + // Not really needed + // But if it's needed then we need to add the information to the proto definition + lastUpdated: 0, + }, + ]); } - }; - if (!block) { - this.queue.push(refreshBuckets); - } else { - await refreshBuckets(); + }); + return nodes; + } catch (e) { + if (nodesUtils.isConnectionError(e)) { + return []; } + throw e; } } @@ -726,9 +703,11 @@ class NodeConnectionManager { */ @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) public getSeedNodes(): Array { - return Object.keys(this.seedNodes).map( - (nodeIdEncoded) => nodesUtils.decodeNodeId(nodeIdEncoded)!, - ); + return Object.keys(this.seedNodes).map((nodeIdEncoded) => { + const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); + if (nodeId == null) never(); + return nodeId; + }); } /** @@ -738,14 +717,21 @@ class NodeConnectionManager { * @param nodeId - NodeId of the target * @param host - Host of the target node * @param port - Port of the target node - * @param timer Connection timeout timer + * @param ctx */ + public pingNode( + nodeId: NodeId, + host: Host | Hostname, + port: Port, + ctx?: Partial, + ): PromiseCancellable; @ready(new nodesErrors.ErrorNodeConnectionManagerNotRunning()) + @timedCancellable(true, 2000) public async pingNode( nodeId: NodeId, host: Host | Hostname, port: Port, - timer?: Timer, + @context ctx: ContextTimed, ): Promise { host = await networkUtils.resolveHost(host); // If we can create a connection then we have punched though the NAT, @@ -767,20 +753,51 @@ class NodeConnectionManager { signature, ); }); - const forwardPunchPromise = this.holePunchForward( - nodeId, - host, - port, - timer, - ); + const forwardPunchPromise = this.holePunchForward(nodeId, host, port, ctx); + + const abortPromise = new Promise((_resolve, reject) => { + if (ctx.signal.aborted) throw ctx.signal.reason; + ctx.signal.addEventListener('abort', () => reject(ctx.signal.reason)); + }); try { - await Promise.any([forwardPunchPromise, ...holePunchPromises]); + await Promise.race([ + Promise.any([forwardPunchPromise, ...holePunchPromises]), + abortPromise, + ]); } catch (e) { 
return false; } return true; } + + protected hasBackoff(nodeId: NodeId): boolean { + const backoff = this.nodesBackoffMap.get(nodeId.toString()); + if (backoff == null) return false; + const currentTime = performance.now() + performance.timeOrigin; + const backOffDeadline = backoff.lastAttempt + backoff.delay; + return currentTime < backOffDeadline; + } + + protected increaseBackoff(nodeId: NodeId): void { + const backoff = this.nodesBackoffMap.get(nodeId.toString()); + const currentTime = performance.now() + performance.timeOrigin; + if (backoff == null) { + this.nodesBackoffMap.set(nodeId.toString(), { + lastAttempt: currentTime, + delay: this.backoffDefault, + }); + } else { + this.nodesBackoffMap.set(nodeId.toString(), { + lastAttempt: currentTime, + delay: backoff.delay * this.backoffMultiplier, + }); + } + } + + protected removeBackoff(nodeId: NodeId): void { + this.nodesBackoffMap.delete(nodeId.toString()); + } } export default NodeConnectionManager; diff --git a/src/nodes/NodeGraph.ts b/src/nodes/NodeGraph.ts index fda9caba1..5f65db114 100644 --- a/src/nodes/NodeGraph.ts +++ b/src/nodes/NodeGraph.ts @@ -151,6 +151,15 @@ class NodeGraph { return space; } + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) + public async lockBucket(bucketIndex: number, tran: DBTransaction) { + const keyPath = [ + ...this.nodeGraphMetaDbPath, + nodesUtils.bucketKey(bucketIndex), + ]; + return await tran.lock(keyPath.join('')); + } + @ready(new nodesErrors.ErrorNodeGraphNotRunning()) public async getNode( nodeId: NodeId, diff --git a/src/nodes/NodeManager.ts b/src/nodes/NodeManager.ts index aa0740ee5..7d313f62c 100644 --- a/src/nodes/NodeManager.ts +++ b/src/nodes/NodeManager.ts @@ -1,31 +1,36 @@ import type { DB, DBTransaction } from '@matrixai/db'; import type NodeConnectionManager from './NodeConnectionManager'; import type NodeGraph from './NodeGraph'; -import type Queue from './Queue'; import type KeyManager from '../keys/KeyManager'; import type { PublicKeyPem } from '../keys/types'; import type Sigchain from '../sigchain/Sigchain'; import type { ChainData, ChainDataEncoded } from '../sigchain/types'; -import type { - NodeId, - NodeAddress, - NodeBucket, - NodeBucketIndex, -} from '../nodes/types'; +import type { NodeId, NodeAddress, NodeBucket, NodeBucketIndex } from './types'; import type { ClaimEncoded } from '../claims/types'; -import type { Timer } from '../types'; -import type { PromiseDeconstructed } from '../types'; +import type TaskManager from '../tasks/TaskManager'; +import type { TaskHandler, TaskHandlerId, Task } from '../tasks/types'; +import type { ContextTimed } from 'contexts/types'; +import type { PromiseCancellable } from '@matrixai/async-cancellable'; +import type { Host, Port } from '../network/types'; import Logger from '@matrixai/logger'; import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; +import { Semaphore, Lock } from '@matrixai/async-locks'; +import { IdInternal } from '@matrixai/id'; +import { Timer } from '@matrixai/timer'; import * as nodesErrors from './errors'; import * as nodesUtils from './utils'; +import * as tasksErrors from '../tasks/errors'; +import { timedCancellable, context } from '../contexts'; import * as networkUtils from '../network/utils'; import * as validationUtils from '../validation/utils'; import * as utilsPB from '../proto/js/polykey/v1/utils/utils_pb'; import * as claimsErrors from '../claims/errors'; import * as sigchainUtils from '../sigchain/utils'; import * as claimsUtils from '../claims/utils'; -import { promise, 
timerStart } from '../utils/utils'; +import { never } from '../utils/utils'; + +const abortEphemeralTaskReason = Symbol('abort ephemeral task reason'); +const abortSingletonTaskReason = Symbol('abort singleton task reason'); interface NodeManager extends StartStop {} @StartStop() @@ -36,19 +41,80 @@ class NodeManager { protected keyManager: KeyManager; protected nodeConnectionManager: NodeConnectionManager; protected nodeGraph: NodeGraph; - protected queue: Queue; - // Refresh bucket timer - protected refreshBucketDeadlineMap: Map = new Map(); - protected refreshBucketTimer: NodeJS.Timer; - protected refreshBucketNext: NodeBucketIndex; - public readonly refreshBucketTimerDefault; - protected refreshBucketQueue: Set = new Set(); - protected refreshBucketQueueRunning: boolean = false; - protected refreshBucketQueueRunner: Promise; - protected refreshBucketQueuePlug_: PromiseDeconstructed = promise(); - protected refreshBucketQueueDrained_: PromiseDeconstructed = promise(); - protected refreshBucketQueuePause_: PromiseDeconstructed = promise(); - protected refreshBucketQueueAbortController: AbortController; + protected taskManager: TaskManager; + protected refreshBucketDelay: number; + protected refreshBucketDelayJitter: number; + protected pendingNodes: Map> = new Map(); + + public readonly basePath = this.constructor.name; + protected refreshBucketHandler: TaskHandler = async ( + ctx, + _taskInfo, + bucketIndex, + ) => { + await this.refreshBucket(bucketIndex, ctx); + // When completed reschedule the task + const jitter = nodesUtils.refreshBucketsDelayJitter( + this.refreshBucketDelay, + this.refreshBucketDelayJitter, + ); + await this.taskManager.scheduleTask({ + delay: this.refreshBucketDelay + jitter, + handlerId: this.refreshBucketHandlerId, + lazy: true, + parameters: [bucketIndex], + path: [this.basePath, this.refreshBucketHandlerId, `${bucketIndex}`], + priority: 0, + }); + }; + public readonly refreshBucketHandlerId = + `${this.basePath}.${this.refreshBucketHandler.name}` as TaskHandlerId; + protected gcBucketHandler: TaskHandler = async ( + ctx, + _taskInfo, + bucketIndex: number, + ) => { + await this.garbageCollectBucket(bucketIndex, 1500, ctx); + // Checking for any new pending tasks + const pendingNodesRemaining = this.pendingNodes.get(bucketIndex); + if (pendingNodesRemaining == null || pendingNodesRemaining.size === 0) { + return; + } + // Re-schedule the task + await this.setupGCTask(bucketIndex); + }; + public readonly gcBucketHandlerId = + `${this.basePath}.${this.gcBucketHandler.name}` as TaskHandlerId; + protected pingAndSetNodeHandler: TaskHandler = async ( + ctx, + _taskInfo, + nodeIdEncoded: string, + host: Host, + port: Port, + ) => { + const nodeId = nodesUtils.decodeNodeId(nodeIdEncoded); + if (nodeId == null) { + this.logger.error( + `pingAndSetNodeHandler received invalid NodeId: ${nodeIdEncoded}`, + ); + never(); + } + const host_ = await networkUtils.resolveHost(host); + if ( + await this.pingNode(nodeId, { host: host_, port }, { signal: ctx.signal }) + ) { + await this.setNode( + nodeId, + { host: host_, port }, + false, + false, + 1500, + ctx, + ); + } + }; + public readonly pingAndSetNodeHandlerId: TaskHandlerId = + `${this.basePath}.${this.pingAndSetNodeHandler.name}` as TaskHandlerId; constructor({ db, @@ -56,8 +122,9 @@ class NodeManager { sigchain, nodeConnectionManager, nodeGraph, - queue, - refreshBucketTimerDefault = 3600000, // 1 hour in milliseconds + taskManager, + refreshBucketDelay = 3600000, // 1 hour in milliseconds + refreshBucketDelayJitter = 
0.5, // Multiple of refreshBucketDelay to jitter by logger, }: { db: DB; @@ -65,8 +132,9 @@ class NodeManager { sigchain: Sigchain; nodeConnectionManager: NodeConnectionManager; nodeGraph: NodeGraph; - queue: Queue; - refreshBucketTimerDefault?: number; + taskManager: TaskManager; + refreshBucketDelay?: number; + refreshBucketDelayJitter?: number; logger?: Logger; }) { this.logger = logger ?? new Logger(this.constructor.name); @@ -75,21 +143,51 @@ class NodeManager { this.sigchain = sigchain; this.nodeConnectionManager = nodeConnectionManager; this.nodeGraph = nodeGraph; - this.queue = queue; - this.refreshBucketTimerDefault = refreshBucketTimerDefault; + this.taskManager = taskManager; + this.refreshBucketDelay = refreshBucketDelay; + // Clamped from 0 to 1 inclusive + this.refreshBucketDelayJitter = Math.max( + 0, + Math.min(refreshBucketDelayJitter, 1), + ); } public async start() { this.logger.info(`Starting ${this.constructor.name}`); - this.startRefreshBucketTimers(); - this.refreshBucketQueueRunner = this.startRefreshBucketQueue(); + this.logger.info(`Registering handler for setNode`); + this.taskManager.registerHandler( + this.refreshBucketHandlerId, + this.refreshBucketHandler, + ); + this.taskManager.registerHandler( + this.gcBucketHandlerId, + this.gcBucketHandler, + ); + this.taskManager.registerHandler( + this.pingAndSetNodeHandlerId, + this.pingAndSetNodeHandler, + ); + await this.setupRefreshBucketTasks(); this.logger.info(`Started ${this.constructor.name}`); } public async stop() { this.logger.info(`Stopping ${this.constructor.name}`); - await this.stopRefreshBucketTimers(); - await this.stopRefreshBucketQueue(); + this.logger.info('Cancelling ephemeral tasks'); + const tasks: Array> = []; + for await (const task of this.taskManager.getTasks('asc', false, [ + this.basePath, + ])) { + tasks.push(task.promise()); + task.cancel(abortEphemeralTaskReason); + } + // We don't care about the result, only that they've ended + await Promise.allSettled(tasks); + this.logger.info('Cancelled ephemeral tasks'); + this.logger.info(`Unregistering handler for setNode`); + this.taskManager.deregisterHandler(this.refreshBucketHandlerId); + this.taskManager.deregisterHandler(this.gcBucketHandlerId); + this.taskManager.deregisterHandler(this.pingAndSetNodeHandlerId); this.logger.info(`Stopped ${this.constructor.name}`); } @@ -98,17 +196,24 @@ class NodeManager { * @return true if online, false if offline * @param nodeId - NodeId of the node we're pinging * @param address - Optional Host and Port we want to ping - * @param timer Connection timeout timer + * @param ctx */ - public async pingNode( + public pingNode( nodeId: NodeId, address?: NodeAddress, - timer?: Timer, + ctx?: Partial, + ): PromiseCancellable; + @timedCancellable(true, 2000) + public async pingNode( + nodeId: NodeId, + address: NodeAddress | undefined, + @context ctx: ContextTimed, ): Promise { // We need to attempt a connection using the proxies // For now we will just do a forward connect + relay message const targetAddress = - address ?? (await this.nodeConnectionManager.findNode(nodeId)); + address ?? 
+ (await this.nodeConnectionManager.findNode(nodeId, false, ctx)); if (targetAddress == null) { throw new nodesErrors.ErrorNodeGraphNodeIdNotFound(); } @@ -117,7 +222,7 @@ nodeId, targetHost, targetAddress.port, - timer, + ctx, ); } @@ -396,19 +501,31 @@ * This operation is blocking by default - set `block` to false to make it non-blocking * @param nodeId - Id of the node we wish to add * @param nodeAddress - Expected address of the node we want to add - * @param block - Flag for if the operation should block or utilize the async queue + * @param block - When true it will wait for any garbage collection to finish before returning. * @param force - Flag for if we want to add the node without authenticating or if the bucket is full. * This will drop the oldest node in favor of the new. - * @param timeout Connection timeout + * @param pingTimeout - Timeout for each ping operation during garbage collection. + * @param ctx * @param tran */ + public setNode( + nodeId: NodeId, + nodeAddress: NodeAddress, + block?: boolean, + force?: boolean, + pingTimeout?: number, + ctx?: Partial<ContextTimed>, + tran?: DBTransaction, + ): PromiseCancellable<void>; @ready(new nodesErrors.ErrorNodeManagerNotRunning()) + @timedCancellable(true, 20000) public async setNode( nodeId: NodeId, nodeAddress: NodeAddress, - block: boolean = true, + block: boolean = false, force: boolean = false, - timeout?: number, + pingTimeout: number = 1500, + @context ctx: ContextTimed, tran?: DBTransaction, ): Promise<void> { // We don't want to add our own node @@ -419,7 +536,7 @@ if (tran == null) { return this.db.withTransactionF((tran) => - this.setNode(nodeId, nodeAddress, block, force, timeout, tran), + this.setNode(nodeId, nodeAddress, block, force, pingTimeout, ctx, tran), ); } @@ -431,9 +548,11 @@ // We need to ping the oldest node. If the ping succeeds we need to update // the lastUpdated of the oldest node and drop the new one. If the ping // fails we delete the old node and add in the new one. 
+ const [bucketIndex] = this.nodeGraph.bucketIndex(nodeId); + // To avoid conflict we want to lock on the bucket index + await this.nodeGraph.lockBucket(bucketIndex, tran); const nodeData = await this.nodeGraph.getNode(nodeId, tran); // If this is a new entry, check the bucket limit - const [bucketIndex] = this.nodeGraph.bucketIndex(nodeId); const count = await this.nodeGraph.getBucketMetaProp( bucketIndex, 'count', @@ -444,15 +563,20 @@ class NodeManager { // We want to add or update the node await this.nodeGraph.setNode(nodeId, nodeAddress, tran); // Updating the refreshBucket timer - this.refreshBucketUpdateDeadline(bucketIndex); + await this.updateRefreshBucketDelay( + bucketIndex, + this.refreshBucketDelay, + true, + tran, + ); } else { // We want to add a node but the bucket is full - // We need to ping the oldest node if (force) { // We just add the new node anyway without checking the old one const oldNodeId = ( await this.nodeGraph.getOldestNode(bucketIndex, 1, tran) - ).pop()!; + ).pop(); + if (oldNodeId == null) never(); this.logger.debug( `Force was set, removing ${nodesUtils.encodeNodeId( oldNodeId, @@ -461,80 +585,188 @@ class NodeManager { await this.nodeGraph.unsetNode(oldNodeId, tran); await this.nodeGraph.setNode(nodeId, nodeAddress, tran); // Updating the refreshBucket timer - this.refreshBucketUpdateDeadline(bucketIndex); - return; - } else if (block) { - this.logger.debug( - `Bucket was full and blocking was true, garbage collecting old nodes to add ${nodesUtils.encodeNodeId( - nodeId, - )}`, - ); - await this.garbageCollectOldNode( + await this.updateRefreshBucketDelay( bucketIndex, - nodeId, - nodeAddress, - timeout, - ); - } else { - this.logger.debug( - `Bucket was full and blocking was false, adding ${nodesUtils.encodeNodeId( - nodeId, - )} to queue`, - ); - // Re-attempt this later asynchronously by adding the the queue - this.queue.push(() => - this.setNode(nodeId, nodeAddress, true, false, timeout), + this.refreshBucketDelay, + true, + tran, ); + return; } + this.logger.debug( + `Bucket was full, adding ${nodesUtils.encodeNodeId( + nodeId, + )} to pending list`, + ); + // Add the node to the pending nodes list + await this.addPendingNode( + bucketIndex, + nodeId, + nodeAddress, + block, + pingTimeout, + ctx, + tran, + ); + } + } + + protected garbageCollectBucket( + bucketIndex: number, + pingTimeout?: number, + ctx?: Partial, + tran?: DBTransaction, + ): PromiseCancellable; + @timedCancellable(true, 20000) + protected async garbageCollectBucket( + bucketIndex: number, + pingTimeout: number = 1500, + @context ctx: ContextTimed, + tran?: DBTransaction, + ): Promise { + if (tran == null) { + return this.db.withTransactionF((tran) => + this.garbageCollectBucket(bucketIndex, pingTimeout, ctx, tran), + ); + } + + // This needs to: + // 1. Iterate over every node within the bucket pinging K at a time + // 2. remove any un-responsive nodes until there is room of all pending + // or run out of existing nodes + // 3. fill in the bucket with pending nodes until full + // 4. 
throw out remaining pending nodes + + const pendingNodes = this.pendingNodes.get(bucketIndex); + // No nodes mean nothing to do + if (pendingNodes == null || pendingNodes.size === 0) return; + this.pendingNodes.set(bucketIndex, new Map()); + // Locking on bucket + await this.nodeGraph.lockBucket(bucketIndex, tran); + const semaphore = new Semaphore(3); + + // Iterating over existing nodes + const bucket = await this.getBucket(bucketIndex, tran); + if (bucket == null) never(); + let removedNodes = 0; + const unsetLock = new Lock(); + const pendingPromises: Array> = []; + for (const [nodeId, nodeData] of bucket) { + if (removedNodes >= pendingNodes.size) break; + await semaphore.waitForUnlock(); + if (ctx.signal?.aborted === true) break; + const [semaphoreReleaser] = await semaphore.lock()(); + pendingPromises.push( + (async () => { + // Ping and remove or update node in bucket + const pingCtx = { + signal: ctx.signal, + timer: new Timer({ delay: pingTimeout }), + }; + if (await this.pingNode(nodeId, nodeData.address, pingCtx)) { + // Succeeded so update + await this.setNode( + nodeId, + nodeData.address, + false, + false, + undefined, + undefined, + tran, + ); + } else { + // We need to lock this since it's concurrent + // and shares the transaction + await unsetLock.withF(async () => { + await this.unsetNode(nodeId, tran); + removedNodes += 1; + }); + } + })() + // Clean ensure semaphore is released + .finally(async () => await semaphoreReleaser()), + ); + } + // Wait for pending pings to complete + await Promise.all(pendingPromises); + // Fill in bucket with pending nodes + for (const [nodeIdString, address] of pendingNodes) { + if (removedNodes <= 0) break; + const nodeId = IdInternal.fromString(nodeIdString); + await this.setNode( + nodeId, + address, + false, + false, + undefined, + undefined, + tran, + ); + removedNodes -= 1; } } - private async garbageCollectOldNode( + protected async addPendingNode( bucketIndex: number, nodeId: NodeId, nodeAddress: NodeAddress, - timeout?: number, - ) { - const oldestNodeIds = await this.nodeGraph.getOldestNode(bucketIndex, 3); - // We want to concurrently ping the nodes - const pingPromises = oldestNodeIds.map((nodeId) => { - const doPing = async (): Promise<{ - nodeId: NodeId; - success: boolean; - }> => { - // This needs to return nodeId and ping result - const data = await this.nodeGraph.getNode(nodeId); - if (data == null) return { nodeId, success: false }; - const timer = timeout != null ? 
timerStart(timeout) : undefined; - const result = await this.pingNode(nodeId, nodeAddress, timer); - return { nodeId, success: result }; - }; - return doPing(); - }); - const pingResults = await Promise.all(pingPromises); - for (const { nodeId, success } of pingResults) { - if (success) { - // Ping succeeded, update the node - this.logger.debug( - `Ping succeeded for ${nodesUtils.encodeNodeId(nodeId)}`, - ); - const node = (await this.nodeGraph.getNode(nodeId))!; - await this.nodeGraph.setNode(nodeId, node.address); - // Updating the refreshBucket timer - this.refreshBucketUpdateDeadline(bucketIndex); - } else { - this.logger.debug(`Ping failed for ${nodesUtils.encodeNodeId(nodeId)}`); - // Otherwise we remove the node - await this.nodeGraph.unsetNode(nodeId); + block: boolean = false, + pingTimeout: number = 1500, + ctx?: ContextTimed, + tran?: DBTransaction, + ): Promise { + if (!this.pendingNodes.has(bucketIndex)) { + this.pendingNodes.set(bucketIndex, new Map()); + } + const pendingNodes = this.pendingNodes.get(bucketIndex); + pendingNodes!.set(nodeId.toString(), nodeAddress); + // No need to re-set it in the map, Maps are by reference + + // If set to blocking we just run the GC operation here + // without setting up a new task + if (block) { + await this.garbageCollectBucket(bucketIndex, pingTimeout, ctx, tran); + return; + } + await this.setupGCTask(bucketIndex); + } + + protected async setupGCTask(bucketIndex: number) { + // Check and start a 'garbageCollect` bucket task + let scheduled: boolean = false; + for await (const task of this.taskManager.getTasks('asc', true, [ + this.basePath, + this.gcBucketHandlerId, + `${bucketIndex}`, + ])) { + switch (task.status) { + case 'queued': + case 'active': + // Ignore active tasks + break; + case 'scheduled': + { + if (scheduled) { + // Duplicate scheduled are removed + task.cancel(abortSingletonTaskReason); + break; + } + scheduled = true; + } + break; + default: + task.cancel(abortSingletonTaskReason); + break; } } - // Check if we now have room and add the new node - const count = await this.nodeGraph.getBucketMetaProp(bucketIndex, 'count'); - if (count < this.nodeGraph.nodeBucketLimit) { - this.logger.debug(`Bucket ${bucketIndex} now has room, adding new node`); - await this.nodeGraph.setNode(nodeId, nodeAddress); - // Updating the refreshBucket timer - this.refreshBucketUpdateDeadline(bucketIndex); + if (!scheduled) { + // If none were found, schedule a new one + await this.taskManager.scheduleTask({ + handlerId: this.gcBucketHandlerId, + parameters: [bucketIndex], + path: [this.basePath, this.gcBucketHandlerId, `${bucketIndex}`], + lazy: true, + }); } } @@ -559,13 +791,17 @@ class NodeManager { * Connections during the search will will share node information with other * nodes. 
* @param bucketIndex - * @param options + * @param ctx */ + public refreshBucket( + bucketIndex: number, + ctx?: Partial, + ): PromiseCancellable; + @timedCancellable(true, 20000) public async refreshBucket( bucketIndex: NodeBucketIndex, - options: { signal?: AbortSignal } = {}, - ) { - const { signal } = { ...options }; + @context ctx: ContextTimed, + ): Promise { // We need to generate a random nodeId for this bucket const nodeId = this.keyManager.getNodeId(); const bucketRandomNodeId = nodesUtils.generateRandomNodeIdForBucket( @@ -573,169 +809,236 @@ class NodeManager { bucketIndex, ); // We then need to start a findNode procedure - await this.nodeConnectionManager.findNode(bucketRandomNodeId, { signal }); + await this.nodeConnectionManager.findNode(bucketRandomNodeId, true, ctx); } - // Refresh bucket activity timer methods + protected async setupRefreshBucketTasks(tran?: DBTransaction) { + if (tran == null) { + return this.db.withTransactionF((tran) => + this.setupRefreshBucketTasks(tran), + ); + } - private startRefreshBucketTimers() { - // Setting initial bucket to refresh - this.refreshBucketNext = 0; - // Setting initial deadline - this.refreshBucketTimerReset(this.refreshBucketTimerDefault); + this.logger.info('Setting up refreshBucket tasks'); + // 1. Iterate over existing tasks and reset the delay + const existingTasks: Array = new Array(this.nodeGraph.nodeIdBits); + for await (const task of this.taskManager.getTasks( + 'asc', + true, + [this.basePath, this.refreshBucketHandlerId], + tran, + )) { + const bucketIndex = parseInt(task.path[0]); + switch (task.status) { + case 'scheduled': + { + // If it's scheduled then reset delay + existingTasks[bucketIndex] = true; + // Total delay is refreshBucketDelay + time since task creation + const delay = + performance.now() + + performance.timeOrigin - + task.created.getTime() + + this.refreshBucketDelay + + nodesUtils.refreshBucketsDelayJitter( + this.refreshBucketDelay, + this.refreshBucketDelayJitter, + ); + await this.taskManager.updateTask(task.id, { delay }, tran); + } + break; + case 'queued': + case 'active': + // If it's running then leave it + existingTasks[bucketIndex] = true; + break; + default: + // Otherwise, ignore it, should be re-created + existingTasks[bucketIndex] = false; + } + } + // 2. 
Recreate any missing tasks for buckets for ( let bucketIndex = 0; - bucketIndex < this.nodeGraph.nodeIdBits; + bucketIndex < existingTasks.length; bucketIndex++ ) { - const deadline = Date.now() + this.refreshBucketTimerDefault; - this.refreshBucketDeadlineMap.set(bucketIndex, deadline); + const exists = existingTasks[bucketIndex]; + if (!exists) { + // Create a new task + this.logger.debug( + `Creating refreshBucket task for bucket ${bucketIndex}`, + ); + const jitter = nodesUtils.refreshBucketsDelayJitter( + this.refreshBucketDelay, + this.refreshBucketDelayJitter, + ); + await this.taskManager.scheduleTask({ + handlerId: this.refreshBucketHandlerId, + delay: this.refreshBucketDelay + jitter, + lazy: true, + parameters: [bucketIndex], + path: [this.basePath, this.refreshBucketHandlerId, `${bucketIndex}`], + priority: 0, + }); + } } + this.logger.info('Set up refreshBucket tasks'); } - private async stopRefreshBucketTimers() { - clearTimeout(this.refreshBucketTimer); - } - - private refreshBucketTimerReset(timeout: number) { - clearTimeout(this.refreshBucketTimer); - this.refreshBucketTimer = setTimeout(() => { - this.refreshBucketRefreshTimer(); - }, timeout); - } + @ready(new nodesErrors.ErrorNodeManagerNotRunning()) + public async updateRefreshBucketDelay( + bucketIndex: number, + delay: number = this.refreshBucketDelay, + lazy: boolean = true, + tran?: DBTransaction, + ): Promise { + if (tran == null) { + return this.db.withTransactionF((tran) => + this.updateRefreshBucketDelay(bucketIndex, delay, lazy, tran), + ); + } - public refreshBucketUpdateDeadline(bucketIndex: NodeBucketIndex) { - // Update the map deadline - this.refreshBucketDeadlineMap.set( - bucketIndex, - Date.now() + this.refreshBucketTimerDefault, + const jitter = nodesUtils.refreshBucketsDelayJitter( + delay, + this.refreshBucketDelayJitter, ); - // If the bucket was pending a refresh we remove it - this.refreshBucketQueueRemove(bucketIndex); - if (bucketIndex === this.refreshBucketNext) { - // Bucket is same as next bucket, this affects the timer - this.refreshBucketRefreshTimer(); + let foundTask: Task | undefined; + let existingTask = false; + for await (const task of this.taskManager.getTasks( + 'asc', + true, + [this.basePath, this.refreshBucketHandlerId, `${bucketIndex}`], + tran, + )) { + if (!existingTask) { + foundTask = task; + // Update the first one + // total delay is refreshBucketDelay + time since task creation + // time since task creation = now - creation time; + const delayNew = + performance.now() + + performance.timeOrigin - + task.created.getTime() + + delay + + jitter; + try { + await this.taskManager.updateTask(task.id, { delay: delayNew }); + existingTask = true; + } catch (e) { + if (e instanceof tasksErrors.ErrorTaskRunning) { + // Ignore running + existingTask = true; + } else if (!(e instanceof tasksErrors.ErrorTaskMissing)) { + throw e; + } + } + this.logger.debug( + `Updating refreshBucket task for bucket ${bucketIndex}`, + ); + } else { + // These are extra, so we cancel them + task.cancel(abortSingletonTaskReason); + this.logger.warn( + `Duplicate refreshBucket task was found for bucket ${bucketIndex}, cancelling`, + ); + } + } + if (!existingTask) { + this.logger.debug( + `No refreshBucket task for bucket ${bucketIndex}, new one was created`, + ); + foundTask = await this.taskManager.scheduleTask({ + delay: delay + jitter, + handlerId: this.refreshBucketHandlerId, + lazy: true, + parameters: [bucketIndex], + path: [this.basePath, this.refreshBucketHandlerId, `${bucketIndex}`], + 
priority: 0, + }); } + if (foundTask == null) never(); + return foundTask; } - private refreshBucketRefreshTimer() { - // Getting new closest deadline - let closestBucket = this.refreshBucketNext; - let closestDeadline = Date.now() + this.refreshBucketTimerDefault; - const now = Date.now(); - for (const [bucketIndex, deadline] of this.refreshBucketDeadlineMap) { - // Skip any queued buckets marked by 0 deadline - if (deadline === 0) continue; - if (deadline <= now) { - // Deadline for this has already passed, we add it to the queue - this.refreshBucketQueueAdd(bucketIndex); + /** + * Perform an initial database synchronisation: get k of the closest nodes + * from each seed node and add them to this database + * Establish a proxy connection to each node before adding it + * By default this operation is blocking, set `block` to false to make it + * non-blocking + */ + public syncNodeGraph( + block?: boolean, + ctx?: Partial, + ): PromiseCancellable; + @ready(new nodesErrors.ErrorNodeManagerNotRunning()) + @timedCancellable(true, 20000) + public async syncNodeGraph( + block: boolean = true, + @context ctx: ContextTimed, + ): Promise { + this.logger.info('Syncing nodeGraph'); + for (const seedNodeId of this.nodeConnectionManager.getSeedNodes()) { + // Check if the connection is viable + if ( + (await this.pingNode(seedNodeId, undefined, { signal: ctx.signal })) === + false + ) { continue; } - if (deadline < closestDeadline) { - closestBucket = bucketIndex; - closestDeadline = deadline; + const closestNodes = + await this.nodeConnectionManager.getRemoteNodeClosestNodes( + seedNodeId, + this.keyManager.getNodeId(), + ctx, + ); + const localNodeId = this.keyManager.getNodeId(); + for (const [nodeId, nodeData] of closestNodes) { + if (!localNodeId.equals(nodeId)) { + const pingAndSetTask = await this.taskManager.scheduleTask({ + delay: 0, + handlerId: this.pingAndSetNodeHandlerId, + lazy: !block, + parameters: [ + nodesUtils.encodeNodeId(nodeId), + nodeData.address.host, + nodeData.address.port, + ], + path: [this.basePath, this.pingAndSetNodeHandlerId], + // Need to be somewhat active so high priority + priority: 100, + }); + if (block) { + try { + await pingAndSetTask.promise(); + } catch (e) { + if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e; + } + } + } } - } - // Working out time left - const timeout = closestDeadline - Date.now(); - this.logger.debug( - `Refreshing refreshBucket timer with new timeout ${timeout}`, - ); - // Updating timer and next - this.refreshBucketNext = closestBucket; - this.refreshBucketTimerReset(timeout); - } - - // Refresh bucket async queue methods - - public refreshBucketQueueAdd(bucketIndex: NodeBucketIndex) { - this.logger.debug(`Adding bucket ${bucketIndex} to queue`); - this.refreshBucketDeadlineMap.set(bucketIndex, 0); - this.refreshBucketQueue.add(bucketIndex); - this.refreshBucketQueueUnplug(); - } - - public refreshBucketQueueRemove(bucketIndex: NodeBucketIndex) { - this.logger.debug(`Removing bucket ${bucketIndex} from queue`); - this.refreshBucketQueue.delete(bucketIndex); - } - - public async refreshBucketQueueDrained() { - await this.refreshBucketQueueDrained_.p; - } - - public refreshBucketQueuePause() { - this.logger.debug('Pausing refreshBucketQueue'); - this.refreshBucketQueuePause_ = promise(); - } - - public refreshBucketQueueResume() { - this.logger.debug('Resuming refreshBucketQueue'); - this.refreshBucketQueuePause_.resolveP(); - } - - private async startRefreshBucketQueue(): Promise { - this.refreshBucketQueueRunning = true; 
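
Both `setupRefreshBucketTasks` and `updateRefreshBucketDelay` above rebuild a task's `delay` with the same wall-clock arithmetic. The sketch below restates that calculation in isolation, assuming (as the inline comments state) that TaskManager measures `delay` from the task's creation time; the helper name `delayFromNow` is illustrative only.

// performance.now() + performance.timeOrigin is the current wall-clock time in
// milliseconds, on the same basis as Date.prototype.getTime().
function delayFromNow(createdAt: Date, baseDelay: number, jitter: number): number {
  const nowMs = performance.now() + performance.timeOrigin;
  const taskAgeMs = nowMs - createdAt.getTime();
  // Adding the task's age back means the task fires `baseDelay + jitter`
  // milliseconds from *now*, even though the delay is counted from creation.
  return taskAgeMs + baseDelay + jitter;
}

// e.g. a task created 60 seconds ago, rescheduled to fire roughly an hour from now:
// delayFromNow(task.created, 3_600_000, 0) is approximately 3_660_000
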
- this.refreshBucketQueuePlug(); - this.refreshBucketQueueResume(); - let iterator: IterableIterator | undefined; - this.refreshBucketQueueAbortController = new AbortController(); - const pace = async () => { - // Wait if paused - await this.refreshBucketQueuePause_.p; - // Wait for plug - await this.refreshBucketQueuePlug_.p; - if (iterator == null) { - iterator = this.refreshBucketQueue[Symbol.iterator](); + // Refreshing every bucket above the closest node + let closestNodeInfo = closestNodes.pop(); + if ( + closestNodeInfo != null && + this.keyManager.getNodeId().equals(closestNodeInfo[0]) + ) { + // Skip our nodeId if it exists + closestNodeInfo = closestNodes.pop(); } - return this.refreshBucketQueueRunning; - }; - while (await pace()) { - const bucketIndex: NodeBucketIndex = iterator?.next().value; - if (bucketIndex == null) { - // Iterator is empty, plug and continue - iterator = undefined; - this.refreshBucketQueuePlug(); - continue; + let index = this.nodeGraph.nodeIdBits; + if (closestNodeInfo != null) { + const [closestNode] = closestNodeInfo; + const [bucketIndex] = this.nodeGraph.bucketIndex(closestNode); + index = bucketIndex; } - // Do the job - this.logger.debug( - `processing refreshBucket for bucket ${bucketIndex}, ${this.refreshBucketQueue.size} left in queue`, - ); - try { - await this.refreshBucket(bucketIndex, { - signal: this.refreshBucketQueueAbortController.signal, - }); - } catch (e) { - if (e instanceof nodesErrors.ErrorNodeAborted) break; - throw e; + for (let i = index; i < this.nodeGraph.nodeIdBits; i++) { + const task = await this.updateRefreshBucketDelay(i, 0, !block); + if (block) await task.promise(); } - // Remove from queue and update bucket deadline - this.refreshBucketQueue.delete(bucketIndex); - this.refreshBucketUpdateDeadline(bucketIndex); } - this.logger.debug('startRefreshBucketQueue has ended'); - } - - private async stopRefreshBucketQueue(): Promise { - // Flag end and await queue finish - this.refreshBucketQueueAbortController.abort(); - this.refreshBucketQueueRunning = false; - this.refreshBucketQueueUnplug(); - this.refreshBucketQueueResume(); - } - - private refreshBucketQueuePlug() { - this.logger.debug('refresh bucket queue has plugged'); - this.refreshBucketQueuePlug_ = promise(); - this.refreshBucketQueueDrained_?.resolveP(); - } - - private refreshBucketQueueUnplug() { - this.logger.debug('refresh bucket queue has unplugged'); - this.refreshBucketQueueDrained_ = promise(); - this.refreshBucketQueuePlug_?.resolveP(); } } diff --git a/src/nodes/Queue.ts b/src/nodes/Queue.ts deleted file mode 100644 index ed2eaa06e..000000000 --- a/src/nodes/Queue.ts +++ /dev/null @@ -1,91 +0,0 @@ -import type { PromiseDeconstructed } from '../types'; -import Logger from '@matrixai/logger'; -import { StartStop, ready } from '@matrixai/async-init/dist/StartStop'; -import * as nodesErrors from './errors'; -import { promise } from '../utils'; - -interface Queue extends StartStop {} -@StartStop() -class Queue { - protected logger: Logger; - protected end: boolean = false; - protected queue: Array<() => Promise> = []; - protected runner: Promise; - protected plug_: PromiseDeconstructed = promise(); - protected drained_: PromiseDeconstructed = promise(); - - constructor({ logger }: { logger?: Logger }) { - this.logger = logger ?? 
new Logger(this.constructor.name);
-  }
-
-  public async start() {
-    this.logger.info(`Starting ${this.constructor.name}`);
-    const start = async () => {
-      this.logger.debug('Starting queue');
-      this.plug();
-      const pace = async () => {
-        await this.plug_.p;
-        return !this.end;
-      };
-      // While queue hasn't ended
-      while (await pace()) {
-        const job = this.queue.shift();
-        if (job == null) {
-          // If the queue is empty then we pause the queue
-          this.plug();
-          continue;
-        }
-        try {
-          await job();
-        } catch (e) {
-          if (!(e instanceof nodesErrors.ErrorNodeGraphSameNodeId)) throw e;
-        }
-      }
-      this.logger.debug('queue has ended');
-    };
-    this.runner = start();
-    this.logger.info(`Started ${this.constructor.name}`);
-  }
-
-  public async stop() {
-    this.logger.info(`Stopping ${this.constructor.name}`);
-    this.logger.debug('Stopping queue');
-    // Tell the queue runner to end
-    this.end = true;
-    this.unplug();
-    // Wait for runner to finish it's current job
-    await this.runner;
-    this.logger.info(`Stopped ${this.constructor.name}`);
-  }
-
-  /**
-   * This adds a setNode operation to the queue
-   */
-  public push(f: () => Promise<void>): void {
-    this.queue.push(f);
-    this.unplug();
-  }
-
-  @ready(new nodesErrors.ErrorQueueNotRunning())
-  public async drained(): Promise<void> {
-    await this.drained_.p;
-  }
-
-  private plug(): void {
-    this.logger.debug('Plugging queue');
-    // Pausing queue
-    this.plug_ = promise();
-    // Signaling queue is empty
-    this.drained_.resolveP();
-  }
-
-  private unplug(): void {
-    this.logger.debug('Unplugging queue');
-    // Starting queue
-    this.plug_.resolveP();
-    // Signalling queue is running
-    this.drained_ = promise();
-  }
-}
-
-export default Queue;
diff --git a/src/nodes/utils.ts b/src/nodes/utils.ts
index 1fe3c799d..f1c43b658 100644
--- a/src/nodes/utils.ts
+++ b/src/nodes/utils.ts
@@ -8,8 +8,11 @@ import type { KeyPath } from '@matrixai/db';
 import { IdInternal } from '@matrixai/id';
 import lexi from 'lexicographic-integer';
 import { utils as dbUtils } from '@matrixai/db';
+import * as nodesErrors from './errors';
 import { bytes2BigInt } from '../utils';
 import * as keysUtils from '../keys/utils';
+import * as grpcErrors from '../grpc/errors';
+import * as agentErrors from '../agent/errors';
 
 const sepBuffer = dbUtils.sep;
 
@@ -310,6 +313,38 @@ function generateRandomNodeIdForBucket(
   return xOrNodeId(nodeId, randomDistanceForBucket);
 }
 
+/**
+ * This is used to check if the given error is the result of a connection failure.
+ * Connection failures can happen due to the following.
+ * Failure to establish a connection,
+ * an existing connection fails,
+ * the GRPC client has been destroyed,
+ * or the NodeConnection has been destroyed.
+ * This is generally used to check the connection has failed
+ * before cleaning it up.
+ */
+function isConnectionError(e): boolean {
+  return (
+    e instanceof nodesErrors.ErrorNodeConnectionDestroyed ||
+    e instanceof grpcErrors.ErrorGRPC ||
+    e instanceof agentErrors.ErrorAgentClientDestroyed
+  );
+}
+
+/**
+ * This generates a random jitter offset based on the given delay and jitter
+ * multiplier. For example, a delay of 100 and a multiplier of 0.5 results in
+ * an offset between -25 and +25, giving an overall delay between 75 and 125.
+ * @param delay - base delay to 'jitter' around + * @param jitterMultiplier - jitter amount as a multiple of the delay + */ +function refreshBucketsDelayJitter( + delay: number, + jitterMultiplier: number, +): number { + return (Math.random() - 0.5) * delay * jitterMultiplier; +} + export { sepBuffer, encodeNodeId, @@ -330,4 +365,6 @@ export { generateRandomDistanceForBucket, xOrNodeId, generateRandomNodeIdForBucket, + isConnectionError, + refreshBucketsDelayJitter, }; diff --git a/src/tasks/TaskManager.ts b/src/tasks/TaskManager.ts index 6dc221def..d4c00b032 100644 --- a/src/tasks/TaskManager.ts +++ b/src/tasks/TaskManager.ts @@ -31,6 +31,7 @@ import * as utils from '../utils'; const abortSchedulingLoopReason = Symbol('abort scheduling loop reason'); const abortQueuingLoopReason = Symbol('abort queuing loop reason'); +interface TaskManager extends CreateDestroyStartStop {} @CreateDestroyStartStop( new tasksErrors.ErrorTaskManagerRunning(), new tasksErrors.ErrorTaskManagerDestroyed(), @@ -235,7 +236,6 @@ class TaskManager { * Stop the scheduling and queuing loop * This call is idempotent */ - @ready(new tasksErrors.ErrorTaskManagerNotRunning(), false, ['stopping']) public async stopProcessing(): Promise { await Promise.all([this.stopQueueing(), this.stopScheduling()]); } @@ -244,7 +244,6 @@ class TaskManager { * Stop the active tasks * This call is idempotent */ - @ready(new tasksErrors.ErrorTaskManagerNotRunning(), false, ['stopping']) public async stopTasks(): Promise { for (const [, activePromise] of this.activePromises) { activePromise.cancel(new tasksErrors.ErrorTaskStop()); diff --git a/tests/agent/GRPCClientAgent.test.ts b/tests/agent/GRPCClientAgent.test.ts index c7f710295..2a932aede 100644 --- a/tests/agent/GRPCClientAgent.test.ts +++ b/tests/agent/GRPCClientAgent.test.ts @@ -6,7 +6,7 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -41,7 +41,7 @@ describe(GRPCClientAgent.name, () => { let keyManager: KeyManager; let vaultManager: VaultManager; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -104,12 +104,16 @@ describe(GRPCClientAgent.name, () => { keyManager, logger, }); - queue = new Queue({ logger }); + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, logger, }); nodeManager = new NodeManager({ @@ -118,12 +122,12 @@ describe(GRPCClientAgent.name, () => { keyManager: keyManager, nodeGraph: nodeGraph, nodeConnectionManager: nodeConnectionManager, - queue, + taskManager, logger: logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); notificationsManager = await NotificationsManager.createNotificationsManager({ acl: acl, @@ -169,6 +173,8 @@ describe(GRPCClientAgent.name, () => { }); }, globalThis.defaultTimeout); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await testAgentUtils.closeTestAgentClient(client); await 
testAgentUtils.closeTestAgentServer(server); await vaultManager.stop(); @@ -176,13 +182,13 @@ describe(GRPCClientAgent.name, () => { await sigchain.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await nodeGraph.stop(); await gestaltGraph.stop(); await acl.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/agent/service/nodesCrossSignClaim.test.ts b/tests/agent/service/nodesCrossSignClaim.test.ts index 994ccd391..d405c0618 100644 --- a/tests/agent/service/nodesCrossSignClaim.test.ts +++ b/tests/agent/service/nodesCrossSignClaim.test.ts @@ -53,7 +53,7 @@ describe('nodesCrossSignClaim', () => { password, nodePath: path.join(dataDir, 'remoteNode'), keysConfig: { - rootKeyPairBits: 2048, + privateKeyPemOverride: globalRootKeyPems[1], }, seedNodes: {}, // Explicitly no seed nodes on startup networkConfig: { diff --git a/tests/agent/service/notificationsSend.test.ts b/tests/agent/service/notificationsSend.test.ts index 506941396..22d5eea14 100644 --- a/tests/agent/service/notificationsSend.test.ts +++ b/tests/agent/service/notificationsSend.test.ts @@ -8,7 +8,7 @@ import { createPrivateKey, createPublicKey } from 'crypto'; import { exportJWK, SignJWT } from 'jose'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -39,7 +39,7 @@ describe('notificationsSend', () => { let senderKeyManager: KeyManager; let dataDir: string; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -102,14 +102,16 @@ describe('notificationsSend', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -120,12 +122,12 @@ describe('notificationsSend', () => { nodeGraph, nodeConnectionManager, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -156,11 +158,12 @@ describe('notificationsSend', () => { }); }, globalThis.defaultTimeout); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await notificationsManager.stop(); await nodeConnectionManager.stop(); - await queue.stop(); await nodeManager.stop(); await sigchain.stop(); await sigchain.stop(); @@ -169,6 +172,7 @@ describe('notificationsSend', () => { await db.stop(); await senderKeyManager.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/bin/notifications/sendReadClear.test.ts 
b/tests/bin/notifications/sendReadClear.test.ts index f681e68bd..b70024554 100644 --- a/tests/bin/notifications/sendReadClear.test.ts +++ b/tests/bin/notifications/sendReadClear.test.ts @@ -315,6 +315,6 @@ describe('send/read/claim', () => { .map(JSON.parse); expect(readNotifications).toHaveLength(0); }, - globalThis.defaultTimeout * 2, + globalThis.defaultTimeout * 3, ); }); diff --git a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts index 0b9dd8c44..d4c64807e 100644 --- a/tests/client/service/gestaltsDiscoveryByIdentity.test.ts +++ b/tests/client/service/gestaltsDiscoveryByIdentity.test.ts @@ -6,7 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -45,7 +45,7 @@ describe('gestaltsDiscoveryByIdentity', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -113,14 +113,16 @@ describe('gestaltsDiscoveryByIdentity', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -131,12 +133,12 @@ describe('gestaltsDiscoveryByIdentity', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); discovery = await Discovery.createDiscovery({ db, keyManager, @@ -167,13 +169,14 @@ describe('gestaltsDiscoveryByIdentity', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await discovery.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); @@ -181,6 +184,7 @@ describe('gestaltsDiscoveryByIdentity', () => { await acl.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/gestaltsDiscoveryByNode.test.ts b/tests/client/service/gestaltsDiscoveryByNode.test.ts index d0d77b431..0354ed66f 100644 --- a/tests/client/service/gestaltsDiscoveryByNode.test.ts +++ b/tests/client/service/gestaltsDiscoveryByNode.test.ts @@ -6,7 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import GestaltGraph from '@/gestalts/GestaltGraph'; import ACL from '@/acl/ACL'; import KeyManager from '@/keys/KeyManager'; @@ -46,7 +46,7 @@ 
describe('gestaltsDiscoveryByNode', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -114,14 +114,16 @@ describe('gestaltsDiscoveryByNode', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -132,12 +134,12 @@ describe('gestaltsDiscoveryByNode', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.start(); discovery = await Discovery.createDiscovery({ db, keyManager, @@ -168,13 +170,14 @@ describe('gestaltsDiscoveryByNode', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await discovery.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); @@ -182,6 +185,7 @@ describe('gestaltsDiscoveryByNode', () => { await acl.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts index 052295ed7..ea0bc370d 100644 --- a/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByIdentity.test.ts @@ -9,7 +9,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import Discovery from '@/discovery/Discovery'; @@ -58,7 +58,7 @@ describe('gestaltsGestaltTrustByIdentity', () => { let discovery: Discovery; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; - let queue: Queue; + let taskManager: TaskManager; let nodeManager: NodeManager; let nodeConnectionManager: NodeConnectionManager; let nodeGraph: NodeGraph; @@ -173,14 +173,16 @@ describe('gestaltsGestaltTrustByIdentity', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -191,12 +193,12 @@ describe('gestaltsGestaltTrustByIdentity', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); await 
nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { host: node.proxy.getProxyHost(), port: node.proxy.getProxyPort(), @@ -233,12 +235,13 @@ describe('gestaltsGestaltTrustByIdentity', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await discovery.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); @@ -247,6 +250,7 @@ describe('gestaltsGestaltTrustByIdentity', () => { await acl.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/gestaltsGestaltTrustByNode.test.ts b/tests/client/service/gestaltsGestaltTrustByNode.test.ts index b32462ff5..200f45eb6 100644 --- a/tests/client/service/gestaltsGestaltTrustByNode.test.ts +++ b/tests/client/service/gestaltsGestaltTrustByNode.test.ts @@ -10,7 +10,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import Discovery from '@/discovery/Discovery'; @@ -103,7 +103,7 @@ describe('gestaltsGestaltTrustByNode', () => { let discovery: Discovery; let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; - let queue: Queue; + let taskManager: TaskManager; let nodeManager: NodeManager; let nodeConnectionManager: NodeConnectionManager; let nodeGraph: NodeGraph; @@ -181,14 +181,16 @@ describe('gestaltsGestaltTrustByNode', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -199,12 +201,12 @@ describe('gestaltsGestaltTrustByNode', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); await nodeManager.setNode(nodesUtils.decodeNodeId(nodeId)!, { host: node.proxy.getProxyHost(), port: node.proxy.getProxyPort(), @@ -241,12 +243,13 @@ describe('gestaltsGestaltTrustByNode', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await discovery.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); @@ -255,6 +258,7 @@ describe('gestaltsGestaltTrustByNode', () => { await acl.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/identitiesClaim.test.ts b/tests/client/service/identitiesClaim.test.ts index 1dcba0893..5be95e093 100644 --- a/tests/client/service/identitiesClaim.test.ts +++ b/tests/client/service/identitiesClaim.test.ts @@ -9,7 +9,7 @@ import os from 'os'; import 
Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import KeyManager from '@/keys/KeyManager'; import IdentitiesManager from '@/identities/IdentitiesManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -75,7 +75,7 @@ describe('identitiesClaim', () => { let testProvider: TestProvider; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let proxy: Proxy; @@ -128,19 +128,21 @@ describe('identitiesClaim', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ connConnectTime: 2000, proxy, keyManager, nodeGraph, - queue, + taskManager, logger: logger.getChild('NodeConnectionManager'), }); - await queue.start(); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await taskManager.startProcessing(); const clientService = { identitiesClaim: identitiesClaim({ authenticate, @@ -165,16 +167,18 @@ describe('identitiesClaim', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); - await queue.stop(); await nodeGraph.stop(); await sigchain.stop(); await proxy.stop(); await identitiesManager.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/nodesAdd.test.ts b/tests/client/service/nodesAdd.test.ts index fe28906de..0d8ccb29f 100644 --- a/tests/client/service/nodesAdd.test.ts +++ b/tests/client/service/nodesAdd.test.ts @@ -5,7 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -34,7 +34,7 @@ describe('nodesAdd', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -82,14 +82,16 @@ describe('nodesAdd', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -100,12 +102,12 @@ describe('nodesAdd', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); const clientService = { nodesAdd: nodesAdd({ authenticate, @@ -128,16 
+130,18 @@ describe('nodesAdd', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await sigchain.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/nodesClaim.test.ts b/tests/client/service/nodesClaim.test.ts index 55fe371d7..824161c99 100644 --- a/tests/client/service/nodesClaim.test.ts +++ b/tests/client/service/nodesClaim.test.ts @@ -7,7 +7,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import KeyManager from '@/keys/KeyManager'; import NotificationsManager from '@/notifications/NotificationsManager'; import ACL from '@/acl/ACL'; @@ -65,7 +65,7 @@ describe('nodesClaim', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -118,14 +118,16 @@ describe('nodesClaim', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -136,12 +138,12 @@ describe('nodesClaim', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -174,11 +176,12 @@ describe('nodesClaim', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await nodeGraph.stop(); await notificationsManager.stop(); await sigchain.stop(); @@ -186,6 +189,7 @@ describe('nodesClaim', () => { await acl.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/nodesFind.test.ts b/tests/client/service/nodesFind.test.ts index f8dd24b27..c58123a38 100644 --- a/tests/client/service/nodesFind.test.ts +++ b/tests/client/service/nodesFind.test.ts @@ -6,7 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -44,7 +44,7 @@ describe('nodesFind', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: 
NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let sigchain: Sigchain; let proxy: Proxy; @@ -91,20 +91,22 @@ describe('nodesFind', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), }); - await queue.start(); await nodeConnectionManager.start({ nodeManager: {} as NodeManager }); + await taskManager.startProcessing(); const clientService = { nodesFind: nodesFind({ authenticate, @@ -126,15 +128,17 @@ describe('nodesFind', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); - await queue.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/nodesPing.test.ts b/tests/client/service/nodesPing.test.ts index 5874207df..1e05faf36 100644 --- a/tests/client/service/nodesPing.test.ts +++ b/tests/client/service/nodesPing.test.ts @@ -5,7 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { Metadata } from '@grpc/grpc-js'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import KeyManager from '@/keys/KeyManager'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -43,7 +43,7 @@ describe('nodesPing', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let sigchain: Sigchain; @@ -91,14 +91,16 @@ describe('nodesPing', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -109,11 +111,11 @@ describe('nodesPing', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); const clientService = { nodesPing: nodesPing({ authenticate, @@ -135,15 +137,17 @@ describe('nodesPing', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await sigchain.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); - await queue.stop(); await proxy.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/notificationsClear.test.ts b/tests/client/service/notificationsClear.test.ts index 64aa78eb8..45551e501 100644 
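
The same TaskManager wiring recurs in every test file touched by this diff: create it lazily, start processing only after the node services are up, and stop it last during teardown. A condensed sketch of that ordering follows, using only calls that appear in these hunks; the `db` and `logger` fixtures are assumed to be provided by the surrounding test setup as in the real files.

import type { DB } from '@matrixai/db';
import type Logger from '@matrixai/logger';
import TaskManager from '@/tasks/TaskManager';

// Provided by the surrounding test fixtures in the real files
declare const db: DB;
declare const logger: Logger;

let taskManager: TaskManager;

beforeEach(async () => {
  // Lazy creation: handlers and tasks can be registered before any loop runs
  taskManager = await TaskManager.createTaskManager({ db, logger, lazy: true });
  // ...construct NodeConnectionManager / NodeManager with { taskManager }...
  // ...await nodeManager.start(); await nodeConnectionManager.start({ nodeManager });...
  await taskManager.startProcessing(); // Scheduled tasks only execute from here on
});

afterEach(async () => {
  await taskManager.stopProcessing(); // Stop the scheduling/queueing loops first
  await taskManager.stopTasks();      // Then cancel anything still active
  // ...stop node services, db, keyManager...
  await taskManager.stop();           // TaskManager itself is stopped last
});
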
--- a/tests/client/service/notificationsClear.test.ts +++ b/tests/client/service/notificationsClear.test.ts @@ -5,7 +5,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -41,7 +41,7 @@ describe('notificationsClear', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -95,14 +95,16 @@ describe('notificationsClear', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -113,12 +115,12 @@ describe('notificationsClear', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -150,18 +152,20 @@ describe('notificationsClear', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await sigchain.stop(); await proxy.stop(); await acl.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/notificationsRead.test.ts b/tests/client/service/notificationsRead.test.ts index a39860841..07faca128 100644 --- a/tests/client/service/notificationsRead.test.ts +++ b/tests/client/service/notificationsRead.test.ts @@ -6,7 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -116,7 +116,7 @@ describe('notificationsRead', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -170,14 +170,16 @@ describe('notificationsRead', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, 
+ taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -188,12 +190,12 @@ describe('notificationsRead', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.start(); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -225,6 +227,8 @@ describe('notificationsRead', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await notificationsManager.stop(); @@ -232,11 +236,11 @@ describe('notificationsRead', () => { await nodeGraph.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await proxy.stop(); await acl.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/client/service/notificationsSend.test.ts b/tests/client/service/notificationsSend.test.ts index 3c5aecbce..0841ef7c2 100644 --- a/tests/client/service/notificationsSend.test.ts +++ b/tests/client/service/notificationsSend.test.ts @@ -6,7 +6,7 @@ import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { Metadata } from '@grpc/grpc-js'; import { DB } from '@matrixai/db'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import KeyManager from '@/keys/KeyManager'; import GRPCServer from '@/grpc/GRPCServer'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -52,7 +52,7 @@ describe('notificationsSend', () => { const authToken = 'abc123'; let dataDir: string; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let notificationsManager: NotificationsManager; @@ -105,14 +105,16 @@ describe('notificationsSend', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -123,12 +125,12 @@ describe('notificationsSend', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); notificationsManager = await NotificationsManager.createNotificationsManager({ acl, @@ -159,18 +161,20 @@ describe('notificationsSend', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await grpcClient.destroy(); await grpcServer.stop(); await notificationsManager.stop(); await nodeGraph.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await sigchain.stop(); await proxy.stop(); await acl.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/discovery/Discovery.test.ts b/tests/discovery/Discovery.test.ts index 2e59779b1..f99c45ee9 100644 --- 
a/tests/discovery/Discovery.test.ts +++ b/tests/discovery/Discovery.test.ts @@ -6,7 +6,7 @@ import path from 'path'; import os from 'os'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import PolykeyAgent from '@/PolykeyAgent'; import Discovery from '@/discovery/Discovery'; import GestaltGraph from '@/gestalts/GestaltGraph'; @@ -22,6 +22,7 @@ import * as nodesUtils from '@/nodes/utils'; import * as claimsUtils from '@/claims/utils'; import * as discoveryErrors from '@/discovery/errors'; import * as keysUtils from '@/keys/utils'; +import * as grpcUtils from '@/grpc/utils/index'; import * as testNodesUtils from '../nodes/utils'; import TestProvider from '../identities/TestProvider'; import { globalRootKeyPems } from '../fixtures/globalRootKeyPems'; @@ -46,7 +47,7 @@ describe('Discovery', () => { let gestaltGraph: GestaltGraph; let identitiesManager: IdentitiesManager; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let db: DB; @@ -59,6 +60,8 @@ describe('Discovery', () => { let nodeB: PolykeyAgent; let identityId: IdentityId; beforeEach(async () => { + // Sets the global GRPC logger to the logger + grpcUtils.setLogger(logger); dataDir = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -124,14 +127,16 @@ describe('Discovery', () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 2000, connTimeoutTime: 2000, logger: logger.getChild('NodeConnectionManager'), @@ -142,12 +147,12 @@ describe('Discovery', () => { nodeConnectionManager, nodeGraph, sigchain, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); // Set up other gestalt nodeA = await PolykeyAgent.createPolykeyAgent({ password: password, @@ -200,11 +205,12 @@ describe('Discovery', () => { await testProvider.publishClaim(identityId, claim); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await nodeA.stop(); await nodeB.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); - await queue.stop(); await nodeGraph.stop(); await proxy.stop(); await sigchain.stop(); @@ -213,6 +219,7 @@ describe('Discovery', () => { await acl.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/nodes/NodeConnection.test.ts b/tests/nodes/NodeConnection.test.ts index 3afb53aa1..efa71300f 100644 --- a/tests/nodes/NodeConnection.test.ts +++ b/tests/nodes/NodeConnection.test.ts @@ -10,6 +10,7 @@ import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { destroyed } from '@matrixai/async-init'; +import TaskManager from '@/tasks/TaskManager'; import Proxy from '@/network/Proxy'; import NodeConnection from '@/nodes/NodeConnection'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -33,7 +34,6 @@ import * as nodesUtils from '@/nodes/utils'; import 
* as agentErrors from '@/agent/errors'; import * as grpcUtils from '@/grpc/utils'; import { timerStart } from '@/utils'; -import Queue from '@/nodes/Queue'; import * as testNodesUtils from './utils'; import * as grpcTestUtils from '../grpc/utils'; import * as agentTestUtils from '../agent/utils'; @@ -85,7 +85,6 @@ describe(`${NodeConnection.name} test`, () => { let serverKeyManager: KeyManager; let serverVaultManager: VaultManager; let serverNodeGraph: NodeGraph; - let serverQueue: Queue; let serverNodeConnectionManager: NodeConnectionManager; let serverNodeManager: NodeManager; let serverSigchain: Sigchain; @@ -111,6 +110,7 @@ describe(`${NodeConnection.name} test`, () => { let sourcePort: Port; let serverTLSConfig: TLSConfig; + let serverTaskManager: TaskManager; /** * Mock TCP server @@ -240,13 +240,16 @@ describe(`${NodeConnection.name} test`, () => { keyManager: serverKeyManager, logger, }); - - serverQueue = new Queue({ logger }); + serverTaskManager = await TaskManager.createTaskManager({ + db: serverDb, + lazy: true, + logger, + }); serverNodeConnectionManager = new NodeConnectionManager({ keyManager: serverKeyManager, nodeGraph: serverNodeGraph, proxy: serverProxy, - queue: serverQueue, + taskManager: serverTaskManager, logger, }); serverNodeManager = new NodeManager({ @@ -255,10 +258,9 @@ describe(`${NodeConnection.name} test`, () => { keyManager: serverKeyManager, nodeGraph: serverNodeGraph, nodeConnectionManager: serverNodeConnectionManager, - queue: serverQueue, + taskManager: serverTaskManager, logger: logger, }); - await serverQueue.start(); await serverNodeManager.start(); await serverNodeConnectionManager.start({ nodeManager: serverNodeManager }); serverVaultManager = await VaultManager.createVaultManager({ @@ -372,7 +374,6 @@ describe(`${NodeConnection.name} test`, () => { await serverNodeGraph.destroy(); await serverNodeConnectionManager.stop(); await serverNodeManager.stop(); - await serverQueue.stop(); await serverNotificationsManager.stop(); await serverNotificationsManager.destroy(); await agentTestUtils.closeTestAgentServer(agentServer); @@ -505,7 +506,7 @@ describe(`${NodeConnection.name} test`, () => { // Have a nodeConnection try to connect to it const killSelf = jest.fn(); nodeConnection = await NodeConnection.createNodeConnection({ - timer: timerStart(500), + timer: timerStart(2000), proxy: clientProxy, keyManager: clientKeyManager, logger: logger, diff --git a/tests/nodes/NodeConnectionManager.general.test.ts b/tests/nodes/NodeConnectionManager.general.test.ts index 28423dde9..e2bd36605 100644 --- a/tests/nodes/NodeConnectionManager.general.test.ts +++ b/tests/nodes/NodeConnectionManager.general.test.ts @@ -1,13 +1,14 @@ import type { NodeAddress, NodeBucket, NodeId, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; import type NodeManager from '@/nodes/NodeManager'; +import type TaskManager from '@/tasks/TaskManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; -import Queue from '@/nodes/Queue'; +import { PromiseCancellable } from '@matrixai/async-cancellable'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -76,7 +77,6 @@ describe(`${NodeConnectionManager.name} general test`, () => { let db: DB; let proxy: Proxy; let nodeGraph: NodeGraph; - let queue: Queue; let 
remoteNode1: PolykeyAgent; let remoteNode2: PolykeyAgent; @@ -122,7 +122,14 @@ describe(`${NodeConnectionManager.name} general test`, () => { return IdInternal.create(idArray); }; - const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; + const dummyNodeManager = { + setNode: jest.fn(), + updateRefreshBucketDelay: jest.fn(), + } as unknown as NodeManager; + const dummyTaskManager: TaskManager = { + registerHandler: jest.fn(), + deregisterHandler: jest.fn(), + } as unknown as TaskManager; beforeAll(async () => { dataDir2 = await fs.promises.mkdtemp( @@ -197,10 +204,6 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), - }); - await queue.start(); const tlsConfig = { keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, certChainPem: keysUtils.certToPem(keyManager.getRootCert()), @@ -226,7 +229,6 @@ describe(`${NodeConnectionManager.name} general test`, () => { }); afterEach(async () => { - await queue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await db.stop(); @@ -243,7 +245,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager: dummyTaskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -270,13 +272,15 @@ describe(`${NodeConnectionManager.name} general test`, () => { NodeConnectionManager.prototype, 'pingNode', ); - mockedPingNode.mockImplementation(async () => true); + mockedPingNode.mockImplementation( + () => new PromiseCancellable((resolve) => resolve(true)), + ); // NodeConnectionManager under test const nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager: dummyTaskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -325,7 +329,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager: dummyTaskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -391,7 +395,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager: dummyTaskManager, logger: logger.getChild('NodeConnectionManager'), }); @@ -463,7 +467,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager: dummyTaskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -501,7 +505,7 @@ describe(`${NodeConnectionManager.name} general test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager: dummyTaskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); @@ -522,4 +526,61 @@ describe(`${NodeConnectionManager.name} general test`, () => { await nodeConnectionManager?.stop(); } }); + test('getClosestGlobalNodes should skip recent offline nodes', async () => { + let nodeConnectionManager: NodeConnectionManager | undefined; + const mockedPingNode = jest.spyOn( + NodeConnectionManager.prototype, + 'pingNode', + ); + try { + nodeConnectionManager = new NodeConnectionManager({ + keyManager, + nodeGraph, + proxy, + taskManager: dummyTaskManager, + logger: nodeConnectionManagerLogger, + }); + await nodeConnectionManager.start({ 
nodeManager: dummyNodeManager }); + // Check two things, + // 1. existence of a node in the backoff map + // 2. getClosestGlobalNodes doesn't try to connect to offline node + + // Add fake data to `NodeGraph` + await nodeGraph.setNode(nodeId1, { + host: serverHost, + port: serverPort, + }); + await nodeGraph.setNode(nodeId2, { + host: serverHost, + port: serverPort, + }); + + // Making pings fail + mockedPingNode.mockImplementation( + () => new PromiseCancellable((resolve) => resolve(false)), + ); + await nodeConnectionManager.getClosestGlobalNodes(nodeId3, false); + expect(mockedPingNode).toHaveBeenCalled(); + + // Nodes 1 and 2 should exist in backoff map + // @ts-ignore: kidnap protected property + const backoffMap = nodeConnectionManager.nodesBackoffMap; + expect(backoffMap.has(nodeId1.toString())).toBeTrue(); + expect(backoffMap.has(nodeId2.toString())).toBeTrue(); + expect(backoffMap.has(nodeId3.toString())).toBeFalse(); + + // Next find node should skip offline nodes + mockedPingNode.mockClear(); + await nodeConnectionManager.getClosestGlobalNodes(nodeId3, true); + expect(mockedPingNode).not.toHaveBeenCalled(); + + // We can try connecting anyway + mockedPingNode.mockClear(); + await nodeConnectionManager.getClosestGlobalNodes(nodeId3, false); + expect(mockedPingNode).toHaveBeenCalled(); + } finally { + mockedPingNode.mockRestore(); + await nodeConnectionManager?.stop(); + } + }); }); diff --git a/tests/nodes/NodeConnectionManager.lifecycle.test.ts b/tests/nodes/NodeConnectionManager.lifecycle.test.ts index c9ff18cff..1c0792990 100644 --- a/tests/nodes/NodeConnectionManager.lifecycle.test.ts +++ b/tests/nodes/NodeConnectionManager.lifecycle.test.ts @@ -8,7 +8,8 @@ import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { withF } from '@matrixai/resources'; import { IdInternal } from '@matrixai/id'; -import Queue from '@/nodes/Queue'; +import { Timer } from '@matrixai/timer'; +import TaskManager from '@/tasks/TaskManager'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import NodeGraph from '@/nodes/NodeGraph'; @@ -18,7 +19,6 @@ import * as nodesUtils from '@/nodes/utils'; import * as nodesErrors from '@/nodes/errors'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; -import { timerStart } from '@/utils'; import { globalRootKeyPems } from '../fixtures/globalRootKeyPems'; describe(`${NodeConnectionManager.name} lifecycle test`, () => { @@ -77,7 +77,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { let proxy: Proxy; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let remoteNode1: PolykeyAgent; let remoteNode2: PolykeyAgent; @@ -155,10 +155,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, logger: logger.getChild('NodeGraph'), }); - queue = new Queue({ - logger: logger.getChild('queue'), + taskManager = await TaskManager.createTaskManager({ + db, + lazy: true, + logger, }); - await queue.start(); const tlsConfig = { keyPrivatePem: keyManager.getRootKeyPairPem().privateKey, certChainPem: keysUtils.certToPem(keyManager.getRootCert()), @@ -184,7 +185,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { }); afterEach(async () => { - await queue.stop(); + await taskManager.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await db.stop(); @@ -203,10 +204,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - 
queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await taskManager.startProcessing(); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -229,10 +231,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await taskManager.startProcessing(); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -264,10 +267,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await taskManager.startProcessing(); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -293,11 +297,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); - + await taskManager.startProcessing(); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -346,11 +350,12 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, connConnectTime: 500, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await taskManager.startProcessing(); // Add the dummy node await nodeGraph.setNode(dummyNodeId, { host: '125.0.0.1' as Host, @@ -388,10 +393,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await taskManager.startProcessing(); // @ts-ignore accessing protected NodeConnectionMap const connections = nodeConnectionManager.connections; expect(connections.size).toBe(0); @@ -415,10 +421,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await taskManager.startProcessing(); // @ts-ignore accessing protected NodeConnectionMap const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -449,10 +456,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await taskManager.startProcessing(); // @ts-ignore: kidnap connections const connections = nodeConnectionManager.connections; // @ts-ignore: kidnap connectionLocks @@ -483,10 +491,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await taskManager.startProcessing(); // Do testing // set 
up connections await nodeConnectionManager.withConnF(remoteNodeId1, nop); @@ -526,10 +535,11 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); + await taskManager.startProcessing(); await nodeConnectionManager.pingNode( remoteNodeId1, remoteNode1.proxy.getProxyHost(), @@ -547,18 +557,18 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); - + await taskManager.startProcessing(); // Pinging node expect( await nodeConnectionManager.pingNode( remoteNodeId1, '127.1.2.3' as Host, 55555 as Port, - timerStart(1000), + { timer: new Timer({ delay: 1000 }) }, ), ).toEqual(false); } finally { @@ -573,17 +583,17 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { keyManager, nodeGraph, proxy, - queue, + taskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); - + await taskManager.startProcessing(); expect( await nodeConnectionManager.pingNode( remoteNodeId1, remoteNode2.proxy.getProxyHost(), remoteNode2.proxy.getProxyPort(), - timerStart(1000), + { timer: new Timer({ delay: 1000 }) }, ), ).toEqual(false); @@ -592,7 +602,7 @@ describe(`${NodeConnectionManager.name} lifecycle test`, () => { remoteNodeId2, remoteNode1.proxy.getProxyHost(), remoteNode1.proxy.getProxyPort(), - timerStart(1000), + { timer: new Timer({ delay: 1000 }) }, ), ).toEqual(false); } finally { diff --git a/tests/nodes/NodeConnectionManager.seednodes.test.ts b/tests/nodes/NodeConnectionManager.seednodes.test.ts index 4c8d62440..033a2f87d 100644 --- a/tests/nodes/NodeConnectionManager.seednodes.test.ts +++ b/tests/nodes/NodeConnectionManager.seednodes.test.ts @@ -7,6 +7,7 @@ import os from 'os'; import { DB } from '@matrixai/db'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { IdInternal } from '@matrixai/id'; +import { PromiseCancellable } from '@matrixai/async-cancellable'; import NodeManager from '@/nodes/NodeManager'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; @@ -16,7 +17,8 @@ import Proxy from '@/network/Proxy'; import * as nodesUtils from '@/nodes/utils'; import * as keysUtils from '@/keys/utils'; import * as grpcUtils from '@/grpc/utils'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; +import { sleep } from '@/utils/index'; import { globalRootKeyPems } from '../fixtures/globalRootKeyPems'; describe(`${NodeConnectionManager.name} seed nodes test`, () => { @@ -76,11 +78,20 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { let remoteNodeId1: NodeId; let remoteNodeId2: NodeId; + let taskManager: TaskManager; const dummyNodeManager = { setNode: jest.fn(), refreshBucketQueueAdd: jest.fn(), } as unknown as NodeManager; + function createPromiseCancellable<T>(result: T) { + return () => new PromiseCancellable<T>((resolve) => resolve(result)); + } + + function createPromiseCancellableNop() { + return () => new PromiseCancellable<void>((resolve) => resolve()); + } + beforeAll(async () => { dataDir2 = await fs.promises.mkdtemp( path.join(os.tmpdir(), 'polykey-test-'), ); @@ -150,6 +161,11 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { }, }, }); + taskManager = await
TaskManager.createTaskManager({ + db, + lazy: true, + logger: logger.getChild('taskManager'), + }); nodeGraph = await NodeGraph.createNodeGraph({ db, keyManager, @@ -187,6 +203,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { await keyManager.stop(); await keyManager.destroy(); await proxy.stop(); + await taskManager.stop(); }); // Seed nodes @@ -198,9 +215,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keyManager, nodeGraph, proxy, - queue: new Queue({ - logger: logger.getChild('queue'), - }), + taskManager, seedNodes: dummySeedNodes, logger: logger, }); @@ -210,7 +225,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { logger, nodeConnectionManager, nodeGraph, - queue: {} as Queue, + taskManager, sigchain: {} as Sigchain, }); await nodeManager.start(); @@ -235,9 +250,7 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { keyManager, nodeGraph, proxy, - queue: new Queue({ - logger: logger.getChild('queue'), - }), + taskManager, seedNodes: dummySeedNodes, logger: logger, }); @@ -255,17 +268,16 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { test('should synchronise nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; let nodeManager: NodeManager | undefined; - let queue: Queue | undefined; const mockedRefreshBucket = jest.spyOn( NodeManager.prototype, 'refreshBucket', ); - mockedRefreshBucket.mockImplementation(async () => {}); + mockedRefreshBucket.mockImplementation(createPromiseCancellableNop()); const mockedPingNode = jest.spyOn( NodeConnectionManager.prototype, 'pingNode', ); - mockedPingNode.mockImplementation(async () => true); + mockedPingNode.mockImplementation(createPromiseCancellable(true)); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -276,12 +288,11 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: remoteNode2.proxy.getProxyHost(), port: remoteNode2.proxy.getProxyPort(), }; - queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, seedNodes, logger: logger, }); @@ -291,10 +302,9 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { logger, nodeConnectionManager, nodeGraph, - queue, + taskManager, sigchain: {} as Sigchain, }); - await queue.start(); await nodeManager.start(); await remoteNode1.nodeGraph.setNode(nodeId1, { host: serverHost, @@ -305,7 +315,8 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { port: serverPort, }); await nodeConnectionManager.start({ nodeManager }); - await nodeConnectionManager.syncNodeGraph(); + await taskManager.startProcessing(); + await nodeManager.syncNodeGraph(); expect(await nodeGraph.getNode(nodeId1)).toBeDefined(); expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); expect(await nodeGraph.getNode(dummyNodeId)).toBeUndefined(); @@ -314,23 +325,21 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { mockedPingNode.mockRestore(); await nodeManager?.stop(); await nodeConnectionManager?.stop(); - await queue?.stop(); } }); test('should call refreshBucket when syncing nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; let nodeManager: NodeManager | undefined; - let queue: Queue | undefined; const mockedRefreshBucket = jest.spyOn( NodeManager.prototype, 'refreshBucket', ); - mockedRefreshBucket.mockImplementation(async () => {}); + 
mockedRefreshBucket.mockImplementation(createPromiseCancellableNop()); const mockedPingNode = jest.spyOn( NodeConnectionManager.prototype, 'pingNode', ); - mockedPingNode.mockImplementation(async () => true); + mockedPingNode.mockImplementation(createPromiseCancellable(true)); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -341,12 +350,11 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: remoteNode2.proxy.getProxyHost(), port: remoteNode2.proxy.getProxyPort(), }; - queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, seedNodes, logger: logger, }); @@ -357,9 +365,8 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { nodeConnectionManager, nodeGraph, sigchain: {} as Sigchain, - queue, + taskManager, }); - await queue.start(); await nodeManager.start(); await remoteNode1.nodeGraph.setNode(nodeId1, { host: serverHost, @@ -370,31 +377,33 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { port: serverPort, }); await nodeConnectionManager.start({ nodeManager }); - await nodeConnectionManager.syncNodeGraph(); - await nodeManager.refreshBucketQueueDrained(); + await taskManager.startProcessing(); + await nodeManager.syncNodeGraph(); + await sleep(1000); expect(mockedRefreshBucket).toHaveBeenCalled(); } finally { mockedRefreshBucket.mockRestore(); mockedPingNode.mockRestore(); await nodeManager?.stop(); await nodeConnectionManager?.stop(); - await queue?.stop(); } }); test('should handle an offline seed node when synchronising nodeGraph', async () => { let nodeConnectionManager: NodeConnectionManager | undefined; let nodeManager: NodeManager | undefined; - let queue: Queue | undefined; const mockedRefreshBucket = jest.spyOn( NodeManager.prototype, 'refreshBucket', ); - mockedRefreshBucket.mockImplementation(async () => {}); + mockedRefreshBucket.mockImplementation(createPromiseCancellableNop()); const mockedPingNode = jest.spyOn( NodeConnectionManager.prototype, 'pingNode', ); - mockedPingNode.mockImplementation(async () => true); + mockedPingNode.mockImplementation((nodeId: NodeId) => { + if (dummyNodeId.equals(nodeId)) return createPromiseCancellable(false)(); + return createPromiseCancellable(true)(); + }); try { const seedNodes: SeedNodes = {}; seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { @@ -418,12 +427,11 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { host: serverHost, port: serverPort, }); - queue = new Queue({ logger }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue, + taskManager, seedNodes, connConnectTime: 500, logger: logger, @@ -435,13 +443,13 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { nodeConnectionManager, nodeGraph, sigchain: {} as Sigchain, - queue, + taskManager, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); // This should complete without error - await nodeConnectionManager.syncNodeGraph(); + await nodeManager.syncNodeGraph(true); // Information on remotes are found expect(await nodeGraph.getNode(nodeId1)).toBeDefined(); expect(await nodeGraph.getNode(nodeId2)).toBeDefined(); @@ -450,7 +458,6 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { mockedPingNode.mockRestore(); await nodeConnectionManager?.stop(); await nodeManager?.stop(); - await queue?.stop(); } }); test( @@ 
-473,9 +480,8 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { NodeConnectionManager.prototype, 'pingNode', ); - mockedPingNode.mockImplementation(async () => true); + mockedPingNode.mockImplementation(createPromiseCancellable(true)); try { - logger.setLevel(LogLevel.WARN); node1 = await PolykeyAgent.createPolykeyAgent({ nodePath: path.join(dataDir, 'node1'), password: 'password', @@ -507,10 +513,8 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { logger, }); - await node1.queue.drained(); - await node1.nodeManager.refreshBucketQueueDrained(); - await node2.queue.drained(); - await node2.nodeManager.refreshBucketQueueDrained(); + await node1.nodeManager.syncNodeGraph(true); + await node2.nodeManager.syncNodeGraph(true); const getAllNodes = async (node: PolykeyAgent) => { const nodes: Array = []; @@ -540,7 +544,6 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { expect(node2Nodes).toContain(nodeId1); } finally { mockedPingNode.mockRestore(); - logger.setLevel(LogLevel.WARN); await node1?.stop(); await node1?.destroy(); await node2?.stop(); @@ -549,4 +552,70 @@ describe(`${NodeConnectionManager.name} seed nodes test`, () => { }, globalThis.defaultTimeout * 2, ); + test( + 'refreshBucket delays should be reset after finding less than 20 nodes', + async () => { + // Using a single seed node we need to check that each entering node adds itself to the seed node. + // Also need to check that the new nodes can be seen in the network. + let node1: PolykeyAgent | undefined; + const seedNodes: SeedNodes = {}; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId1)] = { + host: remoteNode1.proxy.getProxyHost(), + port: remoteNode1.proxy.getProxyPort(), + }; + seedNodes[nodesUtils.encodeNodeId(remoteNodeId2)] = { + host: remoteNode2.proxy.getProxyHost(), + port: remoteNode2.proxy.getProxyPort(), + }; + const mockedPingNode = jest.spyOn( + NodeConnectionManager.prototype, + 'pingNode', + ); + mockedPingNode.mockImplementation(createPromiseCancellable(true)); + try { + node1 = await PolykeyAgent.createPolykeyAgent({ + nodePath: path.join(dataDir, 'node1'), + password: 'password', + networkConfig: { + proxyHost: localHost, + agentHost: localHost, + clientHost: localHost, + forwardHost: localHost, + }, + keysConfig: { + privateKeyPemOverride: globalRootKeyPems[3], + }, + seedNodes, + logger, + }); + + // Reset all the refresh bucket timers to a distinct time + for ( + let bucketIndex = 0; + bucketIndex < node1.nodeGraph.nodeIdBits; + bucketIndex++ + ) { + await node1.nodeManager.updateRefreshBucketDelay( + bucketIndex, + 10000, + true, + ); + } + + // Trigger a refreshBucket + await node1.nodeManager.refreshBucket(1); + + for await (const task of node1.taskManager.getTasks('asc', true, [ + 'refreshBucket', + ])) { + expect(task.delay).toBeGreaterThanOrEqual(50000); + } + } finally { + mockedPingNode.mockRestore(); + await node1?.stop(); + await node1?.destroy(); + } + }, + globalThis.defaultTimeout * 2, + ); }); diff --git a/tests/nodes/NodeConnectionManager.termination.test.ts b/tests/nodes/NodeConnectionManager.termination.test.ts index 5436a9fbb..87b237d62 100644 --- a/tests/nodes/NodeConnectionManager.termination.test.ts +++ b/tests/nodes/NodeConnectionManager.termination.test.ts @@ -2,7 +2,7 @@ import type { AddressInfo } from 'net'; import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port, TLSConfig } from '@/network/types'; import type NodeManager from '@/nodes/NodeManager'; -import type Queue from 
'@/nodes/Queue'; +import type TaskManager from 'tasks/TaskManager'; import net from 'net'; import fs from 'fs'; import path from 'path'; @@ -84,6 +84,10 @@ describe(`${NodeConnectionManager.name} termination test`, () => { let tlsConfig2: TLSConfig; const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; + const dummyTaskManager: TaskManager = { + registerHandler: jest.fn(), + deregisterHandler: jest.fn(), + } as unknown as TaskManager; beforeEach(async () => { dataDir = await fs.promises.mkdtemp( @@ -240,7 +244,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, - queue: {} as Queue, + taskManager: dummyTaskManager, logger: logger, connConnectTime: 2000, }); @@ -281,7 +285,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, - queue: {} as Queue, + taskManager: dummyTaskManager, logger: logger, connConnectTime: 2000, }); @@ -325,7 +329,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy, - queue: {} as Queue, + taskManager: dummyTaskManager, logger: logger, connConnectTime: 2000, }); @@ -372,7 +376,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - queue: {} as Queue, + taskManager: dummyTaskManager, logger: logger, connConnectTime: 2000, }); @@ -433,7 +437,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - queue: {} as Queue, + taskManager: dummyTaskManager, logger: logger, connConnectTime: 2000, }); @@ -516,7 +520,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - queue: {} as Queue, + taskManager: dummyTaskManager, logger: logger, connConnectTime: 2000, }); @@ -592,7 +596,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - queue: {} as Queue, + taskManager: dummyTaskManager, logger: logger, connConnectTime: 2000, }); @@ -673,7 +677,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - queue: {} as Queue, + taskManager: dummyTaskManager, logger: logger, connConnectTime: 2000, }); @@ -754,7 +758,7 @@ describe(`${NodeConnectionManager.name} termination test`, () => { keyManager, nodeGraph, proxy: defaultProxy, - queue: {} as Queue, + taskManager: dummyTaskManager, logger: logger, connConnectTime: 2000, }); diff --git a/tests/nodes/NodeConnectionManager.timeout.test.ts b/tests/nodes/NodeConnectionManager.timeout.test.ts index d356f1f55..d06d2a019 100644 --- a/tests/nodes/NodeConnectionManager.timeout.test.ts +++ b/tests/nodes/NodeConnectionManager.timeout.test.ts @@ -1,7 +1,7 @@ import type { NodeId, NodeIdString, SeedNodes } from '@/nodes/types'; import type { Host, Port } from '@/network/types'; import type NodeManager from 'nodes/NodeManager'; -import type Queue from '@/nodes/Queue'; +import type TaskManager from '@/tasks/TaskManager'; import fs from 'fs'; import path from 'path'; import os from 'os'; @@ -77,6 +77,10 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { let remoteNodeId2: NodeId; const dummyNodeManager = { setNode: jest.fn() } as unknown as NodeManager; + const dummyTaskManager: TaskManager = { + registerHandler: jest.fn(), + deregisterHandler: jest.fn(), + } as unknown as TaskManager; beforeAll(async () => { dataDir2 = await fs.promises.mkdtemp( @@ -188,7 +192,7 @@ 
describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, - queue: {} as Queue, + taskManager: dummyTaskManager, connTimeoutTime: 500, logger: nodeConnectionManagerLogger, }); @@ -226,7 +230,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, - queue: {} as Queue, + taskManager: dummyTaskManager, connTimeoutTime: 1000, logger: nodeConnectionManagerLogger, }); @@ -280,7 +284,7 @@ describe(`${NodeConnectionManager.name} timeout test`, () => { keyManager, nodeGraph, proxy, - queue: {} as Queue, + taskManager: dummyTaskManager, logger: nodeConnectionManagerLogger, }); await nodeConnectionManager.start({ nodeManager: dummyNodeManager }); diff --git a/tests/nodes/NodeManager.test.ts b/tests/nodes/NodeManager.test.ts index f2ed4dfb5..9738f902d 100644 --- a/tests/nodes/NodeManager.test.ts +++ b/tests/nodes/NodeManager.test.ts @@ -1,13 +1,16 @@ import type { CertificatePem, KeyPairPem, PublicKeyPem } from '@/keys/types'; import type { Host, Port } from '@/network/types'; import type { NodeId, NodeAddress } from '@/nodes/types'; +import type { Task } from '@/tasks/types'; import os from 'os'; import path from 'path'; import fs from 'fs'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import UTP from 'utp-native'; -import Queue from '@/nodes/Queue'; +import { Timer } from '@matrixai/timer'; +import { PromiseCancellable } from '@matrixai/async-cancellable'; +import TaskManager from '@/tasks/TaskManager'; import PolykeyAgent from '@/PolykeyAgent'; import KeyManager from '@/keys/KeyManager'; import * as keysUtils from '@/keys/utils'; @@ -17,10 +20,9 @@ import NodeManager from '@/nodes/NodeManager'; import Proxy from '@/network/Proxy'; import Sigchain from '@/sigchain/Sigchain'; import * as claimsUtils from '@/claims/utils'; -import { promise, promisify, sleep } from '@/utils'; +import { never, promise, promisify, sleep } from '@/utils'; import * as nodesUtils from '@/nodes/utils'; import * as utilsPB from '@/proto/js/polykey/v1/utils/utils_pb'; -import * as nodesErrors from '@/nodes/errors'; import * as nodesTestUtils from './utils'; import { generateNodeIdForBucket } from './utils'; import { globalRootKeyPems } from '../fixtures/globalRootKeyPems'; @@ -32,7 +34,7 @@ describe(`${NodeManager.name} test`, () => { ]); let dataDir: string; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let proxy: Proxy; let keyManager: KeyManager; @@ -108,24 +110,31 @@ describe(`${NodeManager.name} test`, () => { keyManager, logger, }); - queue = new Queue({ logger }); + taskManager = await TaskManager.createTaskManager({ + activeLimit: 0, + db, + lazy: true, + logger, + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, - queue, + taskManager, proxy, logger, }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); mockedPingNode.mockClear(); mockedPingNode.mockImplementation(async (_) => true); await nodeConnectionManager.stop(); - await queue.stop(); await nodeGraph.stop(); await nodeGraph.destroy(); await sigchain.stop(); await sigchain.destroy(); + await taskManager.stop(); await db.stop(); await db.destroy(); await keyManager.stop(); @@ -168,17 +177,20 @@ describe(`${NodeManager.name} test`, () => { keyManager, nodeGraph, nodeConnectionManager, - queue, + taskManager, logger, }); await nodeManager.start(); await 
nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); // Set server node offline await server.stop(); // Check if active // Case 1: cannot establish new connection, so offline - const active1 = await nodeManager.pingNode(serverNodeId); + const active1 = await nodeManager.pingNode(serverNodeId, undefined, { + timer: new Timer({ delay: 1000 }), + }); expect(active1).toBe(false); // Bring server node online await server.start({ @@ -195,17 +207,18 @@ describe(`${NodeManager.name} test`, () => { await nodeGraph.setNode(serverNodeId, serverNodeAddress); // Check if active // Case 2: can establish new connection, so online - const active2 = await nodeManager.pingNode(serverNodeId); + const active2 = await nodeManager.pingNode(serverNodeId, undefined, { + timer: new Timer({ delay: 1000 }), + }); expect(active2).toBe(true); // Turn server node offline again await server.stop(); await server.destroy(); - // Give time for the ping buffers to send and wait for timeout on - // existing connection - await sleep(30000); // FIXME: remove this sleep // Check if active // Case 3: pre-existing connection no longer active, so offline - const active3 = await nodeManager.pingNode(serverNodeId); + const active3 = await nodeManager.pingNode(serverNodeId, undefined, { + timer: new Timer({ delay: 1000 }), + }); expect(active3).toBe(false); } finally { // Clean up @@ -244,11 +257,12 @@ describe(`${NodeManager.name} test`, () => { keyManager, nodeGraph, nodeConnectionManager, - queue, + taskManager, logger, }); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); // We want to get the public key of the server const key = await nodeManager.getPublicKey(serverNodeId); @@ -435,11 +449,12 @@ describe(`${NodeManager.name} test`, () => { keyManager, nodeGraph, nodeConnectionManager, - queue, + taskManager, logger, }); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); await nodeGraph.setNode(xNodeId, xNodeAddress); @@ -455,20 +470,19 @@ describe(`${NodeManager.name} test`, () => { }); }); test('should add a node when bucket has room', async () => { - const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - queue, + taskManager, logger, }); try { - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); const localNodeId = keyManager.getNodeId(); const bucketIndex = 100; const nodeId = nodesTestUtils.generateNodeIdForBucket( @@ -482,24 +496,22 @@ describe(`${NodeManager.name} test`, () => { expect(bucket).toHaveLength(1); } finally { await nodeManager.stop(); - await queue.stop(); } }); test('should update a node if node exists', async () => { - const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - queue, + taskManager, logger, }); try { - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); const localNodeId = keyManager.getNodeId(); const bucketIndex = 100; const nodeId = nodesTestUtils.generateNodeIdForBucket( @@ -525,24 +537,23 @@ describe(`${NodeManager.name} test`, () => { expect(newNodeData.lastUpdated).not.toEqual(nodeData.lastUpdated); } 
finally { await nodeManager.stop(); - await queue.stop(); } }); test('should not add node if bucket is full and old node is alive', async () => { - const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - queue, + taskManager, logger, }); + const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); try { - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); const localNodeId = keyManager.getNodeId(); const bucketIndex = 100; // Creating 20 nodes in bucket @@ -559,7 +570,6 @@ describe(`${NodeManager.name} test`, () => { bucketIndex, ); // Mocking ping - const nodeManagerPingMock = jest.spyOn(NodeManager.prototype, 'pingNode'); nodeManagerPingMock.mockResolvedValue(true); const oldestNodeId = (await nodeGraph.getOldestNode(bucketIndex)).pop(); const oldestNode = await nodeGraph.getNode(oldestNodeId!); @@ -576,27 +586,25 @@ describe(`${NodeManager.name} test`, () => { // Oldest node was updated const oldestNodeNew = await nodeGraph.getNode(oldestNodeId!); expect(oldestNodeNew!.lastUpdated).not.toEqual(oldestNode!.lastUpdated); - nodeManagerPingMock.mockRestore(); } finally { await nodeManager.stop(); - await queue.stop(); + nodeManagerPingMock.mockRestore(); } }); test('should add node if bucket is full, old node is alive and force is set', async () => { - const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - queue, + taskManager, logger, }); try { - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); const localNodeId = keyManager.getNodeId(); const bucketIndex = 100; // Creating 20 nodes in bucket @@ -620,7 +628,7 @@ describe(`${NodeManager.name} test`, () => { await nodeManager.setNode( nodeId, { port: 55555 } as NodeAddress, - false, + undefined, true, ); // Bucket still contains max nodes @@ -635,24 +643,22 @@ describe(`${NodeManager.name} test`, () => { nodeManagerPingMock.mockRestore(); } finally { await nodeManager.stop(); - await queue.stop(); } }); test('should add node if bucket is full and old node is dead', async () => { - const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - queue, + taskManager, logger, }); try { - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); const localNodeId = keyManager.getNodeId(); const bucketIndex = 100; // Creating 20 nodes in bucket @@ -683,25 +689,23 @@ describe(`${NodeManager.name} test`, () => { nodeManagerPingMock.mockRestore(); } finally { await nodeManager.stop(); - await queue.stop(); } }); test('should add node when an incoming connection is established', async () => { let server: PolykeyAgent | undefined; - const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: {} as NodeConnectionManager, - queue, + taskManager, logger, }); try { - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); server = await 
PolykeyAgent.createPolykeyAgent({ password: 'password', nodePath: path.join(dataDir, 'server'), @@ -742,25 +746,23 @@ describe(`${NodeManager.name} test`, () => { await server?.stop(); await server?.destroy(); await nodeManager.stop(); - await queue.stop(); } }); test('should not add nodes to full bucket if pings succeeds', async () => { mockedPingNode.mockImplementation(async (_) => true); - const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - queue, + taskManager, logger, }); try { - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); const nodeId = keyManager.getNodeId(); const address = { host: localhost, port }; // Let's fill a bucket @@ -776,33 +778,31 @@ describe(`${NodeManager.name} test`, () => { }; // Pings succeed, node not added - mockedPingNode.mockImplementation(async (_) => true); + mockedPingNode.mockImplementation(async () => true); const newNode = generateNodeIdForBucket(nodeId, 100, 21); - await nodeManager.setNode(newNode, address); + await nodeManager.setNode(newNode, address, true); expect(await listBucket(100)).not.toContain( nodesUtils.encodeNodeId(newNode), ); } finally { await nodeManager.stop(); - await queue.stop(); } }); test('should add nodes to full bucket if pings fail', async () => { mockedPingNode.mockImplementation(async (_) => true); - const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); try { await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); const nodeId = keyManager.getNodeId(); const address = { host: localhost, port }; // Let's fill a bucket @@ -822,17 +822,15 @@ describe(`${NodeManager.name} test`, () => { const newNode1 = generateNodeIdForBucket(nodeId, 100, 22); const newNode2 = generateNodeIdForBucket(nodeId, 100, 23); const newNode3 = generateNodeIdForBucket(nodeId, 100, 24); - await nodeManager.setNode(newNode1, address); - await nodeManager.setNode(newNode2, address); - await nodeManager.setNode(newNode3, address); - await queue.drained(); + await nodeManager.setNode(newNode1, address, true); + await nodeManager.setNode(newNode2, address, true); + await nodeManager.setNode(newNode3, address, true); const list = await listBucket(100); expect(list).toContain(nodesUtils.encodeNodeId(newNode1)); expect(list).toContain(nodesUtils.encodeNodeId(newNode2)); expect(list).toContain(nodesUtils.encodeNodeId(newNode3)); } finally { await nodeManager.stop(); - await queue.stop(); } }); test('should not block when bucket is full', async () => { @@ -842,20 +840,19 @@ describe(`${NodeManager.name} test`, () => { logger, }); mockedPingNode.mockImplementation(async (_) => true); - const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph: tempNodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); try { await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); const nodeId = keyManager.getNodeId(); const address = { host: localhost, port }; // Let's fill a bucket @@ -873,65 +870,25 @@ 
describe(`${NodeManager.name} test`, () => { const newNode4 = generateNodeIdForBucket(nodeId, 100, 25); // Set manually to non-blocking await expect( - nodeManager.setNode(newNode4, address, false), + nodeManager.setNode(newNode4, address), ).resolves.toBeUndefined(); delayPing.resolveP(); - await queue.drained(); } finally { await nodeManager.stop(); - await queue.stop(); await tempNodeGraph.stop(); await tempNodeGraph.destroy(); } }); - test('should block when blocking is set to true', async () => { - mockedPingNode.mockImplementation(async (_) => true); - const queue = new Queue({ logger }); - const nodeManager = new NodeManager({ - db, - sigchain: {} as Sigchain, - keyManager, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - queue, - logger, - }); - await queue.start(); - await nodeManager.start(); - try { - await nodeConnectionManager.start({ nodeManager }); - const nodeId = keyManager.getNodeId(); - const address = { host: localhost, port }; - // Let's fill a bucket - for (let i = 0; i < nodeGraph.nodeBucketLimit; i++) { - const newNode = generateNodeIdForBucket(nodeId, 100, i); - await nodeManager.setNode(newNode, address); - } - - // Set node can block - mockedPingNode.mockClear(); - mockedPingNode.mockImplementation(async () => true); - const newNode5 = generateNodeIdForBucket(nodeId, 100, 25); - await expect( - nodeManager.setNode(newNode5, address, true), - ).resolves.toBeUndefined(); - expect(mockedPingNode).toBeCalled(); - } finally { - await nodeManager.stop(); - await queue.stop(); - } - }); test('should update deadline when updating a bucket', async () => { const refreshBucketTimeout = 100000; - const queue = new Queue({ logger }); const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - queue, - refreshBucketTimerDefault: refreshBucketTimeout, + taskManager, + refreshBucketDelay: refreshBucketTimeout, logger, }); const mockRefreshBucket = jest.spyOn( @@ -939,116 +896,78 @@ describe(`${NodeManager.name} test`, () => { 'refreshBucket', ); try { - mockRefreshBucket.mockImplementation(async () => {}); - await queue.start(); + mockRefreshBucket.mockImplementation( + () => new PromiseCancellable((resolve) => resolve()), + ); + await taskManager.startProcessing(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); - // @ts-ignore: kidnap map - const deadlineMap = nodeManager.refreshBucketDeadlineMap; // Getting starting value - const bucket = 0; - const startingDeadline = deadlineMap.get(bucket); + const bucketIndex = 100; + let refreshBucketTask: Task | undefined; + for await (const task of taskManager.getTasks('asc', true, [ + nodeManager.basePath, + nodeManager.refreshBucketHandlerId, + `${bucketIndex}`, + ])) { + refreshBucketTask = task; + } + if (refreshBucketTask == null) never(); const nodeId = nodesTestUtils.generateNodeIdForBucket( keyManager.getNodeId(), - bucket, + bucketIndex, ); - await sleep(1000); + await sleep(100); await nodeManager.setNode(nodeId, {} as NodeAddress); // Deadline should be updated - const newDeadline = deadlineMap.get(bucket); - expect(newDeadline).not.toEqual(startingDeadline); - } finally { - mockRefreshBucket.mockRestore(); - await nodeManager.stop(); - await queue.stop(); - } - }); - test('should add buckets to the queue when exceeding deadline', async () => { - const refreshBucketTimeout = 100; - const queue = new Queue({ logger }); - const nodeManager = new NodeManager({ - db, - sigchain: {} as Sigchain, - 
keyManager, - nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - queue, - refreshBucketTimerDefault: refreshBucketTimeout, - logger, - }); - const mockRefreshBucket = jest.spyOn( - NodeManager.prototype, - 'refreshBucket', - ); - const mockRefreshBucketQueueAdd = jest.spyOn( - NodeManager.prototype, - 'refreshBucketQueueAdd', - ); - try { - mockRefreshBucket.mockImplementation(async () => {}); - await queue.start(); - await nodeManager.start(); - await nodeConnectionManager.start({ nodeManager }); - // Getting starting value - expect(mockRefreshBucketQueueAdd).toHaveBeenCalledTimes(0); - await sleep(200); - expect(mockRefreshBucketQueueAdd).toHaveBeenCalledTimes(256); + let refreshBucketTaskUpdated: Task | undefined; + for await (const task of taskManager.getTasks('asc', true, [ + nodeManager.basePath, + nodeManager.refreshBucketHandlerId, + `${bucketIndex}`, + ])) { + refreshBucketTaskUpdated = task; + } + if (refreshBucketTaskUpdated == null) never(); + expect(refreshBucketTaskUpdated.delay).not.toEqual( + refreshBucketTask.delay, + ); } finally { - mockRefreshBucketQueueAdd.mockRestore(); + await taskManager.stopProcessing(); + await taskManager.stopTasks(); mockRefreshBucket.mockRestore(); await nodeManager.stop(); - await queue.stop(); } }); - test('should digest queue to refresh buckets', async () => { - const refreshBucketTimeout = 1000000; - const queue = new Queue({ logger }); + test('refreshBucket should not throw errors when network is empty', async () => { const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, - nodeConnectionManager: dummyNodeConnectionManager, - queue, - refreshBucketTimerDefault: refreshBucketTimeout, + nodeConnectionManager, + taskManager, + refreshBucketDelay: 10000000, logger, }); - const mockRefreshBucket = jest.spyOn( - NodeManager.prototype, - 'refreshBucket', - ); + await nodeConnectionManager.start({ nodeManager }); + await taskManager.startProcessing(); try { - await queue.start(); - await nodeManager.start(); - await nodeConnectionManager.start({ nodeManager }); - mockRefreshBucket.mockImplementation(async () => {}); - nodeManager.refreshBucketQueueAdd(1); - nodeManager.refreshBucketQueueAdd(2); - nodeManager.refreshBucketQueueAdd(3); - nodeManager.refreshBucketQueueAdd(4); - nodeManager.refreshBucketQueueAdd(5); - await nodeManager.refreshBucketQueueDrained(); - expect(mockRefreshBucket).toHaveBeenCalledTimes(5); - - // Add buckets to queue - // check if refresh buckets was called + await expect(nodeManager.refreshBucket(100)).resolves.not.toThrow(); } finally { - mockRefreshBucket.mockRestore(); await nodeManager.stop(); - await queue.stop(); } }); - test('should abort refreshBucket queue when stopping', async () => { - const refreshBucketTimeout = 1000000; - const queue = new Queue({ logger }); + test('refreshBucket tasks should have spread delays', async () => { + const refreshBucketTimeout = 100000; const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - queue, - refreshBucketTimerDefault: refreshBucketTimeout, + taskManager, + refreshBucketDelay: refreshBucketTimeout, logger, }); const mockRefreshBucket = jest.spyOn( @@ -1056,104 +975,81 @@ describe(`${NodeManager.name} test`, () => { 'refreshBucket', ); try { - await queue.start(); - await nodeManager.start(); - await nodeConnectionManager.start({ nodeManager }); mockRefreshBucket.mockImplementation( - async (bucket, options: { signal?: AbortSignal } = 
{}) => { - const { signal } = { ...options }; - const prom = promise(); - signal?.addEventListener('abort', () => - prom.rejectP(new nodesErrors.ErrorNodeAborted()), - ); - await prom.p; - }, + () => new PromiseCancellable((resolve) => resolve()), ); - nodeManager.refreshBucketQueueAdd(1); - nodeManager.refreshBucketQueueAdd(2); - nodeManager.refreshBucketQueueAdd(3); - nodeManager.refreshBucketQueueAdd(4); - nodeManager.refreshBucketQueueAdd(5); - await nodeManager.stop(); + await taskManager.startProcessing(); + await nodeManager.start(); + await nodeConnectionManager.start({ nodeManager }); + // Getting starting value + const startingDelay = new Set(); + for await (const task of taskManager.getTasks('asc', true, [ + 'refreshBucket', + ])) { + startingDelay.add(task.delay); + } + expect(startingDelay.size).not.toBe(1); + // Updating delays should have spread + for ( + let bucketIndex = 0; + bucketIndex < nodeGraph.nodeIdBits; + bucketIndex++ + ) { + await nodeManager.updateRefreshBucketDelay( + bucketIndex, + undefined, + true, + ); + } + const updatedDelay = new Set(); + for await (const task of taskManager.getTasks('asc', true, [ + 'refreshBucket', + ])) { + updatedDelay.add(task.delay); + } + expect(updatedDelay.size).not.toBe(1); } finally { mockRefreshBucket.mockRestore(); await nodeManager.stop(); - await queue.stop(); } }); - test('should pause, resume and stop queue while paused', async () => { - const refreshBucketTimeout = 1000000; - const queue = new Queue({ logger }); + test('Stopping nodeManager should cancel all ephemeral tasks', async () => { const nodeManager = new NodeManager({ db, sigchain: {} as Sigchain, keyManager, nodeGraph, nodeConnectionManager: dummyNodeConnectionManager, - queue, - refreshBucketTimerDefault: refreshBucketTimeout, + taskManager, logger, }); - const mockRefreshBucket = jest.spyOn( - NodeManager.prototype, - 'refreshBucket', - ); try { - logger.setLevel(LogLevel.WARN); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); - mockRefreshBucket.mockImplementation( - async (bucket, options: { signal?: AbortSignal } = {}) => { - const { signal } = { ...options }; - const prom = promise(); - const timer = setTimeout(prom.resolveP, 10); - signal?.addEventListener('abort', () => { - clearTimeout(timer); - prom.rejectP(new nodesErrors.ErrorNodeAborted()); - }); - await prom.p; - }, - ); - nodeManager.refreshBucketQueueAdd(1); - nodeManager.refreshBucketQueueAdd(2); - nodeManager.refreshBucketQueueAdd(3); - nodeManager.refreshBucketQueueAdd(4); - nodeManager.refreshBucketQueueAdd(5); - // Can pause and resume - nodeManager.refreshBucketQueuePause(); - nodeManager.refreshBucketQueueAdd(6); - nodeManager.refreshBucketQueueAdd(7); - nodeManager.refreshBucketQueueResume(); - await nodeManager.refreshBucketQueueDrained(); + // Creating dummy tasks + const task1 = await taskManager.scheduleTask({ + handlerId: nodeManager.pingAndSetNodeHandlerId, + lazy: false, + path: [nodeManager.basePath], + }); + const task2 = await taskManager.scheduleTask({ + handlerId: nodeManager.pingAndSetNodeHandlerId, + lazy: false, + path: [nodeManager.basePath], + }); - // Can pause and stop - nodeManager.refreshBucketQueuePause(); - nodeManager.refreshBucketQueueAdd(8); - nodeManager.refreshBucketQueueAdd(9); - nodeManager.refreshBucketQueueAdd(10); - await nodeManager.stop(); - } finally { - mockRefreshBucket.mockRestore(); + // Stopping nodeManager should cancel any nodeManager tasks await nodeManager.stop(); - await queue.stop(); - } 
- }); - test('refreshBucket should not throw errors when network is empty', async () => { - const nodeManager = new NodeManager({ - db, - sigchain: {} as Sigchain, - keyManager, - nodeGraph, - nodeConnectionManager, - queue, - refreshBucketTimerDefault: 10000000, - logger, - }); - await nodeConnectionManager.start({ nodeManager }); - try { - await expect(nodeManager.refreshBucket(100)).resolves.not.toThrow(); + const tasks: Array = []; + for await (const task of taskManager.getTasks('asc', true, [ + nodeManager.basePath, + ])) { + tasks.push(task); + } + expect(tasks.length).toEqual(0); + await expect(task1.promise()).toReject(); + await expect(task2.promise()).toReject(); } finally { await nodeManager.stop(); } diff --git a/tests/notifications/NotificationsManager.test.ts b/tests/notifications/NotificationsManager.test.ts index 0a4d23f3e..a01a577db 100644 --- a/tests/notifications/NotificationsManager.test.ts +++ b/tests/notifications/NotificationsManager.test.ts @@ -8,7 +8,7 @@ import path from 'path'; import Logger, { LogLevel, StreamHandler } from '@matrixai/logger'; import { DB } from '@matrixai/db'; import { IdInternal } from '@matrixai/id'; -import Queue from '@/nodes/Queue'; +import TaskManager from '@/tasks/TaskManager'; import PolykeyAgent from '@/PolykeyAgent'; import ACL from '@/acl/ACL'; import Sigchain from '@/sigchain/Sigchain'; @@ -49,7 +49,7 @@ describe('NotificationsManager', () => { let acl: ACL; let db: DB; let nodeGraph: NodeGraph; - let queue: Queue; + let taskManager: TaskManager; let nodeConnectionManager: NodeConnectionManager; let nodeManager: NodeManager; let keyManager: KeyManager; @@ -106,12 +106,16 @@ describe('NotificationsManager', () => { keyManager, logger, }); - queue = new Queue({ logger }); + taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, + }); nodeConnectionManager = new NodeConnectionManager({ nodeGraph, keyManager, proxy, - queue, + taskManager, logger, }); nodeManager = new NodeManager({ @@ -120,12 +124,12 @@ describe('NotificationsManager', () => { sigchain, nodeConnectionManager, nodeGraph, - queue, + taskManager, logger, }); - await queue.start(); await nodeManager.start(); await nodeConnectionManager.start({ nodeManager }); + await taskManager.start(); // Set up node for receiving notifications receiver = await PolykeyAgent.createPolykeyAgent({ password: password, @@ -144,8 +148,9 @@ describe('NotificationsManager', () => { }); }, globalThis.defaultTimeout); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await receiver.stop(); - await queue.stop(); await nodeConnectionManager.stop(); await nodeManager.stop(); await nodeGraph.stop(); @@ -154,6 +159,7 @@ describe('NotificationsManager', () => { await acl.stop(); await db.stop(); await keyManager.stop(); + await taskManager.stop(); await fs.promises.rm(dataDir, { force: true, recursive: true, diff --git a/tests/vaults/VaultManager.test.ts b/tests/vaults/VaultManager.test.ts index 762010273..0e9ff57e5 100644 --- a/tests/vaults/VaultManager.test.ts +++ b/tests/vaults/VaultManager.test.ts @@ -8,7 +8,6 @@ import type { import type NotificationsManager from '@/notifications/NotificationsManager'; import type { Host, Port, TLSConfig } from '@/network/types'; import type NodeManager from '@/nodes/NodeManager'; -import type Queue from '@/nodes/Queue'; import fs from 'fs'; import os from 'os'; import path from 'path'; @@ -18,6 +17,7 @@ import { DB } from '@matrixai/db'; import { destroyed, running } from 
'@matrixai/async-init'; import git from 'isomorphic-git'; import { RWLockWriter } from '@matrixai/async-locks'; +import TaskManager from '@/tasks/TaskManager'; import ACL from '@/acl/ACL'; import GestaltGraph from '@/gestalts/GestaltGraph'; import NodeConnectionManager from '@/nodes/NodeConnectionManager'; @@ -480,6 +480,7 @@ describe('VaultManager', () => { let remoteKeynode1: PolykeyAgent, remoteKeynode2: PolykeyAgent; let localNodeId: NodeId; let localNodeIdEncoded: NodeIdEncoded; + let taskManager: TaskManager; beforeAll(async () => { // Creating agents @@ -580,18 +581,22 @@ describe('VaultManager', () => { serverHost: localHost, serverPort: port, }); - + taskManager = await TaskManager.createTaskManager({ + db, + lazy: true, + logger, + }); nodeConnectionManager = new NodeConnectionManager({ keyManager, nodeGraph, proxy, - queue: {} as Queue, + taskManager, logger, }); await nodeConnectionManager.start({ nodeManager: { setNode: jest.fn() } as unknown as NodeManager, }); - + await taskManager.startProcessing(); await nodeGraph.setNode(remoteKeynode1Id, { host: remoteKeynode1.proxy.getProxyHost(), port: remoteKeynode1.proxy.getProxyPort(), @@ -602,6 +607,8 @@ describe('VaultManager', () => { }); }); afterEach(async () => { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await remoteKeynode1.vaultManager.destroyVault(remoteVaultId); await nodeConnectionManager.stop(); await proxy.stop(); @@ -609,6 +616,7 @@ describe('VaultManager', () => { await nodeGraph.destroy(); await keyManager.stop(); await keyManager.destroy(); + await taskManager.stop(); }); test('clone vaults from a remote keynode using a vault name', async () => { @@ -1510,17 +1518,23 @@ describe('VaultManager', () => { serverHost: localHost, serverPort: port, }); + const taskManager = await TaskManager.createTaskManager({ + db, + logger, + lazy: true, + }); const nodeConnectionManager = new NodeConnectionManager({ keyManager, logger, nodeGraph, proxy, - queue: {} as Queue, + taskManager, connConnectTime: 1000, }); await nodeConnectionManager.start({ nodeManager: { setNode: jest.fn() } as unknown as NodeManager, }); + await taskManager.startProcessing(); const vaultManager = await VaultManager.createVaultManager({ vaultsPath, keyManager, @@ -1602,6 +1616,8 @@ describe('VaultManager', () => { ]); expect(vaults[vaultsUtils.encodeVaultId(vault3)]).toBeUndefined(); } finally { + await taskManager.stopProcessing(); + await taskManager.stopTasks(); await vaultManager.stop(); await vaultManager.destroy(); await nodeConnectionManager.stop(); @@ -1614,6 +1630,7 @@ describe('VaultManager', () => { await acl.destroy(); await remoteAgent.stop(); await remoteAgent.destroy(); + await taskManager.stop(); } }); test('stopping respects locks', async () => {