diff --git a/src/API.ts b/src/API.ts
index 0b3b95f8..25ece1ea 100644
--- a/src/API.ts
+++ b/src/API.ts
@@ -170,7 +170,7 @@ export function registerRoutes(server: FastifyInstance
 0 || combineAccountsData.receipts.length > 0) {
     Logger.mainLogger.debug('Found combine accountsData', combineAccountsData.accounts.length)
diff --git a/src/Data/Cycles.ts b/src/Data/Cycles.ts
index fc495328..5c77f964 100644
--- a/src/Data/Cycles.ts
+++ b/src/Data/Cycles.ts
@@ -14,11 +14,11 @@ import {
   clearDataSenders,
   dataSenders,
   getConsensusRadius,
-  nodesPerConsensusGroup,
-  nodesPerEdge,
-  subscribeConsensorsByConsensusRadius,
+  subscribeConsensorsByConsensusRadiusWithDataSenders,
   subscriptionCycleData,
-  unsubscribeDataSender,
+  unsubscribeDataSenderWithDataSenders,
+  nodesPerConsensusGroup,
+  nodesPerEdge
 } from './Data'
 import * as Utils from '../Utils'
 import { config } from '../Config'
@@ -233,17 +233,17 @@ export const validateCycleData = (
   }
   const cycleRecordWithoutMarker = { ...cycleRecord }
-  delete cycleRecordWithoutMarker.marker
+  delete (cycleRecordWithoutMarker as any).marker
-  const computedMarker = computeCycleMarker(cycleRecordWithoutMarker)
+  const computedMarker = computeCycleMarker(cycleRecordWithoutMarker as P2PTypes.CycleCreatorTypes.CycleRecord)
   Logger.mainLogger.debug(
     'validateCycleData: Computed marker: ',
     computedMarker,
     ' Provided marker: ',
-    cycleRecord.marker
+    (cycleRecord as any).marker
   )
-  if (computedMarker !== cycleRecord.marker) {
+  if (computedMarker !== (cycleRecord as any).marker) {
     Logger.mainLogger.error('Invalid Cycle Record: cycle marker does not match with the computed marker')
     return false
   }
@@ -372,7 +372,7 @@ function updateNodeList(cycle: P2PTypes.CycleCreatorTypes.CycleData): void {
   const nodesToUnsubscribed = [...apoptosizedPks, ...removedPks]
   if (nodesToUnsubscribed.length > 0) {
     for (const key of nodesToUnsubscribed) {
-      if (dataSenders.has(key)) unsubscribeDataSender(key)
+      if (dataSenders.has(key)) unsubscribeDataSenderWithDataSenders(key)
     }
   }
   if (removedConsensusNodes.length > 0 || apoptosizedConsensusNodes.length > 0) {
@@ -390,7 +390,7 @@ function updateNodeList(cycle: P2PTypes.CycleCreatorTypes.CycleData): void {
   NodeList.realUpdatedTimes.set('/full-nodelist', Date.now())
   // To pick nodes only when the archiver is active
   if (State.isActive) {
-    subscribeConsensorsByConsensusRadius()
+    subscribeConsensorsByConsensusRadiusWithDataSenders()
   }
 }
diff --git a/src/Data/Data.ts b/src/Data/Data.ts
index 2ebe8f89..9af48941 100644
--- a/src/Data/Data.ts
+++ b/src/Data/Data.ts
@@ -1,100 +1,183 @@
 import { EventEmitter } from 'events'
-import { publicKey, SignedObject } from '@shardeum-foundation/lib-crypto-utils'
 import * as Crypto from '../Crypto'
 import * as NodeList from '../NodeList'
 import * as Cycles from './Cycles'
-import {
-  getCurrentCycleCounter,
-  currentCycleDuration,
-  processCycles,
-  validateCycle,
-  validateCycleData,
-  fetchCycleRecords,
-  getNewestCycleFromArchivers,
-  getNewestCycleFromConsensors,
-} from './Cycles'
-import { ChangeSquasher, parse, totalNodeCount, activeNodeCount, applyNodeListChange } from './CycleParser'
 import * as State from '../State'
 import * as P2P from '../P2P'
 import * as Utils from '../Utils'
-import { config, updateConfig } from '../Config'
+import { config } from '../Config'
 import { P2P as P2PTypes } from '@shardeum-foundation/lib-types'
 import * as Logger from '../Logger'
 import { nestedCountersInstance } from '../profiler/nestedCounters'
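The `validateCycleData` hunk above recomputes the cycle marker over a copy of the record with its `marker` field deleted, then compares it to the provided one. A minimal sketch of that shape of check — the hash helper, key order, and types are assumptions here, not the archiver's real `computeCycleMarker`:

```ts
import { createHash } from 'crypto'

// Hypothetical stand-in for the archiver's hashing; real markers come from computeCycleMarker.
function hashObj(obj: unknown): string {
  return createHash('sha256').update(JSON.stringify(obj)).digest('hex')
}

// Drop the marker before hashing, then compare against the claimed marker.
function isMarkerValid(record: { marker?: string } & Record<string, unknown>): boolean {
  const { marker, ...rest } = record
  return hashObj(rest) === marker
}
```

Note the sketch is order-sensitive because of `JSON.stringify`; a real implementation needs a canonical serialization.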
-import {
-  storeReceiptData,
-  storeCycleData,
-  storeAccountData,
-  storingAccountData,
-  storeOriginalTxData,
-} from './Collector'
 import * as CycleDB from '../dbstore/cycles'
 import * as ReceiptDB from '../dbstore/receipts'
 import * as OriginalTxDB from '../dbstore/originalTxsData'
 import * as StateMetaData from '../archivedCycle/StateMetaData'
-import fetch from 'node-fetch'
-import { syncV2 } from '../sync-v2'
 import { queryFromArchivers, RequestDataType } from '../API'
-import ioclient = require('socket.io-client')
-import { Transaction } from '../dbstore/transactions'
-import { AccountsCopy } from '../dbstore/accounts'
-import { getJson } from '../P2P'
 import { robustQuery } from '../Utils'
-import { Utils as StringUtils } from '@shardeum-foundation/lib-types'
-import { cachedCycleRecords, updateCacheFromDB } from '../cache/cycleRecordsCache'
-import { XOR } from '../utils/general'
-import { customFetch } from '../utils/customHttpFunctions'
 import { ArchiverLogging } from '../profiler/archiverLogging'
-import { Utils as UtilsTypes } from '@shardeum-foundation/lib-types'
-import { logEnvSetup } from '../utils/environment'
-
-interface ValidationBreadcrumb {
-  cycle: P2PTypes.CycleCreatorTypes.CycleData
-}
+// Import all split modules
+import {
+  DataRequestTypes,
+  DataRequest,
+  DataResponse,
+  StoredReceiptObject,
+  CountResponse,
+  ArchiverAccountResponse,
+  ArchiverCycleResponse,
+  ArchiverReceiptResponse,
+  ArchiverOriginalTxResponse,
+  ArchiverReceiptCountResponse,
+  ArchiverOriginalTxsCountResponse,
+  ArchiverTotalDataResponse,
+  RequestDataCountType,
+  DataSender,
+  CompareResponse,
+  ArchiverWithRetries,
+  subscriptionCycleData,
+  Signer,
+  ValidatorColletor,
+  ValidatorCycle,
+  CombinedAccountsData
+} from './types'
+export * from './types'
-class ValidationTracker {
-  private seen = new Set()
-  private breadcrumbs: ValidationBreadcrumb[] = []
-  private readonly MAX_ENTRIES = 1000
+import {
+  getTotalDataFromArchivers,
+  createDataRequest,
+  syncGenesisAccountsFromArchiver,
+  syncGenesisTransactionsFromArchiver,
+  calcIncomingTimes,
+  clearDataSenders as clearDataSendersImpl,
+  sendLeaveRequest,
+  joinNetwork,
+  submitJoin,
+  checkJoinStatus,
+  sendActiveRequest,
+  checkActiveStatus,
+  getCycleDuration,
+  nodesPerConsensusGroup,
+  nodesPerEdge
+} from './missingFunctions'
+export {
+  getTotalDataFromArchivers,
+  createDataRequest,
+  syncGenesisAccountsFromArchiver,
+  syncGenesisTransactionsFromArchiver,
+  calcIncomingTimes,
+  sendLeaveRequest,
+  joinNetwork,
+  submitJoin,
+  checkJoinStatus,
+  sendActiveRequest,
+  checkActiveStatus,
+  getCycleDuration,
+  nodesPerConsensusGroup,
+  nodesPerEdge
+}
-  add(breadcrumb: ValidationBreadcrumb): void {
-    const key = `${breadcrumb.cycle.marker}:${breadcrumb.cycle.previous}`
+import {
+  socketClients,
+  validationTracker,
+  ValidationTracker,
+  unsubscribeDataSender,
+  initSocketClient,
+  forwardGenesisAccounts,
+  setForwardGenesisAccounts
+} from './socketClient'
+export { socketClients, validationTracker, ValidationTracker, unsubscribeDataSender, initSocketClient }
+
+import { sendDataRequest } from './dataRequests'
+export { sendDataRequest }
+
+import { clearCombinedAccountsData } from './accountData'
+export { clearCombinedAccountsData }
-    if (this.seen.has(key)) {
-      return
-    }
+import {
+  collectCycleData as collectCycleDataRaw,
+  scoreCert,
+  syncCycleData
+} from './cycleData'
+export { scoreCert, syncCycleData }
-    if (this.breadcrumbs.length >= this.MAX_ENTRIES) {
-      const oldest = this.breadcrumbs.shift()!
-      this.seen.delete(`${oldest.cycle.marker}:${oldest.cycle.previous}`)
-    }
+// Export collectCycleData with dataSenders injected
+export function collectCycleData(
+  cycleData: subscriptionCycleData[] | P2PTypes.CycleCreatorTypes.CycleData[],
+  senderInfo: string,
+  source: string
+): void {
+  return collectCycleDataRaw(cycleData, senderInfo, source, dataSenders)
+}
-    Logger.mainLogger.warn('[ValidationTracker] Certificate validation failed', {
-      cycle: breadcrumb.cycle,
-    })
+import {
+  syncFromNetworkConfig,
+  getConsensusRadius,
+  getCurrentConsensusRadius,
+  setCurrentConsensusRadius
+} from './networkConfig'
+export { getConsensusRadius, getCurrentConsensusRadius }
-    this.seen.add(key)
-    this.breadcrumbs.push(breadcrumb)
-  }
+import {
+  createContactTimeout,
+  addDataSender,
+  replaceDataSenderMain as replaceDataSender,
+  subscribeNodeForDataTransfer,
+  createDataTransferConnection,
+  createNodesGroupByConsensusRadius,
+  subscribeConsensorsByConsensusRadius,
+  subscribeNodeFromThisSubset
+} from './nodeSubscription'
+export {
+  createContactTimeout,
+  addDataSender,
+  replaceDataSender,
+  subscribeNodeForDataTransfer,
+  createDataTransferConnection,
+  createNodesGroupByConsensusRadius,
+  subscribeConsensorsByConsensusRadius,
+  subscribeNodeFromThisSubset
 }
-const validationTracker = new ValidationTracker()
-
-export const socketClients: Map = new Map()
-export let combineAccountsData = {
-  accounts: [],
-  receipts: [],
+import {
+  syncGenesisAccountsFromConsensor,
+  syncGenesisTransactionsFromConsensor,
+  buildNodeListFromStoredCycle,
+  syncCyclesAndNodeList,
+  syncCyclesAndNodeListV2,
+  syncCyclesBetweenCycles,
+  syncReceipts,
+  syncReceiptsByCycle,
+  syncCyclesAndTxsData,
+  syncCyclesAndTxsDataBetweenCycles
+} from './dataSync'
+export {
+  syncGenesisAccountsFromConsensor,
+  syncGenesisTransactionsFromConsensor,
+  buildNodeListFromStoredCycle,
+  syncCyclesAndNodeList,
+  syncCyclesAndNodeListV2,
+  syncCyclesBetweenCycles,
+  syncReceipts,
+  syncReceiptsByCycle,
+  syncCyclesAndTxsData,
+  syncCyclesAndTxsDataBetweenCycles
 }
-const forwardGenesisAccounts = true
-export let currentConsensusRadius = 0
-export let nodesPerConsensusGroup = 0
-export let nodesPerEdge = 0
-let subsetNodesMapByConsensusRadius: Map = new Map()
-const maxCyclesInCycleTracker = 5
-const receivedCycleTracker = {}
-const QUERY_TIMEOUT_MAX = 30 // 30seconds
-const {
+
+import {
+  compareWithOldOriginalTxsData,
+  compareWithOldReceiptsData,
+  compareWithOldCyclesData
+} from './dataComparison'
+export {
+  compareWithOldOriginalTxsData,
+  compareWithOldReceiptsData,
+  compareWithOldCyclesData
+}
+
+// Re-export constants and global variables
+export const QUERY_TIMEOUT_MAX = 30 // 30seconds
+export const {
   MAX_ACCOUNTS_PER_REQUEST,
   MAX_RECEIPTS_PER_REQUEST,
   MAX_ORIGINAL_TXS_PER_REQUEST,
@@ -102,2895 +185,101 @@ const {
   MAX_BETWEEN_CYCLES_PER_REQUEST,
 } = config.REQUEST_LIMIT
-const GENESIS_ACCOUNTS_CYCLE_RANGE = {
+export const GENESIS_ACCOUNTS_CYCLE_RANGE = {
   startCycle: 0,
   endCycle: 5,
 }
-export enum DataRequestTypes {
-  SUBSCRIBE = 'SUBSCRIBE',
-  UNSUBSCRIBE = 'UNSUBSCRIBE',
-}
-
-export interface DataRequest {
-  type: P2PTypes.SnapshotTypes.TypeName
-  lastData: P2PTypes.SnapshotTypes.TypeIndex
-}
-
-interface DataResponse {
-  type: P2PTypes.SnapshotTypes.TypeName
-  data: T[]
-}
-
-export interface CompareResponse {
-  success: boolean
-  matchedCycle: number
-}
-
-interface ArchiverCycleResponse {
-  cycleInfo: P2PTypes.CycleCreatorTypes.CycleData[]
-}
-
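The new `collectCycleData` wrapper above is the dependency-injection seam of this split: `./cycleData` takes the `dataSenders` map as a parameter instead of importing it, and the barrel binds the singleton once, which avoids a circular import between the new modules. The pattern reduced to its essentials (names shortened; a sketch, not the module's code):

```ts
// cycleData-style module: the dependency is passed in, no import of the barrel
export function collectCycleDataRaw(
  cycles: unknown[],
  senderInfo: string,
  source: string,
  dataSenders: Map<string, unknown>
): void {
  // validation and tracking elided
}

// barrel-style module: owns the singleton and binds it at the boundary
export const dataSenders = new Map<string, unknown>()
export const collectCycleData = (cycles: unknown[], senderInfo: string, source: string): void =>
  collectCycleDataRaw(cycles, senderInfo, source, dataSenders)
```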
-interface ArchiverTransactionResponse {
-  totalTransactions: number
-  transactions: Transaction[]
-}
-
-interface ArchiverAccountResponse {
-  totalAccounts: number
-  accounts: AccountsCopy[]
-}
-
-interface ArchiverTotalDataResponse {
-  totalCycles: number
-  totalAccounts: number
-  totalTransactions: number
-  totalOriginalTxs: number
-  totalReceipts: number
-}
-
-interface ArchiverReceiptResponse {
-  receipts: (ReceiptDB.Receipt | ReceiptDB.ReceiptCount)[] | number
-}
-
-interface ArchiverReceiptCountResponse {
-  receipts: number
-}
-
-interface ArchiverOriginalTxResponse {
-  originalTxs: (OriginalTxDB.OriginalTxData | OriginalTxDB.OriginalTxDataCount)[] | number
-}
-
-interface ArchiverOriginalTxCountResponse {
-  originalTxs: number
-}
-interface IncomingTimes {
-  quarterDuration: number
-  startQ1: number
-  startQ2: number
-  startQ3: number
-  startQ4: number
-  end: number
-}
-
-interface JoinStatus {
-  isJoined: boolean
-}
-
-export type subscriptionCycleData = Omit & {
-  certificates: P2PTypes.CycleCreatorTypes.CycleCert[]
-}
-
-export function createDataRequest(
-  type: P2PTypes.SnapshotTypes.TypeName,
-  lastData: P2PTypes.SnapshotTypes.TypeIndex,
-  recipientPk: publicKey
-): DataRequest & Crypto.TaggedMessage {
-  return Crypto.tag>(
-    {
-      type,
-      lastData,
-    },
-    recipientPk
-  )
-}
-
-export async function unsubscribeDataSender(publicKey: NodeList.ConsensusNodeInfo['publicKey']): Promise {
-  Logger.mainLogger.debug('Disconnecting previous connection', publicKey)
-  const sender = dataSenders.get(publicKey)
-  if (sender) {
-    // Clear contactTimeout associated with this sender
-    if (sender.contactTimeout) {
-      clearTimeout(sender.contactTimeout)
-      sender.contactTimeout = null
-    }
-    sendDataRequest(sender.nodeInfo, DataRequestTypes.UNSUBSCRIBE)
-    // Delete sender from dataSenders
-    dataSenders.delete(publicKey)
-  }
-  const socketClient = socketClients.get(publicKey)
-  if (socketClient) {
-    socketClient.emit('UNSUBSCRIBE', config.ARCHIVER_PUBLIC_KEY)
-    socketClient.close()
-    socketClients.delete(publicKey)
-  }
-  nestedCountersInstance.countEvent('archiver', 'remove_data_sender')
-  Logger.mainLogger.debug('Subscribed dataSenders', dataSenders.size, 'Connected socketClients', socketClients.size)
-  if (config.VERBOSE)
-    Logger.mainLogger.debug(
-      'Subscribed dataSenders',
-      dataSenders.keys(),
-      'Connected socketClients',
-      socketClients.keys()
-    )
-}
-
-export function initSocketClient(node: NodeList.ConsensusNodeInfo): void {
-  if (config.VERBOSE) Logger.mainLogger.debug('Node Info to socket connect', node)
-
-  try {
-    const socketClient = ioclient.connect(`http://${node.ip}:${node.port}`, {
-      query: {
-        data: StringUtils.safeStringify(
-          Crypto.sign({
-            publicKey: State.getNodeInfo().publicKey,
-            timestamp: Date.now(),
-            intendedConsensor: node.publicKey,
-          })
-        ),
-      },
-    })
-    socketClients.set(node.publicKey, socketClient)
-
-    socketClient.on('connect', () => {
-      Logger.mainLogger.debug(`✅ New Socket Connection to consensus node ${node.ip}:${node.port} is made`)
-      if (config.VERBOSE) Logger.mainLogger.debug('Connected node', node)
-      if (config.VERBOSE) Logger.mainLogger.debug('Init socketClients', socketClients.size, dataSenders.size)
-    })
-
-    socketClient.once('disconnect', async () => {
-      Logger.mainLogger.debug(`Connection request is refused by the consensor node ${node.ip}:${node.port}`)
-    })
-
-    socketClient.on('DATA', (data: string) => {
-      const newData: DataResponse & Crypto.TaggedMessage =
-        StringUtils.safeJsonParse(data)
-      if (!newData || !newData.responses) return
-      if (newData.recipient !== State.getNodeInfo().publicKey) {
-        Logger.mainLogger.debug('This data is not meant for this archiver')
-        return
-      }
-
-      // If tag is invalid, dont keepAlive, END
-      if (Crypto.authenticate(newData) === false) {
-        Logger.mainLogger.debug('This data cannot be authenticated')
-        unsubscribeDataSender(node.publicKey)
-        return
-      }
-
-      if (config.experimentalSnapshot) {
-        // Get sender entry
-        let sender = dataSenders.get(newData.publicKey)
-        // If no sender entry, remove publicKey from senders, END
-        if (!sender) {
-          Logger.mainLogger.error('This sender is not in the subscribed nodes list', newData.publicKey)
-          // unsubscribeDataSender(newData.publicKey)
-          return
-        }
-        // Clear senders contactTimeout, if it has one
-        if (sender.contactTimeout) {
-          if (config.VERBOSE) Logger.mainLogger.debug('Clearing contact timeout.')
-          clearTimeout(sender.contactTimeout)
-          sender.contactTimeout = null
-          nestedCountersInstance.countEvent('archiver', 'clear_contact_timeout')
-        }
-
-        if (config.VERBOSE) console.log('DATA', sender.nodeInfo.publicKey, sender.nodeInfo.ip, sender.nodeInfo.port)
-
-        if (newData.responses && newData.responses.ORIGINAL_TX_DATA) {
-          if (config.VERBOSE)
-            Logger.mainLogger.debug(
-              'ORIGINAL_TX_DATA',
-              sender.nodeInfo.publicKey,
-              sender.nodeInfo.ip,
-              sender.nodeInfo.port,
-              newData.responses.ORIGINAL_TX_DATA.length
-            )
-          // gracefully ignoring since it is now coupled with the receipt flow
-          // storeOriginalTxData(
-          //   newData.responses.ORIGINAL_TX_DATA,
-          //   sender.nodeInfo.ip + ':' + sender.nodeInfo.port,
-          //   config.saveOnlyGossipData
-          // )
-        }
-        if (newData.responses && newData.responses.RECEIPT) {
-          if (config.VERBOSE)
-            Logger.mainLogger.debug(
-              'RECEIPT',
-              sender.nodeInfo.publicKey,
-              sender.nodeInfo.ip,
-              sender.nodeInfo.port,
-              newData.responses.RECEIPT.length
-            )
-          storeReceiptData(
-            newData.responses.RECEIPT,
-            sender.nodeInfo.ip + ':' + sender.nodeInfo.port,
-            true,
-            config.saveOnlyGossipData,
-            true
-          )
-        }
-        if (newData.responses && newData.responses.CYCLE) {
-          collectCycleData(newData.responses.CYCLE, sender.nodeInfo.ip + ':' + sender.nodeInfo.port, 'data-sender')
-        }
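The `DATA` handler in the removed code gates every payload twice before touching it: the `recipient` field must be this archiver's public key, and the tagged message must pass `Crypto.authenticate`; a bad tag unsubscribes the sender outright. A condensed sketch of that gate (the `verifyTag`/`dropSender` helpers are hypothetical stand-ins for `Crypto.authenticate` and `unsubscribeDataSender`):

```ts
declare function verifyTag(msg: unknown): boolean
declare function dropSender(publicKey: string): void

function acceptData(newData: { recipient: string; publicKey: string }, myPublicKey: string): boolean {
  if (newData.recipient !== myPublicKey) return false // not addressed to this archiver
  if (!verifyTag(newData)) {
    dropSender(newData.publicKey) // bad tag: end the subscription entirely
    return false
  }
  return true
}
```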
-        if (newData.responses && newData.responses.ACCOUNT) {
-          if (getCurrentCycleCounter() > GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle) {
-            Logger.mainLogger.error(
-              'Account data is not meant to be received after the genesis accounts cycle range',
-              getCurrentCycleCounter()
-            )
-            unsubscribeDataSender(sender.nodeInfo.publicKey)
-            return
-          }
-          if (
-            Cycles.currentNetworkMode !== 'forming' ||
-            NodeList.byPublicKey.size > 1 ||
-            !NodeList.byPublicKey.has(sender.nodeInfo.publicKey)
-          ) {
-            Logger.mainLogger.error(
-              'Account data is not meant to be received by the first validator',
-              `Number of nodes in the network ${NodeList.byPublicKey.size}`
-            )
-            unsubscribeDataSender(sender.nodeInfo.publicKey)
-            return
-          }
-          Logger.mainLogger.debug(`RECEIVED ACCOUNTS DATA FROM ${sender.nodeInfo.ip}:${sender.nodeInfo.port}`)
-          nestedCountersInstance.countEvent('genesis', 'accounts', 1)
-          if (!forwardGenesisAccounts) {
-            Logger.mainLogger.debug('Genesis Accounts To Sycn', newData.responses.ACCOUNT)
-            syncGenesisAccountsFromConsensor(newData.responses.ACCOUNT, sender.nodeInfo)
-          } else {
-            if (storingAccountData) {
-              Logger.mainLogger.debug('Storing Account Data')
-              let newCombineAccountsData = { ...combineAccountsData }
-              if (newData.responses.ACCOUNT.accounts)
-                newCombineAccountsData.accounts = [
-                  ...newCombineAccountsData.accounts,
-                  ...newData.responses.ACCOUNT.accounts,
-                ]
-              if (newData.responses.ACCOUNT.receipts)
-                newCombineAccountsData.receipts = [
-                  ...newCombineAccountsData.receipts,
-                  ...newData.responses.ACCOUNT.receipts,
-                ]
-              combineAccountsData = { ...newCombineAccountsData }
-              newCombineAccountsData = {
-                accounts: [],
-                receipts: [],
-              }
-            } else storeAccountData(newData.responses.ACCOUNT)
-          }
-        }
-
-        // Set new contactTimeout for sender. Postpone sender removal because data is still received from consensor
-        nestedCountersInstance.countEvent('archiver', 'postpone_contact_timeout')
-        // To make sure that the sender is still in the subscribed list
-        sender = dataSenders.get(newData.publicKey)
-        if (sender)
-          sender.contactTimeout = createContactTimeout(
-            sender.nodeInfo.publicKey,
-            'This timeout is created after processing data'
-          )
-      }
-    })
-  } catch (error) {
-    console.error('Error occurred during socket connection:', error)
-  }
-}
-
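The removed `collectCycleData` implementation, which follows, begins by refusing cycle data from any `ip:port` that is neither an active validator nor an active archiver. That check in isolation:

```ts
interface Endpoint { ip: string; port: number }

// senderInfo is the "ip:port" string the socket layer attaches to each delivery
function isKnownSender(senderInfo: string, activeNodes: Endpoint[], activeArchivers: Endpoint[]): boolean {
  const [ip, port] = senderInfo.split(':')
  const matches = (n: Endpoint): boolean => n.ip === ip && n.port.toString() === port
  return activeNodes.some(matches) || activeArchivers.some(matches)
}
```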
-export function collectCycleData(
-  cycleData: subscriptionCycleData[] | P2PTypes.CycleCreatorTypes.CycleData[],
-  senderInfo: string,
-  source: string
-): void {
-  const startTime = Date.now()
-  const operationId = ArchiverLogging.generateOperationId()
-
-  Logger.mainLogger.debug(
-    `collectCycleData: Processing ${cycleData.length} cycles from ${senderInfo}, source: ${source}`
-  )
-
-  nestedCountersInstance.countEvent('collectCycleData', 'cycles_received', cycleData.length)
-  nestedCountersInstance.countEvent('collectCycleData', 'source_' + source, 1)
-
-  ArchiverLogging.logDataSync({
-    sourceArchiver: senderInfo,
-    targetArchiver: config.ARCHIVER_IP,
-    cycle: 0,
-    dataType: 'CYCLE_RECORD',
-    dataHash: '',
-    status: 'STARTED',
-    operationId,
-    metrics: {
-      duration: 0,
-      dataSize: StringUtils.safeStringify(cycleData).length,
-    },
-  })
-
-  // check if the sender is in the nodelists
-  if (NodeList.activeListByIdSorted.length > 0) {
-    const [ip, port] = senderInfo.split(':')
-    const isInActiveNodes = NodeList.activeListByIdSorted.some(
-      (node) => node.ip === ip && node.port.toString() === port
-    )
-    const isInActiveArchivers = State.activeArchivers.some(
-      (archiver) => archiver.ip === ip && archiver.port.toString() === port
-    )
-    if (!isInActiveNodes && !isInActiveArchivers) {
-      nestedCountersInstance.countEvent('collectCycleData', 'sender_not_active', 1)
-      Logger.mainLogger.warn(`collectCycleData: Ignoring cycle data from non-active node: ${senderInfo}`)
-      ArchiverLogging.logDataSync({
-        sourceArchiver: senderInfo,
-        targetArchiver: config.ARCHIVER_IP,
-        cycle: 0,
-        dataType: 'CYCLE_RECORD',
-        dataHash: '',
-        status: 'ERROR',
-        operationId,
-        metrics: {
-          duration: Date.now() - startTime,
-          dataSize: StringUtils.safeStringify(cycleData).length,
-        },
-        error: 'Sender not in active nodes or archivers',
-      })
-      return
-    }
-  }
-
-  for (const cycle of cycleData) {
-    Logger.mainLogger.debug(`collectCycleData: Processing cycle ${cycle.counter}, marker: ${cycle.marker}`)
-
-    if (receivedCycleTracker[cycle.counter]?.saved === true) {
-      nestedCountersInstance.countEvent('collectCycleData', 'cycle_already_saved_' + cycle.mode, 1)
-      Logger.mainLogger.debug(`collectCycleData: Cycle ${cycle.counter} already saved, skipping`)
-      ArchiverLogging.logDataSync({
-        sourceArchiver: senderInfo,
-        targetArchiver: config.ARCHIVER_IP,
-        cycle: cycle.counter,
-        dataType: 'CYCLE_RECORD',
-        dataHash: cycle.marker,
-        status: 'COMPLETE',
-        operationId,
-        metrics: {
-          duration: Date.now() - startTime,
-          dataSize: StringUtils.safeStringify(cycle).length,
-        },
-      })
-      break
-    }
-
-    nestedCountersInstance.countEvent('collectCycleData', 'process_cycle_' + cycle.mode, 1)
-
-    // since we can trust archivers and archiver only gossip after they have verified the cycleData
-    // we can just call processCycles here
-    if (source === 'archiver') {
-      nestedCountersInstance.countEvent('collectCycleData', 'direct_process_from_archiver', 1)
-      Logger.mainLogger.debug(`collectCycleData: Processing cycle ${cycle.counter} from archiver directly`)
-      processCycles([cycle as P2PTypes.CycleCreatorTypes.CycleData])
-      continue
-    }
-
-    let receivedCertSigners = []
-    if (NodeList.activeListByIdSorted.length > 0) {
-      const certSigners = receivedCycleTracker[cycle.counter]?.[cycle.marker]?.['certSigners'] ?? new Set()
-
-      try {
-        // need to get the hash(marker) of the cycle as it was in q3/q4 when the certs were made and compared
-        Logger.mainLogger.debug(`collectCycleData: Original cycle data: ${UtilsTypes.safeStringify(cycle)}`)
-        const cycleCopy = getRecordWithoutPostQ3Changes(cycle)
-        const computedMarker = Cycles.computeCycleMarker(cycleCopy)
-        Logger.mainLogger.debug(`collectCycleData: cycle copy ${UtilsTypes.safeStringify(cycleCopy)}`)
-        Logger.mainLogger.debug(
-          `collectCycleData: Computed marker for cycle ${cycle.counter}: ${computedMarker}, original marker: ${cycle.marker}`
-        )
-        Logger.mainLogger.debug(
-          `collectCycleData: Validating ${(cycle as subscriptionCycleData).certificates?.length || 0} certificates for cycle ${cycle.counter}`
-        )
-
-        const validateCertsResult = validateCerts(
-          (cycle as subscriptionCycleData).certificates,
-          certSigners,
-          computedMarker,
-          cycleCopy as P2PTypes.CycleCreatorTypes.CycleData
-        )
-
-        if (validateCertsResult === false) {
-          nestedCountersInstance.countEvent('collectCycleData', 'certificate_validation_failed_' + cycle.mode, 1)
-          Logger.mainLogger.warn(
-            `collectCycleData: Certificate validation failed for cycle ${cycle.counter} from ${senderInfo} in ${cycle.mode} mode`
-          )
-          ArchiverLogging.logDataSync({
-            sourceArchiver: senderInfo,
-            targetArchiver: config.ARCHIVER_IP,
-            cycle: cycle.counter,
-            dataType: 'CYCLE_RECORD',
-            dataHash: cycle.marker,
-            status: 'ERROR',
-            operationId,
-            metrics: {
-              duration: Date.now() - startTime,
-              dataSize: StringUtils.safeStringify(cycle).length,
-            },
-            error: 'Certificate validation failed',
-          })
-          break
-        }
-
-        nestedCountersInstance.countEvent('collectCycleData', 'certificate_validation_success_' + cycle.mode, 1)
-        Logger.mainLogger.debug(`collectCycleData: Certificate validation successful for cycle ${cycle.counter}`)
-      } catch (error) {
-        nestedCountersInstance.countEvent('collectCycleData', 'certificate_validation_error_' + cycle.mode, 1)
-        Logger.mainLogger.error(
-          `collectCycleData: Error during certificate validation for cycle ${cycle.counter}: ${error}`
-        )
-        ArchiverLogging.logDataSync({
-          sourceArchiver: senderInfo,
-          targetArchiver: config.ARCHIVER_IP,
-          cycle: cycle.counter,
-          dataType: 'CYCLE_RECORD',
-          dataHash: cycle.marker,
-          status: 'ERROR',
-          operationId,
-          metrics: {
-            duration: Date.now() - startTime,
-            dataSize: StringUtils.safeStringify(cycle).length,
-          },
-          error: `Certificate validation error: ${error.message}`,
-        })
-        break
-      }
-    }
-
-    receivedCertSigners = (cycle as subscriptionCycleData).certificates.map((cert) => cert.sign.owner)
-    Logger.mainLogger.debug(
-      `collectCycleData: Received ${receivedCertSigners.length} certificate signers for cycle ${cycle.counter}`
-    )
-    delete (cycle as subscriptionCycleData).certificates
-
-    if (receivedCycleTracker[cycle.counter]) {
-      if (receivedCycleTracker[cycle.counter][cycle.marker]) {
-        nestedCountersInstance.countEvent('collectCycleData', 'add_signers_to_existing_marker_' + cycle.mode, 1)
-        Logger.mainLogger.debug(`collectCycleData: Adding signers to existing marker for cycle ${cycle.counter}`)
-        for (const signer of receivedCertSigners)
-          receivedCycleTracker[cycle.counter][cycle.marker]['certSigners'].add(signer)
-      } else {
-        if (!validateCycleData(cycle)) {
-          nestedCountersInstance.countEvent('collectCycleData', 'cycle_data_validation_failed_' + cycle.mode, 1)
-          Logger.mainLogger.warn(
-            `collectCycleData: Cycle data validation failed for cycle ${cycle.counter} with marker ${cycle.marker}`
-          )
-          ArchiverLogging.logDataSync({
-            sourceArchiver: senderInfo,
-            targetArchiver: config.ARCHIVER_IP,
-            cycle: cycle.counter,
-            dataType: 'CYCLE_RECORD',
-            dataHash: cycle.marker,
-            status: 'ERROR',
-            operationId,
-            metrics: {
-              duration: Date.now() - startTime,
-              dataSize: StringUtils.safeStringify(cycle).length,
-            },
-            error: 'Cycle data validation failed',
-          })
-          continue
-        }
-        nestedCountersInstance.countEvent('collectCycleData', 'create_new_marker_entry_' + cycle.mode, 1)
-        Logger.mainLogger.debug(
-          `collectCycleData: Creating new marker entry for cycle ${cycle.counter} with marker ${cycle.marker}`
-        )
-        receivedCycleTracker[cycle.counter][cycle.marker] = {
-          cycleInfo: cycle,
-          certSigners: new Set(receivedCertSigners),
-        }
-        Logger.mainLogger.debug('Different Cycle Record received', cycle.counter)
-      }
-      receivedCycleTracker[cycle.counter]['received']++
-      Logger.mainLogger.debug(
-        `collectCycleData: Cycle ${cycle.counter} received count: ${receivedCycleTracker[cycle.counter]['received']}`
-      )
-    } else {
-      if (!validateCycleData(cycle)) {
-        nestedCountersInstance.countEvent('collectCycleData', 'cycle_data_validation_failed_' + cycle.mode, 1)
-        Logger.mainLogger.warn(
-          `collectCycleData: Cycle data validation failed for cycle ${cycle.counter} with marker ${cycle.marker}`
-        )
-        ArchiverLogging.logDataSync({
-          sourceArchiver: senderInfo,
-          targetArchiver: config.ARCHIVER_IP,
-          cycle: cycle.counter,
-          dataType: 'CYCLE_RECORD',
-          dataHash: cycle.marker,
-          status: 'ERROR',
-          operationId,
-          metrics: {
-            duration: Date.now() - startTime,
-            dataSize: StringUtils.safeStringify(cycle).length,
-          },
-          error: 'Cycle data validation failed',
-        })
-        continue
-      }
-      nestedCountersInstance.countEvent('collectCycleData', 'create_new_cycle_tracker_' + cycle.mode, 1)
-      Logger.mainLogger.debug(`collectCycleData: Creating new cycle tracker entry for cycle ${cycle.counter}`)
-      receivedCycleTracker[cycle.counter] = {
-        [cycle.marker]: {
-          cycleInfo: cycle,
-          certSigners: new Set(receivedCertSigners),
-        },
-        received: 1,
-        saved: false,
-      }
-    }
-    if (config.VERBOSE) Logger.mainLogger.debug('Cycle received', cycle.counter, receivedCycleTracker[cycle.counter])
-
-    if (NodeList.activeListByIdSorted.length === 0) {
-      nestedCountersInstance.countEvent('collectCycleData', 'no_active_nodes_direct_process_' + cycle.mode, 1)
-      Logger.mainLogger.debug(`collectCycleData: No active nodes, processing cycle ${cycle.counter} directly`)
-      processCycles([receivedCycleTracker[cycle.counter][cycle.marker].cycleInfo])
-      continue
-    }
-
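Before a cycle is accepted, the tracker demands reports from at least half of the current data senders (rounded up), falling back to a single report when no senders are registered yet — the `requiredSenders` expression picked up below. As a worked rule: 5 senders → 3 required, 4 → 2, 0 → 1.

```ts
// Mirrors the requiredSenders expression in the removed code below.
const requiredSendersFor = (senderCount: number): number =>
  senderCount ? Math.ceil(senderCount / 2) : 1
```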
-    const requiredSenders = dataSenders.size ? Math.ceil(dataSenders.size / 2) : 1
-    Logger.mainLogger.debug(
-      `collectCycleData: Cycle ${cycle.counter} requires ${requiredSenders} senders, current count: ${receivedCycleTracker[cycle.counter]['received']}`
-    )
-
-    if (receivedCycleTracker[cycle.counter]['received'] >= requiredSenders) {
-      nestedCountersInstance.countEvent('collectCycleData', 'enough_senders_process_' + cycle.mode, 1)
-      Logger.mainLogger.debug(`collectCycleData: Cycle ${cycle.counter} has enough senders, processing`)
-
-      let bestScore = 0
-      let bestMarker = ''
-      let prevMarker = ''
-
-      // If the cache is empty, update the cache from the db
-      // This change is to prevent the case where the archiver is not running when the cycle is created
-      // or the archiver is restarted and the cycle is not in the cache / fetching prev marker from empty cache
-      if (cachedCycleRecords.length === 0) {
-        updateCacheFromDB()
-          .then(() => {
-            // Verify if cachedCycleRecords[0] is the previous cycle
-            if (cachedCycleRecords.length > 0 && cycle.counter - cachedCycleRecords[0].counter > 1) {
-              Logger.mainLogger.debug(`updateCacheFromDB: No previous marker found for cycle ${cycle.counter}`)
-            }
-            processCycleWithPrevMarker()
-          })
-          .catch((error) => {
-            Logger.mainLogger.error(`updateCacheFromDB: Error updating cache from db: ${error}`)
-          })
-      } else {
-        processCycleWithPrevMarker()
-      }
-
-      function processCycleWithPrevMarker() {
-        if (cachedCycleRecords.length > 0 && cycle.counter - cachedCycleRecords[0].counter === 1) {
-          prevMarker = cachedCycleRecords[0].marker
-          Logger.mainLogger.debug(`collectCycleData: Previous marker for scoring: ${prevMarker}`)
-        } else {
-          Logger.mainLogger.debug(`collectCycleData: No previous marker found for cycle ${cycle.counter}`)
-          return
-        }
-        // find the marker with largest sum of its top 3 cert scores
-        const markers = Object.entries(receivedCycleTracker[cycle.counter])
-          .filter(([key]) => key !== 'saved' && key !== 'received')
-          .map(([, value]) => value)
-
-        Logger.mainLogger.debug(
-          `collectCycleData: Found ${markers.length} different markers for cycle ${cycle.counter}`
-        )
-
-        for (const marker of markers) {
-          const scores = []
-          for (const signer of marker['certSigners']) {
-            const score = scoreCert(signer as string, prevMarker)
-            scores.push(score)
-            Logger.mainLogger.debug(`collectCycleData: Cert from ${signer} scored ${score}`)
-          }
-          // get sum of top 3 scores: sort scores in desc order, then slice off first 3 elements, and add them
-          const sum = scores
-            .sort((a, b) => b - a)
-            .slice(0, 3)
-            .reduce((sum, score) => (sum += score), 0)
-
-          Logger.mainLogger.debug(`collectCycleData: Marker ${marker['cycleInfo'].marker} scored ${sum}`)
-
-          if (sum > bestScore) {
-            bestScore = sum
-            bestMarker = marker['cycleInfo'].marker
-            Logger.mainLogger.debug(`collectCycleData: New best marker: ${bestMarker} with score ${bestScore}`)
-          }
-        }
-
-        Logger.mainLogger.debug(
-          `collectCycleData: Processing cycle ${cycle.counter} with best marker ${bestMarker}, score: ${bestScore}`
-        )
-        processCycles([receivedCycleTracker[cycle.counter][bestMarker].cycleInfo])
-        receivedCycleTracker[cycle.counter]['saved'] = true
-
-        nestedCountersInstance.countEvent('collectCycleData', 'cycle_processed_successfully_' + cycle.mode, 1)
-
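`processCycleWithPrevMarker`, closed out just below, scores each candidate marker by summing its three best certificate scores against the previous cycle's marker and processes the winner. The selection logic as a standalone sketch (with `scoreCert` abstracted to a callback):

```ts
function pickBestMarker(
  markers: { marker: string; signers: string[] }[],
  scoreCert: (signer: string) => number
): { marker: string; score: number } {
  let best = { marker: '', score: 0 }
  for (const m of markers) {
    // top 3 certificate scores for this marker, summed
    const top3 = m.signers.map(scoreCert).sort((a, b) => b - a).slice(0, 3)
    const score = top3.reduce((sum, s) => sum + s, 0)
    if (score > best.score) best = { marker: m.marker, score }
  }
  return best
}
```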
-        ArchiverLogging.logDataSync({
-          sourceArchiver: senderInfo,
-          targetArchiver: config.ARCHIVER_IP,
-          cycle: cycle.counter,
-          dataType: 'CYCLE_RECORD',
-          dataHash: bestMarker,
-          status: 'COMPLETE',
-          operationId,
-          metrics: {
-            duration: Date.now() - startTime,
-            dataSize: StringUtils.safeStringify(receivedCycleTracker[cycle.counter][bestMarker].cycleInfo).length,
-          },
-        })
-      }
-    }
-  }
-
-  if (Object.keys(receivedCycleTracker).length > maxCyclesInCycleTracker) {
-    nestedCountersInstance.countEvent('collectCycleData', 'cleanup_old_cycles', 1)
-    Logger.mainLogger.debug(
-      `collectCycleData: Cleaning up old cycles, current count: ${Object.keys(receivedCycleTracker).length}`
-    )
-    for (const counter of Object.keys(receivedCycleTracker)) {
-      // Clear cycles that are older than last maxCyclesInCycleTracker cycles
-      if (parseInt(counter) < getCurrentCycleCounter() - maxCyclesInCycleTracker) {
-        let totalTimes = receivedCycleTracker[counter]['received']
-        let logCycle = false
-
-        const markers = Object.entries(receivedCycleTracker[counter])
-          .filter(([key]) => key !== 'saved' && key !== 'received')
-          .map(([, value]) => value)
-
-        // If there is more than one marker for this cycle, output the cycle log
-        if (markers.length > 1) {
-          logCycle = true
-          nestedCountersInstance.countEvent('collectCycleData', 'multiple_markers_for_cycle', 1)
-        }
-
-        for (const marker of markers) {
-          Logger.mainLogger.debug(
-            'Cycle',
-            counter,
-            marker,
-            /* eslint-disable security/detect-object-injection */
-            logCycle ? StringUtils.safeStringify([...receivedCycleTracker[counter][marker]['certSigners']]) : '',
-            logCycle ? receivedCycleTracker[counter][marker] : ''
-          )
-        }
-        if (logCycle) Logger.mainLogger.debug(`Cycle ${counter} has ${markers.length} different markers!`)
-        Logger.mainLogger.debug(`Received ${totalTimes} times for cycle counter ${counter}`)
-        // eslint-disable-next-line security/detect-object-injection
-        delete receivedCycleTracker[counter]
-      }
-    }
-  }
-}
-
-export function clearCombinedAccountsData(): void {
-  combineAccountsData = {
-    accounts: [],
-    receipts: [],
-  }
-}
-
-export interface DataSender {
-  nodeInfo: NodeList.ConsensusNodeInfo
-  types: (keyof typeof P2PTypes.SnapshotTypes.TypeNames)[]
-  contactTimeout?: NodeJS.Timeout | null
-  replaceTimeout?: NodeJS.Timeout | null
-}
-
 export const dataSenders: Map = new Map()

 export const emitter = new EventEmitter()

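Each subscription pairs a node with a contact timeout that is cleared and re-created on every delivery; if it ever fires, `replaceDataSender` (removed below, now in `./nodeSubscription`) swaps the silent node for another from the same subset. The lifecycle in miniature (10 s matching the removed `CONTACT_TIMEOUT_MS`):

```ts
interface Sender {
  publicKey: string
  contactTimeout: NodeJS.Timeout | null
}

const CONTACT_TIMEOUT_MS = 10 * 1000

// Re-armed after every delivery; firing means the node went silent.
function armContactTimeout(sender: Sender, onSilent: (publicKey: string) => void): void {
  if (sender.contactTimeout) clearTimeout(sender.contactTimeout)
  sender.contactTimeout = setTimeout(() => onSilent(sender.publicKey), CONTACT_TIMEOUT_MS)
}
```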
-export async function replaceDataSender(publicKey: NodeList.ConsensusNodeInfo['publicKey']): Promise {
-  nestedCountersInstance.countEvent('archiver', 'replace_data_sender')
-  if (NodeList.getActiveNodeCount() < 2) {
-    Logger.mainLogger.debug('There is only one active node in the network. Unable to replace data sender')
-    return
-  }
-  Logger.mainLogger.debug(`replaceDataSender: replacing ${publicKey}`)
-
-  if (!socketClients.has(publicKey) || !dataSenders.has(publicKey)) {
-    Logger.mainLogger.debug(
-      'This data sender is not in the subscribed list! and unsubscribing it',
-      publicKey,
-      socketClients.has(publicKey),
-      dataSenders.has(publicKey)
-    )
-    unsubscribeDataSender(publicKey)
-    return
-  }
-  unsubscribeDataSender(publicKey)
-  // eslint-disable-next-line security/detect-object-injection
-  const node = NodeList.byPublicKey.get(publicKey)
-  if (node) {
-    const nodeIndex = NodeList.activeListByIdSorted.findIndex((node) => node.publicKey === publicKey)
-    if (nodeIndex > -1) {
-      const subsetIndex = Math.floor(nodeIndex / currentConsensusRadius)
-      const subsetNodesList = subsetNodesMapByConsensusRadius.get(subsetIndex)
-      if (!subsetNodesList) {
-        Logger.mainLogger.error(`There is no nodes in the index ${subsetIndex} of subsetNodesMapByConsensusRadius!`)
-        return
-      }
-      subscribeNodeFromThisSubset(subsetNodesList, subsetIndex)
-    }
-  }
-}
-
-export async function subscribeNodeForDataTransfer(): Promise {
-  if (config.passiveMode) {
-    Logger.mainLogger.debug('Archiver is in passive mode. Skipping data transfer subscription.')
-    return
-  }
-
-  if (config.experimentalSnapshot) {
-    await subscribeConsensorsByConsensusRadius()
-  } else {
-    await StateMetaData.subscribeRandomNodeForDataTransfer()
-  }
-}
-
-/**
- * Sets 15s timeout
- * Removes sender from dataSenders on timeout
- * Select a new dataSender
- */
-export function createContactTimeout(publicKey: NodeList.ConsensusNodeInfo['publicKey'], msg = ''): NodeJS.Timeout {
-  const CONTACT_TIMEOUT_MS = 10 * 1000 // Change contact timeout to 10s
-  if (config.VERBOSE) Logger.mainLogger.debug('Created contact timeout: ' + CONTACT_TIMEOUT_MS, `for ${publicKey}`)
-  nestedCountersInstance.countEvent('archiver', 'contact_timeout_created')
-  return setTimeout(() => {
-    // Logger.mainLogger.debug('nestedCountersInstance', nestedCountersInstance)
-    if (nestedCountersInstance) nestedCountersInstance.countEvent('archiver', 'contact_timeout')
-    Logger.mainLogger.debug('REPLACING sender due to CONTACT timeout', msg, publicKey)
-    replaceDataSender(publicKey)
-  }, CONTACT_TIMEOUT_MS)
-}
-
-export function addDataSender(sender: DataSender): void {
-  dataSenders.set(sender.nodeInfo.publicKey, sender)
-}
-
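`syncFromNetworkConfig`, which follows, is a standard `robustQuery` use: a per-node query function, an equality function over answers, and a redundancy of 3 before any value is trusted. The same shape, reduced (the `/netconfig` response type is simplified here):

```ts
type NetConfig = { config?: { sharding?: { nodesPerConsensusGroup?: number } } }

async function queryNetConfig(node: { ip: string; port: number }): Promise<NetConfig | null> {
  try {
    const res = await fetch(`http://${node.ip}:${node.port}/netconfig`)
    return (await res.json()) as NetConfig
  } catch {
    return null // a dead node simply contributes no vote
  }
}

const sameGroupSize = (a: NetConfig, b: NetConfig): boolean =>
  a?.config?.sharding?.nodesPerConsensusGroup === b?.config?.sharding?.nodesPerConsensusGroup

// robustQuery(nodes, queryNetConfig, sameGroupSize, 3) resolves once 3 answers agree.
```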
-async function syncFromNetworkConfig(): Promise {
-  try {
-    // Define the query function to get the network config from a node
-    const queryFn = async (node): Promise => {
-      const REQUEST_NETCONFIG_TIMEOUT_SECOND = 3 // 3s timeout
-      try {
-        const response = await P2P.getJson(`http://${node.ip}:${node.port}/netconfig`, REQUEST_NETCONFIG_TIMEOUT_SECOND)
-        return response
-      } catch (error) {
-        Logger.mainLogger.error(`Error querying node ${node.ip}:${node.port}: ${error}`)
-        return null
-      }
-    }
-    // Define the equality function to compare two responses
-    const equalityFn = (responseA, responseB): boolean => {
-      return responseA?.config?.sharding?.nodesPerConsensusGroup === responseB?.config?.sharding?.nodesPerConsensusGroup
-    }
-    // Get the list of 10 max random active nodes or the first node if no active nodes are available
-    const nodes = NodeList.getActiveNodeCount() > 0 ? NodeList.getRandomActiveNodes(10) : [NodeList.getFirstNode()]
-    // Use robustQuery to get the consensusRadius from multiple nodes
-    const tallyItem = await robustQuery(
-      nodes,
-      queryFn,
-      equalityFn,
-      3 // Redundancy (minimum 3 nodes should return the same result to reach consensus)
-    )
-    if (tallyItem?.value?.config?.stateManager) {
-      // Updating the Archiver Config as per the latest Network Config
-      const {
-        useNewPOQ: newPOQReceipt,
-        configChangeMaxChangesToKeep,
-        configChangeMaxCyclesToKeep,
-        maxCyclesShardDataToKeep,
-      } = tallyItem.value.config.stateManager
-      // const devPublicKeys = tallyItem.value.config.debug.devPublicKeys
-      // const devPublicKey =
-      //   devPublicKeys &&
-      //   Object.keys(devPublicKeys).length >= 3 &&
-      //   Object.keys(devPublicKeys).find((key) => devPublicKeys[key] === 3)
-      // if (
-      //   devPublicKey &&
-      //   typeof devPublicKey === typeof config.DevPublicKey &&
-      //   devPublicKey !== config.DevPublicKey
-      // )
-      //   updateConfig({ DevPublicKey: devPublicKey })
-      if (
-        !Utils.isUndefined(newPOQReceipt) &&
-        typeof newPOQReceipt === typeof config.newPOQReceipt &&
-        newPOQReceipt !== config.newPOQReceipt
-      )
-        updateConfig({ newPOQReceipt })
-      if (
-        !Utils.isUndefined(configChangeMaxChangesToKeep) &&
-        typeof configChangeMaxChangesToKeep === typeof config.configChangeMaxChangesToKeep &&
-        configChangeMaxChangesToKeep !== config.configChangeMaxChangesToKeep
-      )
-        updateConfig({ configChangeMaxChangesToKeep })
-      if (
-        !Utils.isUndefined(configChangeMaxCyclesToKeep) &&
-        typeof configChangeMaxCyclesToKeep === typeof config.configChangeMaxCyclesToKeep &&
-        configChangeMaxCyclesToKeep !== config.configChangeMaxCyclesToKeep
-      )
-        updateConfig({ configChangeMaxCyclesToKeep })
-      if (
-        !Utils.isUndefined(maxCyclesShardDataToKeep) &&
-        typeof maxCyclesShardDataToKeep === typeof config.maxCyclesShardDataToKeep &&
-        maxCyclesShardDataToKeep !== config.maxCyclesShardDataToKeep
-      )
-        updateConfig({ maxCyclesShardDataToKeep })
-      return tallyItem
-    }
-    return null
-  } catch (error) {
-    Logger.mainLogger.error('❌ Error in syncFromNetworkConfig: ', error)
-    return null
-  }
-}
-
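`getConsensusRadius`, picked up below, bumps an even `nodesPerConsensusGroup` to the next odd number and then takes half of one less than it. Worked through:

```ts
// e.g. nodesPerConsensusGroup = 128 -> bumped to 129 -> radius 64
function consensusRadiusFor(nodesPerConsensusGroup: number): number {
  let group = nodesPerConsensusGroup
  if (group % 2 === 0) group++ // upgrade consensus size to an odd number
  return Math.floor((group - 1) / 2)
}
```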
-export async function getConsensusRadius(): Promise {
-  // If there is no node, return existing currentConsensusRadius
-  if (NodeList.isEmpty()) return currentConsensusRadius
-
-  const tallyItem = await syncFromNetworkConfig()
-  if (tallyItem?.value?.config) {
-    const nodesPerEdgeFromConfig = tallyItem.value.config.sharding?.nodesPerEdge
-    const nodesPerConsensusGroupFromConfig = tallyItem.value.config.sharding?.nodesPerConsensusGroup
-
-    if (!Number.isInteger(nodesPerConsensusGroupFromConfig) || nodesPerConsensusGroupFromConfig <= 0) {
-      Logger.mainLogger.error('nodesPerConsensusGroup is not a valid number:', nodesPerConsensusGroupFromConfig)
-      return currentConsensusRadius
-    }
-
-    if (!Number.isInteger(nodesPerEdgeFromConfig) || nodesPerEdgeFromConfig <= 0) {
-      Logger.mainLogger.error('nodesPerEdge is not a valid number:', nodesPerEdgeFromConfig)
-      return currentConsensusRadius
-    }
-    if (nodesPerConsensusGroup === nodesPerConsensusGroupFromConfig && nodesPerEdge === nodesPerEdgeFromConfig)
-      return currentConsensusRadius
-    nodesPerConsensusGroup = nodesPerConsensusGroupFromConfig
-    nodesPerEdge = nodesPerEdgeFromConfig
-    // Upgrading consensus size to an odd number
-    if (nodesPerConsensusGroup % 2 === 0) nodesPerConsensusGroup++
-    const consensusRadius = Math.floor((nodesPerConsensusGroup - 1) / 2)
-    // Validation: Ensure consensusRadius is a number and greater than zero
-    if (typeof consensusRadius !== 'number' || isNaN(consensusRadius) || consensusRadius <= 0) {
-      Logger.mainLogger.error('Invalid consensusRadius:', consensusRadius)
-      return currentConsensusRadius // Return the existing currentConsensusRadius in case of invalid consensusRadius
-    }
-    Logger.mainLogger.debug(
-      'consensusRadius',
-      consensusRadius,
-      'nodesPerConsensusGroup',
-      nodesPerConsensusGroup,
-      'nodesPerEdge',
-      nodesPerEdge
-    )
-    return consensusRadius
-  }
-  Logger.mainLogger.error('Failed to get consensusRadius from the network')
-  // If no consensus was reached, return the existing currentConsensusRadius
-  return currentConsensusRadius
-}
-
-export async function createDataTransferConnection(newSenderInfo: NodeList.ConsensusNodeInfo): Promise {
-  // // Verify node before subscribing for data transfer
-  // const status = await verifyNode(newSenderInfo)
-  // if (!status) return false
-  // Subscribe this node for dataRequest
-  const response = await sendDataRequest(newSenderInfo, DataRequestTypes.SUBSCRIBE)
-  if (response) {
-    initSocketClient(newSenderInfo)
-    // Add new dataSender to dataSenders
-    const newSender: DataSender = {
-      nodeInfo: newSenderInfo,
-      types: [P2PTypes.SnapshotTypes.TypeNames.CYCLE, P2PTypes.SnapshotTypes.TypeNames.STATE_METADATA],
-      contactTimeout: createContactTimeout(
-        newSenderInfo.publicKey,
-        'This timeout is created during newSender selection'
-      ),
-    }
-    addDataSender(newSender)
-    Logger.mainLogger.debug(`added new sender ${newSenderInfo.publicKey} to dataSenders`)
-  }
-  return response
-}
-
-function shouldSubscribeToMoreConsensors(): boolean {
-  return config.subscribeToMoreConsensors && currentConsensusRadius > 5
-}
-
-export async function createNodesGroupByConsensusRadius(): Promise {
-  const consensusRadius = await getConsensusRadius()
-  if (consensusRadius === 0) {
-    Logger.mainLogger.error('Consensus radius is 0, unable to create nodes group.')
-    return // Early return to prevent further execution
-  }
-  currentConsensusRadius = consensusRadius
-  const activeList = [...NodeList.activeListByIdSorted]
-  if (config.VERBOSE) Logger.mainLogger.debug('activeList', activeList.length, activeList)
-  let totalNumberOfNodesToSubscribe = Math.ceil(activeList.length / consensusRadius)
-  if (shouldSubscribeToMoreConsensors()) {
-    totalNumberOfNodesToSubscribe += totalNumberOfNodesToSubscribe * config.extraConsensorsToSubscribe
-  }
-  Logger.mainLogger.debug('totalNumberOfNodesToSubscribe', totalNumberOfNodesToSubscribe)
-  subsetNodesMapByConsensusRadius = new Map()
-  let round = 0
-  for (let i = 0; i < activeList.length; i += consensusRadius) {
-    const subsetList: NodeList.ConsensusNodeInfo[] = activeList.slice(i, i + consensusRadius)
-    subsetNodesMapByConsensusRadius.set(round, subsetList)
-    round++
-  }
-  if (config.VERBOSE) Logger.mainLogger.debug('subsetNodesMapByConsensusRadius', subsetNodesMapByConsensusRadius)
-}
-
-export async function subscribeConsensorsByConsensusRadius(): Promise {
-  await createNodesGroupByConsensusRadius()
-  for (const [i, subsetList] of subsetNodesMapByConsensusRadius) {
-    if (config.VERBOSE) Logger.mainLogger.debug('Round', i, 'subsetList', subsetList, dataSenders.keys())
-    subscribeNodeFromThisSubset(subsetList, i)
-  }
-}
-
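The grouping above walks the id-sorted active list in strides of one consensus radius, so each subset covers one stretch of the address space and gets at least one subscription. The slicing on its own:

```ts
function groupByRadius<T>(activeList: T[], radius: number): Map<number, T[]> {
  const subsets = new Map<number, T[]>()
  for (let i = 0, round = 0; i < activeList.length; i += radius, round++) {
    subsets.set(round, activeList.slice(i, i + radius)) // the last subset may be shorter
  }
  return subsets
}
```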
-export async function subscribeNodeFromThisSubset(
-  nodeList: NodeList.ConsensusNodeInfo[],
-  roundIndex: number
-): Promise {
-  // First check if there is any subscribed node from this subset
-  const subscribedNodesFromThisSubset = []
-  for (const node of nodeList) {
-    if (dataSenders.has(node.publicKey)) {
-      if (config.VERBOSE)
-        Logger.mainLogger.debug('This node from the subset is in the subscribed list!', node.publicKey)
-      subscribedNodesFromThisSubset.push(node.publicKey)
-    }
-  }
-  let numberOfNodesToSubsribe = 1
-  if (shouldSubscribeToMoreConsensors()) {
-    numberOfNodesToSubsribe += config.extraConsensorsToSubscribe
-    nestedCountersInstance.countEvent(
-      'nodeSubscription',
-      'add extra consensor(s): ' + config.extraConsensorsToSubscribe
-    )
-  } else {
-    nestedCountersInstance.countEvent('nodeSubscription', 'add consensor: ')
-  }
-  if (subscribedNodesFromThisSubset.length > numberOfNodesToSubsribe) {
-    // If there is more than one subscribed node from this subset, unsubscribe the extra ones
-    for (const publicKey of subscribedNodesFromThisSubset.splice(numberOfNodesToSubsribe)) {
-      Logger.mainLogger.debug('Unsubscribing extra node from this subset', publicKey)
-      unsubscribeDataSender(publicKey)
-    }
-  }
-  if (config.VERBOSE) Logger.mainLogger.debug('Subscribed nodes from this subset', subscribedNodesFromThisSubset)
-  if (subscribedNodesFromThisSubset.length === numberOfNodesToSubsribe) return
-  Logger.mainLogger.debug(
-    `Subscribing node(s) from this subset! numberOfNodesToSubsribe: ${numberOfNodesToSubsribe} roundIndex: ${roundIndex}`
-  )
-  // Pick a new dataSender from this subset
-  let subsetList = [...nodeList]
-  // Pick a random dataSender
-  let newSenderInfo = nodeList[Math.floor(Math.random() * nodeList.length)]
-  let connectionStatus = false
-  let retry = 0
-  const MAX_RETRY_SUBSCRIPTION = 3 * numberOfNodesToSubsribe
-  while (retry < MAX_RETRY_SUBSCRIPTION && subscribedNodesFromThisSubset.length < numberOfNodesToSubsribe) {
-    if (!dataSenders.has(newSenderInfo.publicKey)) {
-      connectionStatus = await createDataTransferConnection(newSenderInfo)
-      if (connectionStatus) {
-        // Check if the newSender is in the subscribed nodes of this subset
-        if (!subscribedNodesFromThisSubset.includes(newSenderInfo.publicKey)) {
-          subscribedNodesFromThisSubset.push(newSenderInfo.publicKey)
-          Logger.mainLogger.debug(
-            `Added new sender to the subscribed nodes of this subset. publicKey:${newSenderInfo.publicKey}, numberOfNodesToSubsribe:${numberOfNodesToSubsribe}, roundIndex${roundIndex}`
-          )
-        }
-      }
-    } else {
-      // Add the newSender to the subscribed nodes of this subset
-      if (!subscribedNodesFromThisSubset.includes(newSenderInfo.publicKey)) {
-        subscribedNodesFromThisSubset.push(newSenderInfo.publicKey)
-        Logger.mainLogger.debug(
-          `accounting for existing? sender to the subscribed nodes of this subset. publicKey:${newSenderInfo.publicKey}, numberOfNodesToSubsribe:${numberOfNodesToSubsribe}, roundIndex${roundIndex}`
-        )
-      }
-    }
-    subsetList = subsetList.filter((node) => node.publicKey !== newSenderInfo.publicKey)
-    if (subsetList.length > 0) {
-      newSenderInfo = subsetList[Math.floor(Math.random() * subsetList.length)]
-    } else {
-      subsetList = [...nodeList]
-      retry++
-    }
-  }
-}
-
-// This function is used for both subscribe and unsubscribe for data request
-export async function sendDataRequest(
-  nodeInfo: NodeList.ConsensusNodeInfo,
-  dataRequestType: DataRequestTypes
-): Promise {
-  const dataRequest = {
-    dataRequestCycle: getCurrentCycleCounter(),
-    dataRequestType,
-    publicKey: State.getNodeInfo().publicKey,
-    nodeInfo: State.getNodeInfo(),
-  }
-  const taggedDataRequest = Crypto.tag(dataRequest, nodeInfo.publicKey)
-  Logger.mainLogger.info(`Sending ${dataRequestType} data request to consensor.`, nodeInfo.ip + ':' + nodeInfo.port)
-  let reply = false
-  const REQUEST_DATA_TIMEOUT_SECOND = 2 // 2s timeout
-  const response = await P2P.postJson(
-    `http://${nodeInfo.ip}:${nodeInfo.port}/requestdata`,
-    taggedDataRequest,
-    REQUEST_DATA_TIMEOUT_SECOND
-  )
-  Logger.mainLogger.debug('/requestdata response', response, nodeInfo.ip + ':' + nodeInfo.port)
-  if (response && response.success) reply = response.success
-  return reply
-}
-
-export const clearDataSenders = async (): Promise => {
-  for (const [publicKey] of dataSenders) {
-    unsubscribeDataSender(publicKey)
-  }
-  await Utils.sleep(2000) // Wait for 2s to make sure all dataSenders are unsubscribed
-  dataSenders.clear()
-  socketClients.clear()
-  subsetNodesMapByConsensusRadius.clear()
-}
-
-export function calcIncomingTimes(record: P2PTypes.CycleCreatorTypes.CycleRecord): IncomingTimes {
-  const SECOND = 1000
-  const cycleDuration = record.duration * SECOND
-  const quarterDuration = cycleDuration / 4
-  const start = record.start * SECOND + cycleDuration
-  const startQ1 = start
-  const startQ2 = start + quarterDuration
-  const startQ3 = start + 2 * quarterDuration
-  const startQ4 = start + 3 * quarterDuration
-  const end = start + cycleDuration
-  return { quarterDuration, startQ1, startQ2, startQ3, startQ4, end }
-}
-
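`calcIncomingTimes` above projects the next cycle from the current record (`start` and `duration` are in seconds, hence the `* 1000`). For `start = 1000` and `duration = 60`, the next cycle spans 1,060,000–1,120,000 ms with a quarter every 15,000 ms:

```ts
const record = { start: 1000, duration: 60 }
const cycleMs = record.duration * 1000        // 60000
const startQ1 = record.start * 1000 + cycleMs // 1060000 (next cycle's Q1)
const startQ2 = startQ1 + cycleMs / 4         // 1075000
const startQ3 = startQ1 + (2 * cycleMs) / 4   // 1090000
const startQ4 = startQ1 + (3 * cycleMs) / 4   // 1105000
const end = startQ1 + cycleMs                 // 1120000
```

This is why `joinNetwork` below sleeps until just past Q1 before submitting a join request.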
-export async function joinNetwork(nodeList: NodeList.ConsensusNodeInfo[], isFirstTime: boolean): Promise {
-  if (config.passiveMode) {
-    Logger.mainLogger.debug('joinNetwork-skipped passive mode')
-    return true
-  }
-
-  Logger.mainLogger.debug('Is firstTime', isFirstTime)
-  if (!isFirstTime) {
-    const isJoined: boolean = await checkJoinStatus(nodeList)
-    if (isJoined) {
-      return isJoined
-    }
-  }
-  Logger.mainLogger.debug('nodeList To Submit Join Request', nodeList)
-  // try to get latestCycleRecord with a robust query
-  const latestCycle = await getNewestCycleFromConsensors(nodeList)
-
-  // Figure out when Q1 is from the latestCycle
-  const { startQ1 } = calcIncomingTimes(latestCycle)
-  const shuffledNodes = [...nodeList]
-  Utils.shuffleArray(shuffledNodes)
-
-  // Wait until a Q1 then send join request to active nodes
-  let untilQ1 = startQ1 - Date.now()
-  while (untilQ1 < 0) {
-    untilQ1 += latestCycle.duration * 1000
-  }
-
-  Logger.mainLogger.debug(`Waiting ${untilQ1 + 500} ms for Q1 before sending join...`)
-  await Utils.sleep(untilQ1 + 500) // Not too early
-
-  // Create a fresh join request, so that the request timestamp range is acceptable
-  const request = P2P.createArchiverJoinRequest()
-  await submitJoin(nodeList, request)
-
-  // Wait approx. one cycle then check again
-  Logger.mainLogger.debug('Waiting approx. one cycle then checking again...')
-  await Utils.sleep(latestCycle.duration * 1000 + 500)
-  return false
-}
-
-export async function submitJoin(
-  nodes: NodeList.ConsensusNodeInfo[],
-  joinRequest: P2P.ArchiverJoinRequest & SignedObject
-): Promise {
-  if (config.passiveMode) {
-    Logger.mainLogger.debug('submitJoin-skipped passive mode')
-    return
-  }
-  // Send the join request to a handful of the active node all at once:w
-  const selectedNodes = Utils.getRandom(nodes, Math.min(nodes.length, 5))
-  Logger.mainLogger.debug(`Sending join request to ${selectedNodes.map((n) => `${n.ip}:${n.port}`)}`)
-  for (const node of selectedNodes) {
-    const response = await P2P.postJson(`http://${node.ip}:${node.port}/joinarchiver`, joinRequest)
-    Logger.mainLogger.debug('Join request response:', response)
-  }
-}
-
-export async function sendLeaveRequest(nodes: NodeList.ConsensusNodeInfo[]): Promise {
-  if (config.passiveMode) {
-    Logger.mainLogger.debug('sendLeaveRequest-skipped passive mode')
-    return
-  }
-
-  const leaveRequest = P2P.createArchiverLeaveRequest()
-  Logger.mainLogger.debug(`Sending leave request to ${nodes.map((n) => `${n.ip}:${n.port}`)}`)
-
-  const promises = nodes.map((node) =>
-    customFetch(`http://${node.ip}:${node.port}/leavingarchivers`, {
-      method: 'post',
-      body: StringUtils.safeStringify(leaveRequest),
-      headers: { 'Content-Type': 'application/json' },
-      timeout: 2 * 1000, // 2s timeout
-    }).then((res) => res.json())
-  )
-
-  await Promise.allSettled(promises)
-    .then((responses) => {
-      let i = 0
-      let isLeaveRequestSent = false
-      for (const response of responses) {
-        // eslint-disable-next-line security/detect-object-injection
-        const node = nodes[i]
-        if (response.status === 'fulfilled') {
-          const res = response.value
-          if (res.success) isLeaveRequestSent = true
-          Logger.mainLogger.debug(`Leave request response from ${node.ip}:${node.port}:`, res)
-        } else Logger.mainLogger.debug(`Node is not responding ${node.ip}:${node.port}`)
-        i++
-      }
-      Logger.mainLogger.debug('isLeaveRequestSent', isLeaveRequestSent)
-    })
-    .catch((error) => {
-      // Handle any errors that occurred
-      console.error(error)
-    })
-}
-
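`sendLeaveRequest` above (and `sendActiveRequest` below) fan a request out to several nodes and walk the `Promise.allSettled` results by index, so one unreachable node cannot reject the whole batch. The shape, reduced:

```ts
interface Endpoint { ip: string; port: number }

async function fanOut<T>(nodes: Endpoint[], call: (n: Endpoint) => Promise<T>): Promise<void> {
  const results = await Promise.allSettled(nodes.map(call))
  results.forEach((result, i) => {
    const node = nodes[i] // results are index-aligned with nodes
    if (result.status === 'fulfilled') console.log(`response from ${node.ip}:${node.port}`, result.value)
    else console.log(`node is not responding ${node.ip}:${node.port}`)
  })
}
```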
customFetch(`http://${node.ip}:${node.port}/activearchiver`, { - method: 'post', - body: StringUtils.safeStringify(activeRequest), - headers: { 'Content-Type': 'application/json' }, - timeout: 2 * 1000, // 2s timeout - }).then((res) => res.json()) - ) - - await Promise.allSettled(promises) - .then((responses) => { - let i = 0 - for (const response of responses) { - // eslint-disable-next-line security/detect-object-injection - const node = nodes[i] - if (response.status === 'fulfilled') { - const res = response.value - Logger.mainLogger.debug(`Active request response from ${node.ip}:${node.port}:`, res) - } else Logger.mainLogger.debug(`Node is not responding ${node.ip}:${node.port}`) - i++ - } - }) - .catch((error) => { - // Handle any errors that occurred - console.error(error) - }) - - // Wait approx. one cycle then check again - Logger.mainLogger.debug('Waiting approx. one cycle then checking again...') - await Utils.sleep(latestCycle.duration * 1000 + 500) -} - -export async function getCycleDuration(): Promise { - const response = (await queryFromArchivers(RequestDataType.CYCLE, { count: 1 })) as ArchiverCycleResponse - if (response && response.cycleInfo) { - return response.cycleInfo[0].duration - } - return 0 -} - -/* - checkJoinStatus checks if the current archiver node is joined to a network. - This queries by the /joinedArchiver endpoint on the nodes and returns joining status based on majority response. -*/ -export async function checkJoinStatus(activeNodes: NodeList.ConsensusNodeInfo[]): Promise { - if (config.passiveMode) { - Logger.mainLogger.debug('checkJoinStatus-skipped passive mode') - return false - } - - Logger.mainLogger.debug('checkJoinStatus: Checking join status') - const ourNodeInfo = State.getNodeInfo() - - const queryFn = async (node: NodeList.ConsensusNodeInfo): Promise => { - const url = `http://${node.ip}:${node.port}/joinedArchiver/${ourNodeInfo.publicKey}` - try { - return (await getJson(url)) as JoinStatus - } catch (e) { - Logger.mainLogger.error(`Error querying node ${node.ip}:${node.port}: ${e}`) - throw e - } - } - - try { - const joinStatus = await robustQuery(activeNodes, queryFn) - Logger.mainLogger.debug(`checkJoinStatus: Join status: ${joinStatus.value.isJoined}`) - return joinStatus.value.isJoined - } catch (e) { - Logger.mainLogger.error(`Error in checkJoinStatus: ${e}`) - return false - } -} - -// This will be used once activeArchivers field is added to the cycle record -export async function checkActiveStatus(): Promise { - if (config.checkActiveStatus) { - console.log('checkJoinStatus-skipped passive mode') - return false - } - - Logger.mainLogger.debug('Checking active status') - const ourNodeInfo = State.getNodeInfo() - try { - const latestCycle = await getNewestCycleFromArchivers() - - if (latestCycle && latestCycle['activeArchivers']) { - const activeArchivers = latestCycle['activeArchivers'] - Logger.mainLogger.debug('cycle counter', latestCycle.counter) - Logger.mainLogger.debug('Active archivers', activeArchivers) - - const isActive = activeArchivers.some((a: State.ArchiverNodeInfo) => a.publicKey === ourNodeInfo.publicKey) - Logger.mainLogger.debug('isActive', isActive) - return isActive - } else { - return false - } - } catch (e) { - Logger.mainLogger.error(e) - return false - } -} - -export async function getTotalDataFromArchivers(): Promise { - const res = (await queryFromArchivers( - RequestDataType.TOTALDATA, - {}, - QUERY_TIMEOUT_MAX - )) as ArchiverTotalDataResponse | null - // @ts-ignore - if (!res || (res.success !== 
undefined && res.success === false)) { - return null - } - return res -} - -export async function syncGenesisAccountsFromArchiver(): Promise { - let complete = false - let startAccount = 0 - let endAccount = startAccount + MAX_ACCOUNTS_PER_REQUEST - let totalGenesisAccounts = 0 - // const totalExistingGenesisAccounts = - // await AccountDB.queryAccountCountBetweenCycles(0, 5); - // if (totalExistingGenesisAccounts > 0) { - // // Let's assume it has synced data for now, update to sync account count between them - // return; - // } - const res = (await queryFromArchivers( - RequestDataType.ACCOUNT, - { startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle, endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle }, - QUERY_TIMEOUT_MAX - )) as ArchiverAccountResponse - if (config.VERBOSE) Logger.mainLogger.error('Genesis Total Accounts Response', StringUtils.safeStringify(res)) - if (res && (res.totalAccounts || res.totalAccounts === 0)) { - totalGenesisAccounts = res.totalAccounts - Logger.mainLogger.debug('TotalGenesis Accounts', totalGenesisAccounts) - } else { - Logger.mainLogger.error('Genesis Total Accounts Query', 'Invalid download response') - return - } - if (totalGenesisAccounts <= 0) return - let page = 1 - while (!complete) { - Logger.mainLogger.debug(`Downloading accounts from ${startAccount} to ${endAccount}`) - const response = (await queryFromArchivers( - RequestDataType.ACCOUNT, - { - startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle, - endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle, - page, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverAccountResponse - if (response && response.accounts) { - if (response.accounts.length < MAX_ACCOUNTS_PER_REQUEST) { - complete = true - Logger.mainLogger.debug('Download completed for accounts') - } - Logger.mainLogger.debug(`Downloaded accounts`, response.accounts.length) - await storeAccountData({ accounts: response.accounts }) - startAccount = endAccount + 1 - endAccount += MAX_ACCOUNTS_PER_REQUEST - page++ - } else { - Logger.mainLogger.debug('Genesis Accounts Query', 'Invalid download response') - } - // await sleep(1000); - } - Logger.mainLogger.debug('Sync genesis accounts completed!') -} +// Functions that need to be implemented here because they're used by multiple modules +import { + storeReceiptData as storeReceiptDataImpl, + storeOriginalTxData as storeOriginalTxDataImpl, + storeAccountData as storeAccountDataImpl, + storeCycleData as storeCycleDataImpl, + storingAccountData +} from './Collector' -export async function syncGenesisTransactionsFromArchiver(): Promise { - let complete = false - let startTransaction = 0 - let endTransaction = startTransaction + MAX_ACCOUNTS_PER_REQUEST // Sames as number of accounts per request - let totalGenesisTransactions = 0 +// Re-export storingAccountData flag +export { storingAccountData } - const res = (await queryFromArchivers( - RequestDataType.TRANSACTION, - { - startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle, - endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverTransactionResponse - if (config.VERBOSE) Logger.mainLogger.error('Genesis Total Transaction Response', StringUtils.safeStringify(res)) - if (res && (res.totalTransactions || res.totalTransactions === 0)) { - totalGenesisTransactions = res.totalTransactions - Logger.mainLogger.debug('TotalGenesis Transactions', totalGenesisTransactions) - } else { - Logger.mainLogger.error('Genesis Total Transaction Query', 'Invalid download response') - return - } - if (totalGenesisTransactions <= 0) return - let page = 
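Both genesis sync routines above page through fixed-size windows until a short page signals the end of the data. The same loop, reduced to a hedged generic helper; drainPages and the fetchPage signature are assumptions for illustration, and the real code logs each page and retries invalid responses in place:

    // Hypothetical paged-download loop mirroring syncGenesisAccountsFromArchiver.
    async function drainPages<T>(
      fetchPage: (page: number) => Promise<T[] | null>,
      pageSize: number,
      store: (items: T[]) => Promise<void>
    ): Promise<void> {
      let page = 1
      while (true) {
        const items = await fetchPage(page)
        // Invalid response: retry the same page, as the diff does
        // (a real implementation would cap these retries).
        if (!items) continue
        await store(items)
        if (items.length < pageSize) break // short page means no more data
        page++
      }
    }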
1 - while (!complete) { - Logger.mainLogger.debug(`Downloading transactions from ${startTransaction} to ${endTransaction}`) - const response = (await queryFromArchivers( - RequestDataType.TRANSACTION, - { - startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle, - endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle, - page, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverTransactionResponse - if (response && response.transactions) { - if (response.transactions.length < MAX_ACCOUNTS_PER_REQUEST) { - complete = true - Logger.mainLogger.debug('Download completed for transactions') - } - Logger.mainLogger.debug(`Downloaded transactions`, response.transactions.length) - await storeAccountData({ receipts: response.transactions }) - startTransaction = endTransaction + 1 - endTransaction += MAX_ACCOUNTS_PER_REQUEST - page++ - } else { - Logger.mainLogger.debug('Genesis Transactions Query', 'Invalid download response') - } - // await sleep(1000); - } - Logger.mainLogger.debug('Sync genesis transactions completed!') -} - -export async function syncGenesisAccountsFromConsensor( - totalGenesisAccounts = 0, - firstConsensor: NodeList.ConsensusNodeInfo -): Promise { - if (totalGenesisAccounts <= 0) return - let startAccount = 0 - // let combineAccountsData = []; - let totalDownloadedAccounts = 0 - while (startAccount <= totalGenesisAccounts) { - Logger.mainLogger.debug(`Downloading accounts from ${startAccount}`) - const response = (await P2P.getJson( - `http://${firstConsensor.ip}:${firstConsensor.port}/genesis_accounts?start=${startAccount}`, - QUERY_TIMEOUT_MAX - )) as ArchiverAccountResponse - if (response && response.accounts) { - if (response.accounts.length < MAX_ACCOUNTS_PER_REQUEST) { - Logger.mainLogger.debug('Download completed for accounts') - } - Logger.mainLogger.debug(`Downloaded accounts`, response.accounts.length) - // TODO - update to include receipts data also - await storeAccountData({ accounts: response.accounts }) - // combineAccountsData = [...combineAccountsData, ...response.accounts]; - totalDownloadedAccounts += response.accounts.length - startAccount += MAX_ACCOUNTS_PER_REQUEST - } else { - Logger.mainLogger.debug('Genesis Accounts Query', 'Invalid download response') - } - // await sleep(1000); - } - Logger.mainLogger.debug(`Total downloaded accounts`, totalDownloadedAccounts) - // await storeAccountData(combineAccountsData); - Logger.mainLogger.debug('Sync genesis accounts completed!') +export async function storeReceiptData( + receipts: any[], + senderInfo: string, + validate: boolean, + saveOnlyGossipData: boolean, + gossipReceipt: boolean +): Promise { + return storeReceiptDataImpl(receipts, senderInfo, validate, saveOnlyGossipData, gossipReceipt) } -export async function buildNodeListFromStoredCycle( - lastStoredCycle: P2PTypes.CycleCreatorTypes.CycleData +export async function storeOriginalTxData( + originalTxs: any[], + senderInfo: string, + saveOnlyGossipData: boolean ): Promise { - Logger.mainLogger.debug('lastStoredCycle', lastStoredCycle) - Logger.mainLogger.debug('buildNodeListFromStoredCycle:') - Logger.mainLogger.debug(`Syncing till cycle ${lastStoredCycle.counter}...`) - const cyclesToGet = 2 * Math.floor(Math.sqrt(lastStoredCycle.active)) + 2 - Logger.mainLogger.debug(`Cycles to get is ${cyclesToGet}`) - - const CycleChain = [] - const squasher = new ChangeSquasher() - - CycleChain.unshift(lastStoredCycle) - squasher.addChange(parse(CycleChain[0])) - - do { - // Get prevCycles from the network - let end: number = CycleChain[0].counter - 1 - let start: number = end - 
cyclesToGet - if (start < 0) start = 0 - if (end < start) end = start - Logger.mainLogger.debug(`Getting cycles ${start} - ${end}...`) - const prevCycles = await CycleDB.queryCycleRecordsBetween(start, end) - - // If prevCycles is empty, start over - if (prevCycles.length < 1) throw new Error('Got empty previous cycles') - - prevCycles.sort((a, b) => (a.counter > b.counter ? -1 : 1)) - - // Add prevCycles to our cycle chain - let prepended = 0 - for (const prevCycle of prevCycles) { - // Prepend the cycle to our cycle chain - CycleChain.unshift(prevCycle) - squasher.addChange(parse(prevCycle)) - prepended++ - - if ( - squasher.final.updated.length >= activeNodeCount(lastStoredCycle) && - squasher.final.added.length >= totalNodeCount(lastStoredCycle) - ) { - break - } - } - - Logger.mainLogger.debug( - `Got ${squasher.final.updated.length} active nodes, need ${activeNodeCount(lastStoredCycle)}` - ) - Logger.mainLogger.debug(`Got ${squasher.final.added.length} total nodes, need ${totalNodeCount(lastStoredCycle)}`) - if (squasher.final.added.length < totalNodeCount(lastStoredCycle)) - Logger.mainLogger.debug('Short on nodes. Need to get more cycles. Cycle:' + lastStoredCycle.counter) - - // If you weren't able to prepend any of the prevCycles, start over - if (prepended < 1) throw new Error('Unable to prepend any previous cycles') - } while ( - squasher.final.updated.length < activeNodeCount(lastStoredCycle) || - squasher.final.added.length < totalNodeCount(lastStoredCycle) - ) - - applyNodeListChange(squasher.final) - Logger.mainLogger.debug('NodeList after sync', NodeList.getActiveList()) - Cycles.setCurrentCycleCounter(lastStoredCycle.counter) - Cycles.setCurrentCycleMarker(lastStoredCycle.marker) - Cycles.setCurrentCycleDuration(lastStoredCycle.duration) - Logger.mainLogger.debug('Latest cycle after sync', lastStoredCycle.counter) -} - -export async function syncCyclesAndNodeList(lastStoredCycleCount = 0): Promise { - // Get the networks newest cycle as the anchor point for sync - Logger.mainLogger.debug('Getting newest cycle...') - const cycleToSyncTo = await getNewestCycleFromArchivers() - Logger.mainLogger.debug('cycleToSyncTo', cycleToSyncTo) - Logger.mainLogger.debug(`Syncing till cycle ${cycleToSyncTo.counter}...`) - - const cyclesToGet = 2 * Math.floor(Math.sqrt(cycleToSyncTo.active)) + 2 - Logger.mainLogger.debug(`Cycles to get is ${cyclesToGet}`) - - const CycleChain = [] - const squasher = new ChangeSquasher() - - CycleChain.unshift(cycleToSyncTo) - squasher.addChange(parse(CycleChain[0])) - - do { - // Get prevCycles from the network - let end: number = CycleChain[0].counter - 1 - let start: number = end - cyclesToGet - if (start < 0) start = 0 - if (end < start) end = start - Logger.mainLogger.debug(`Getting cycles ${start} - ${end}...`) - const prevCycles = await fetchCycleRecords(start, end) - - // If prevCycles is empty, start over - if (prevCycles.length < 1) throw new Error('Got empty previous cycles') - - prevCycles.sort((a, b) => (a.counter > b.counter ? 
-1 : 1)) - - // Add prevCycles to our cycle chain - let prepended = 0 - for (const prevCycle of prevCycles) { - // Stop prepending prevCycles if one of them is invalid - if (validateCycle(prevCycle, CycleChain[0]) === false) { - Logger.mainLogger.error(`Record ${prevCycle.counter} failed validation`) - break - } - // Prepend the cycle to our cycle chain - CycleChain.unshift(prevCycle) - squasher.addChange(parse(prevCycle)) - prepended++ - - if ( - squasher.final.updated.length >= activeNodeCount(cycleToSyncTo) && - squasher.final.added.length >= totalNodeCount(cycleToSyncTo) - ) { - break - } - } - - Logger.mainLogger.debug(`Got ${squasher.final.updated.length} active nodes, need ${activeNodeCount(cycleToSyncTo)}`) - Logger.mainLogger.debug(`Got ${squasher.final.added.length} total nodes, need ${totalNodeCount(cycleToSyncTo)}`) - if (squasher.final.added.length < totalNodeCount(cycleToSyncTo)) - Logger.mainLogger.debug('Short on nodes. Need to get more cycles. Cycle:' + cycleToSyncTo.counter) - - // If you weren't able to prepend any of the prevCycles, start over - if (prepended < 1) throw new Error('Unable to prepend any previous cycles') - } while ( - squasher.final.updated.length < activeNodeCount(cycleToSyncTo) || - squasher.final.added.length < totalNodeCount(cycleToSyncTo) - ) - - applyNodeListChange(squasher.final) - Logger.mainLogger.debug('NodeList after sync', NodeList.getActiveList()) - - for (let i = 0; i < CycleChain.length; i++) { - // eslint-disable-next-line security/detect-object-injection - const record = CycleChain[i] - Cycles.CycleChain.set(record.counter, { ...record }) - if (i === CycleChain.length - 1) await storeCycleData(CycleChain) - Cycles.setCurrentCycleCounter(record.counter) - Cycles.setCurrentCycleMarker(record.marker) - } - Logger.mainLogger.debug('Cycle chain is synced. Size of CycleChain', Cycles.CycleChain.size) - - // Download old cycle Records - let endCycle = CycleChain[0].counter - 1 - Logger.mainLogger.debug('endCycle counter', endCycle, 'lastStoredCycleCount', lastStoredCycleCount) - if (endCycle > lastStoredCycleCount) { - Logger.mainLogger.debug(`Downloading old cycles from cycles ${lastStoredCycleCount} to cycle ${endCycle}!`) - } - let savedCycleRecord = CycleChain[0] - while (endCycle > lastStoredCycleCount) { - let nextEnd: number = endCycle - MAX_CYCLES_PER_REQUEST - if (nextEnd < 0) nextEnd = 0 - Logger.mainLogger.debug(`Getting cycles ${nextEnd} - ${endCycle} ...`) - const prevCycles = await fetchCycleRecords(nextEnd, endCycle) - - // If prevCycles is empty, start over - if (!prevCycles || prevCycles.length < 1) throw new Error('Got empty previous cycles') - prevCycles.sort((a, b) => (a.counter > b.counter ? 
-1 : 1))
-
-    // Add prevCycles to our cycle chain
-    const combineCycles = []
-    for (const prevCycle of prevCycles) {
-      // Stop saving prevCycles if one of them is invalid
-      if (validateCycle(prevCycle, savedCycleRecord) === false) {
-        Logger.mainLogger.error(`Record ${prevCycle.counter} failed validation`)
-        Logger.mainLogger.debug('fail', prevCycle, savedCycleRecord)
-        break
-      }
-      savedCycleRecord = prevCycle
-      combineCycles.push(prevCycle)
-    }
-    await storeCycleData(combineCycles)
-    endCycle = nextEnd - 1
-  }
-}
-
-export async function syncCyclesAndNodeListV2(
-  activeArchivers: State.ArchiverNodeInfo[],
-  lastStoredCycleCount = 0
-): Promise<boolean> {
-  // Sync validator list and get the latest cycle from the network
-  Logger.mainLogger.debug('Syncing validators and latest cycle...')
-  const syncResult = await syncV2(activeArchivers)
-  let cycleToSyncTo: P2PTypes.CycleCreatorTypes.CycleData
-  if (syncResult.isOk()) {
-    cycleToSyncTo = syncResult.value
-  } else {
-    throw syncResult.error
-  }
-
-  Logger.mainLogger.debug('cycleToSyncTo', cycleToSyncTo)
-  Logger.mainLogger.debug(`Syncing till cycle ${cycleToSyncTo.counter}...`)
-
-  currentConsensusRadius = await getConsensusRadius()
-  await processCycles([cycleToSyncTo])
-
-  // Download old cycle Records
-  await downloadOldCycles(cycleToSyncTo, lastStoredCycleCount)
-
-  return true
-}
-
-export async function syncCyclesBetweenCycles(lastStoredCycle = 0, cycleToSyncTo = 0): Promise<boolean> {
-  const MAX_RETRIES = 3
-  let retryCount = 0
-
-  let startCycle = lastStoredCycle
-  let endCycle = startCycle + MAX_CYCLES_PER_REQUEST
-
-  while (cycleToSyncTo > startCycle) {
-    if (endCycle > cycleToSyncTo) endCycle = cycleToSyncTo
-    Logger.mainLogger.debug(`Downloading cycles from ${startCycle} to ${endCycle}`)
-
-    let success = false
-    retryCount = 0
-
-    while (!success && retryCount < MAX_RETRIES) {
-      const res = (await queryFromArchivers(
-        RequestDataType.CYCLE,
-        {
-          start: startCycle,
-          end: endCycle,
-        },
-        QUERY_TIMEOUT_MAX
-      )) as ArchiverCycleResponse
-
-      if (res && res.cycleInfo) {
-        const cycles = res.cycleInfo as P2PTypes.CycleCreatorTypes.CycleData[]
-        Logger.mainLogger.debug(`Downloaded cycles`, cycles.length)
-
-        let validCyclesCount = 0
-        for (const cycle of cycles) {
-          if (!validateCycleData(cycle)) {
-            Logger.mainLogger.debug('Found invalid cycle data')
-            continue
-          }
-          await processCycles([cycle])
-          validCyclesCount++
-        }
-
-        success = true
-
-        if (cycles.length < MAX_CYCLES_PER_REQUEST || validCyclesCount === 0) {
-          startCycle += Math.max(cycles.length, 1)
-          endCycle = startCycle + MAX_CYCLES_PER_REQUEST
-          if (startCycle >= cycleToSyncTo) {
-            Logger.mainLogger.debug('Sync cycles completed!')
-            return true
-          }
-          break
-        }
-      } else {
-        Logger.mainLogger.debug(`Invalid cycle download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`)
-        retryCount++
-        if (retryCount >= MAX_RETRIES) {
-          Logger.mainLogger.error('Max retries reached for cycle download')
-          return false
-        }
-      }
-    }
-
-    if (success) {
-      startCycle = endCycle + 1
-      endCycle += MAX_CYCLES_PER_REQUEST
-    }
-  }
-
-  return true
-}
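The receipt sync removed below resumes from a persisted checkpoint and rewinds by config.checkpoint.syncCycleBuffer cycles, so receipts written after the last tracker update are still re-covered. A toy version of that tracker under those assumptions; the file name and JSON shape are invented for the sketch, and the real getLastUpdatedCycle/updateLastUpdatedCycle live in ../utils/cycleTracker:

    import * as fs from 'fs'

    const TRACKER_FILE = 'cycle-tracker.json' // hypothetical path

    export function getLastUpdatedCycleSketch(): number {
      try {
        return JSON.parse(fs.readFileSync(TRACKER_FILE, 'utf8')).lastUpdatedCycle ?? 0
      } catch {
        return 0 // no tracker yet: start from genesis
      }
    }

    export function updateLastUpdatedCycleSketch(cycle: number): void {
      fs.writeFileSync(TRACKER_FILE, JSON.stringify({ lastUpdatedCycle: cycle }))
    }

    // Resume point: back off by a buffer so data that landed after the last
    // checkpoint write, but inside already-counted cycles, is not skipped.
    export function resumeCycle(syncCycleBuffer: number): number {
      return Math.max(getLastUpdatedCycleSketch() - syncCycleBuffer, 0)
    }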
-
-import { getLastUpdatedCycle, updateLastUpdatedCycle } from '../utils/cycleTracker'
-
-export async function syncReceipts(): Promise<void> {
-  const MAX_RETRIES = 3
-  let retryCount = 0
-
-  // Get the last updated cycle from tracker file
-  const lastUpdatedCycle = getLastUpdatedCycle()
-  Logger.mainLogger.debug(`[syncReceipts] Last updated cycle from tracker: ${lastUpdatedCycle}`)
-
-  // If we have a valid last updated cycle, use it as the starting point
-  let startCycle = 0
-  if (lastUpdatedCycle > 0) {
-    Logger.mainLogger.info(`[syncReceipts] Starting receipt sync from last updated cycle: ${lastUpdatedCycle}`)
-    startCycle = Math.max(lastUpdatedCycle - config.checkpoint.syncCycleBuffer, 0)
-    await syncReceiptsByCycle(startCycle)
-    return
-  }
-
-  let response: ArchiverTotalDataResponse = await getTotalDataFromArchivers()
-  if (!response || response.totalReceipts < 0) {
-    return
-  }
-
-  let { totalReceipts } = response
-  if (totalReceipts < 1) return
-
-  let complete = false
-  let start = 0
-  let end = start + MAX_RECEIPTS_PER_REQUEST
-
-  while (!complete) {
-    if (end >= totalReceipts) {
-      response = await getTotalDataFromArchivers()
-      if (response && response.totalReceipts > 0) {
-        if (response.totalReceipts > totalReceipts) totalReceipts = response.totalReceipts
-        Logger.mainLogger.debug('totalReceiptsToSync', totalReceipts)
-      }
-    }
-
-    Logger.mainLogger.debug(`Downloading receipts from ${start} to ${end}`)
-    let success = false
-    retryCount = 0
-
-    while (!success && retryCount < MAX_RETRIES) {
-      const res = (await queryFromArchivers(
-        RequestDataType.RECEIPT,
-        {
-          start: start,
-          end: end,
-        },
-        QUERY_TIMEOUT_MAX
-      )) as ArchiverReceiptResponse
-
-      if (res && res.receipts) {
-        const downloadedReceipts = res.receipts as ReceiptDB.Receipt[]
-        Logger.mainLogger.debug(`Downloaded receipts`, downloadedReceipts.length)
-        await storeReceiptData(downloadedReceipts, '', false, false, true)
-        success = true
-
-        if (downloadedReceipts.length < MAX_RECEIPTS_PER_REQUEST) {
-          start += downloadedReceipts.length
-          end = start + MAX_RECEIPTS_PER_REQUEST
-          response = await getTotalDataFromArchivers()
-          if (response && response.totalReceipts > 0) {
-            if (response.totalReceipts > totalReceipts) totalReceipts = response.totalReceipts
-            if (start >= totalReceipts) {
-              complete = true
-              Logger.mainLogger.debug('Download receipts completed')
-            }
-          }
-        }
-      } else {
-        Logger.mainLogger.debug(`Invalid download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`)
-        retryCount++
-        if (retryCount >= MAX_RETRIES) {
-          Logger.mainLogger.error('Max retries reached for receipt download')
-          start = end + 1
-          end += MAX_RECEIPTS_PER_REQUEST
-          // Check if we've passed total receipts after incrementing
-          if (start >= totalReceipts) {
-            complete = true
-          }
-        }
-      }
-    }
-
-    if (success) {
-      start = end + 1
-      end += MAX_RECEIPTS_PER_REQUEST
-    }
-  }
-
-  Logger.mainLogger.debug('Sync receipts data completed!')
+  return storeOriginalTxDataImpl(originalTxs, senderInfo, saveOnlyGossipData)
 }
 
-interface ArchiverWithRetries {
-  archiver: State.ArchiverNodeInfo
-  retriesLeft: number
+export async function storeAccountData(data: { accounts?: any[]; receipts?: any[] }): Promise<void> {
+  return storeAccountDataImpl(data)
 }
-
-class ArchiverSelector {
-  private archivers: ArchiverWithRetries[]
-  private currentIndex: number = 0
-  private readonly maxRetries: number = 3
-
-  constructor() {
-    this.archivers = State.otherArchivers.map((archiver) => ({
-      archiver,
-      retriesLeft: this.maxRetries,
-    }))
-    Utils.shuffleArray(this.archivers)
-  }
-
-  getCurrentArchiver(): State.ArchiverNodeInfo | null {
-    if (this.currentIndex >= this.archivers.length) {
-      return null
-    }
-    return this.archivers[this.currentIndex].archiver
-  }
-
-  markCurrentArchiverFailed(): State.ArchiverNodeInfo | null {
-    if (this.currentIndex >= this.archivers.length) {
-      return null
-    }
-
-    this.archivers[this.currentIndex].retriesLeft--
-
-    if (this.archivers[this.currentIndex].retriesLeft <= 0) {
-      this.currentIndex++
-    }
-
-
return this.getCurrentArchiver() - } - - hasMoreArchivers(): boolean { - return this.currentIndex < this.archivers.length - } +export async function storeCycleData(cycles: P2PTypes.CycleCreatorTypes.CycleData[]): Promise { + return storeCycleDataImpl(cycles) } -export async function syncReceiptsByCycle(lastStoredReceiptCycle = 0, cycleToSyncTo = 0): Promise { - // Get the last updated cycle from tracker if not provided - if (lastStoredReceiptCycle === 0) { - const trackedCycle = getLastUpdatedCycle() - if (trackedCycle > 0) { - Logger.mainLogger.info(`[syncReceiptsByCycle] Using last updated cycle from tracker: ${trackedCycle}`) - lastStoredReceiptCycle = Math.max(trackedCycle - config.checkpoint.syncCycleBuffer, 0) - } - } - - let totalCycles = cycleToSyncTo - let totalReceipts = 0 - if (cycleToSyncTo === 0) { - const response: ArchiverTotalDataResponse = await getTotalDataFromArchivers() - if (!response || response.totalReceipts < 0) { - return false - } - totalCycles = response.totalCycles - totalReceipts = response.totalReceipts - } - let startCycle = lastStoredReceiptCycle - let endCycle = startCycle + MAX_BETWEEN_CYCLES_PER_REQUEST - let receiptsCountToSyncBetweenCycles = 0 - let savedReceiptsCountBetweenCycles = 0 - let totalSavedReceiptsCount = 0 - let archiverSelector = new ArchiverSelector() - - while (true) { - if (endCycle > totalCycles) { - endCycle = totalCycles - totalSavedReceiptsCount = await ReceiptDB.queryReceiptCount() - } - if (cycleToSyncTo > 0) { - if (startCycle > cycleToSyncTo) { - Logger.mainLogger.debug(`Sync receipts data completed!`) - return true - } - } else { - if (totalSavedReceiptsCount >= totalReceipts) { - const res: any = await getTotalDataFromArchivers() - if (res && res.totalReceipts > 0) { - if (res.totalReceipts > totalReceipts) totalReceipts = res.totalReceipts - if (res.totalCycles > totalCycles) totalCycles = res.totalCycles - Logger.mainLogger.debug('totalReceiptsToSync', totalReceipts, 'totalSavedReceipts', totalSavedReceiptsCount) - if (totalSavedReceiptsCount === totalReceipts) { - Logger.mainLogger.debug('Sync receipts data completed!') - return true - } - } - } - } - if (startCycle > endCycle) { - Logger.mainLogger.error( - `Got some issues in syncing receipts. 
Receipts query startCycle ${startCycle} is greater than endCycle ${endCycle}` - ) - return false - } - - const currentArchiver = archiverSelector.getCurrentArchiver() - if (!currentArchiver || !archiverSelector.hasMoreArchivers()) { - Logger.mainLogger.error('All archivers exhausted') - return false - } - - Logger.mainLogger.debug( - `Downloading receipts from cycle ${startCycle} to cycle ${endCycle} using archiver ${currentArchiver.ip}:${currentArchiver.port}` - ) - let response = (await queryFromArchivers( - RequestDataType.RECEIPT, - { - startCycle, - endCycle, - type: 'count', - archiver: currentArchiver, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverReceiptCountResponse - - if (response && response.receipts > 0) { - receiptsCountToSyncBetweenCycles = response.receipts - let page = 1 - savedReceiptsCountBetweenCycles = 0 - while (savedReceiptsCountBetweenCycles < receiptsCountToSyncBetweenCycles) { - const res = (await queryFromArchivers( - RequestDataType.RECEIPT, - { - startCycle, - endCycle, - page, - archiver: currentArchiver, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverReceiptResponse - if (res && res.receipts && Array.isArray(res.receipts) && res.receipts.length > 0) { - const downloadedReceipts = res.receipts as ReceiptDB.Receipt[] - Logger.mainLogger.debug(`Downloaded receipts`, downloadedReceipts.length) - await storeReceiptData(downloadedReceipts, '', false, false, true) - savedReceiptsCountBetweenCycles += downloadedReceipts.length - if (savedReceiptsCountBetweenCycles > receiptsCountToSyncBetweenCycles) { - response = (await queryFromArchivers( - RequestDataType.RECEIPT, - { - startCycle, - endCycle, - type: 'count', - archiver: currentArchiver, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverReceiptCountResponse - if (response && response.receipts) receiptsCountToSyncBetweenCycles = response.receipts - if (receiptsCountToSyncBetweenCycles > savedReceiptsCountBetweenCycles) { - savedReceiptsCountBetweenCycles -= downloadedReceipts.length - continue - } - } - Logger.mainLogger.debug( - 'savedReceiptsCountBetweenCycles', - savedReceiptsCountBetweenCycles, - 'receiptsCountToSyncBetweenCycles', - receiptsCountToSyncBetweenCycles - ) - if (savedReceiptsCountBetweenCycles > receiptsCountToSyncBetweenCycles) { - Logger.mainLogger.debug( - `It has downloaded more receipts than it has in cycles between ${startCycle} and ${endCycle} !` - ) - } - totalSavedReceiptsCount += downloadedReceipts.length - page++ - } else { - Logger.mainLogger.debug('Invalid or empty download response') - const nextArchiver = archiverSelector.markCurrentArchiverFailed() - if (nextArchiver) { - Logger.mainLogger.debug(`Switching to next archiver: ${nextArchiver.ip}:${nextArchiver.port}`) - continue - } - if (!archiverSelector.hasMoreArchivers()) { - Logger.mainLogger.error('All archivers exhausted') - return false - } - } - } - Logger.mainLogger.debug(`Download receipts completed for ${startCycle} - ${endCycle}`) - // Update checkpoint status for completed cycles - - // Update the cycle tracker with the latest cycle we've processed - updateLastUpdatedCycle(endCycle) - Logger.mainLogger.debug(`[syncReceiptsByCycle] Updated cycle tracker to cycle ${endCycle}`) - - startCycle = endCycle + 1 - endCycle += MAX_BETWEEN_CYCLES_PER_REQUEST - archiverSelector = new ArchiverSelector() - } else { - receiptsCountToSyncBetweenCycles = response?.receipts || 0 - if (receiptsCountToSyncBetweenCycles === 0) { - startCycle = endCycle + 1 - endCycle += MAX_BETWEEN_CYCLES_PER_REQUEST - - archiverSelector = new ArchiverSelector() - continue - 
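syncReceiptsByCycle above first asks an archiver for a per-cycle-range receipt count, then pages until the saved total reaches that count, refreshing the count whenever it overshoots (the snapshot can grow while pages are downloading). A reduced sketch of that reconciliation; the countFn/pageFn signatures are assumptions for illustration:

    // Hypothetical reduction of the count-reconciliation loop in syncReceiptsByCycle:
    // the expected count is only a snapshot and can grow during the download.
    async function syncWindow(
      countFn: () => Promise<number>,
      pageFn: (page: number) => Promise<number> // returns how many items were saved
    ): Promise<void> {
      let expected = await countFn()
      let saved = 0
      let page = 1
      while (saved < expected) {
        saved += await pageFn(page)
        if (saved > expected) {
          // Saved more than the snapshot predicted: refresh the count before trusting it.
          expected = await countFn()
        }
        page++
      }
    }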
} - Logger.mainLogger.debug('Invalid download response') - const nextArchiver = archiverSelector.markCurrentArchiverFailed() - if (nextArchiver) { - Logger.mainLogger.debug(`Switching to next archiver: ${nextArchiver.ip}:${nextArchiver.port}`) - continue - } - if (!archiverSelector.hasMoreArchivers()) { - Logger.mainLogger.error('All archivers exhausted') - return false - } - } - } +import { processCycles as processCyclesImpl } from './Cycles' +export async function processCycles(cycles: P2PTypes.CycleCreatorTypes.CycleData[]): Promise { + return processCyclesImpl(cycles) } -export const syncOriginalTxs = async (): Promise => { - const MAX_RETRIES = 3 - let retryCount = 0 - - let response: ArchiverTotalDataResponse = await getTotalDataFromArchivers() - if (!response || response.totalOriginalTxs < 0) { - return - } - - let { totalOriginalTxs } = response - if (totalOriginalTxs < 1) return - - let complete = false - let start = 0 - let end = start + MAX_ORIGINAL_TXS_PER_REQUEST - - while (!complete) { - if (end >= totalOriginalTxs) { - // If the number of new original txs to sync is within MAX_ORIGINAL_TXS_PER_REQUEST => Update to the latest totalOriginalTxs. - response = await getTotalDataFromArchivers() - if (response && response.totalOriginalTxs > 0) { - if (response.totalOriginalTxs > totalOriginalTxs) totalOriginalTxs = response.totalOriginalTxs - Logger.mainLogger.debug('totalOriginalTxs: ', totalOriginalTxs) - } - } - - Logger.mainLogger.debug(`Downloading Original-Txs from ${start} to ${end}`) - let success = false - retryCount = 0 - - while (!success && retryCount < MAX_RETRIES) { - const res: any = await queryFromArchivers( - RequestDataType.ORIGINALTX, - { - start: start, - end: end, - }, - QUERY_TIMEOUT_MAX - ) - - if (res && res.originalTxs) { - const downloadedOriginalTxs = res.originalTxs - Logger.mainLogger.debug('Downloaded Original-Txs: ', downloadedOriginalTxs.length) - await storeOriginalTxData(downloadedOriginalTxs) - success = true - - if (downloadedOriginalTxs.length < MAX_ORIGINAL_TXS_PER_REQUEST) { - start += downloadedOriginalTxs.length - end = start + MAX_ORIGINAL_TXS_PER_REQUEST - response = await getTotalDataFromArchivers() - if (response && response.totalOriginalTxs > 0) { - if (response.totalOriginalTxs > totalOriginalTxs) totalOriginalTxs = response.totalOriginalTxs - if (start >= totalOriginalTxs) { - complete = true - Logger.mainLogger.debug('Download Original-Txs Completed!') - } - } - break - } - } else { - Logger.mainLogger.debug(`Invalid Original-Tx download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) - retryCount++ - if (retryCount >= MAX_RETRIES) { - Logger.mainLogger.error('Max retries reached for Original-Tx download') - start = end + 1 - end += MAX_ORIGINAL_TXS_PER_REQUEST - if (start >= totalOriginalTxs) { - complete = true - } - } - } - } - - if (success) { - start = end + 1 - end += MAX_ORIGINAL_TXS_PER_REQUEST - } - } - - Logger.mainLogger.debug('Sync Original-Txs Data Completed!') +// Update socket client initialization to use the global dataSenders +export function initSocketClientWithDataSenders(node: NodeList.ConsensusNodeInfo): void { + initSocketClient(node, dataSenders, createContactTimeout) } -export const syncOriginalTxsByCycle = async (lastStoredOriginalTxCycle = 0, cycleToSyncTo = 0): Promise => { - let totalCycles = cycleToSyncTo - let totalOriginalTxs = 0 - if (cycleToSyncTo === 0) { - const response: ArchiverTotalDataResponse = await getTotalDataFromArchivers() - if (!response || response.totalOriginalTxs < 1) { - 
return - } - totalCycles = response.totalCycles - totalOriginalTxs = response.totalOriginalTxs - } - const complete = false - let startCycle = lastStoredOriginalTxCycle - let endCycle = startCycle + MAX_BETWEEN_CYCLES_PER_REQUEST - let originalTxCountToSyncBetweenCycles = 0 - let savedOriginalTxCountBetweenCycles = 0 - let totalSavedOriginalTxCount = 0 - while (!complete) { - if (endCycle > totalCycles) { - endCycle = totalCycles - totalSavedOriginalTxCount = await OriginalTxDB.queryOriginalTxDataCount() - } - if (cycleToSyncTo > 0) { - if (startCycle > cycleToSyncTo) { - Logger.mainLogger.debug(`Sync originalTXs data completed!`) - break - } - } else { - if (totalSavedOriginalTxCount >= totalOriginalTxs) { - const res: ArchiverTotalDataResponse = await getTotalDataFromArchivers() - if (res && res.totalOriginalTxs > 0) { - if (res.totalOriginalTxs > totalOriginalTxs) totalOriginalTxs = res.totalOriginalTxs - if (res.totalCycles > totalCycles) totalCycles = res.totalCycles - Logger.mainLogger.debug( - 'totalOriginalTxsToSync: ', - totalOriginalTxs, - 'totalSavedOriginalTxs: ', - totalSavedOriginalTxCount - ) - if (totalSavedOriginalTxCount === totalOriginalTxs) { - Logger.mainLogger.debug('Sync Original-Tx data completed!') - break - } - } - } - } - if (startCycle > endCycle) { - Logger.mainLogger.error( - `Got some issues in syncing Original-Tx data. Original-Tx query startCycle ${startCycle} is greater than endCycle ${endCycle}` - ) - break - } - Logger.mainLogger.debug(`Downloading Original-Tx data from cycle ${startCycle} to cycle ${endCycle}`) - let response = (await queryFromArchivers( - RequestDataType.ORIGINALTX, - { - startCycle, - endCycle, - type: 'count', - }, - QUERY_TIMEOUT_MAX - )) as ArchiverOriginalTxCountResponse - if (response && response.originalTxs > 0) { - originalTxCountToSyncBetweenCycles = response.originalTxs - let page = 1 - savedOriginalTxCountBetweenCycles = 0 - while (savedOriginalTxCountBetweenCycles < originalTxCountToSyncBetweenCycles) { - const res = (await queryFromArchivers( - RequestDataType.ORIGINALTX, - { - startCycle, - endCycle, - page, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverOriginalTxResponse - if (res && res.originalTxs) { - const downloadedOriginalTxs = res.originalTxs as OriginalTxDB.OriginalTxData[] - Logger.mainLogger.debug('Downloaded Original-Txs: ', downloadedOriginalTxs.length) - await storeOriginalTxData(downloadedOriginalTxs) - savedOriginalTxCountBetweenCycles += downloadedOriginalTxs.length - if (savedOriginalTxCountBetweenCycles > originalTxCountToSyncBetweenCycles) { - response = (await queryFromArchivers( - RequestDataType.ORIGINALTX, - { - startCycle, - endCycle, - type: 'count', - }, - QUERY_TIMEOUT_MAX - )) as ArchiverOriginalTxCountResponse - if (response && response.originalTxs) originalTxCountToSyncBetweenCycles = response.originalTxs - if (originalTxCountToSyncBetweenCycles > savedOriginalTxCountBetweenCycles) { - savedOriginalTxCountBetweenCycles -= downloadedOriginalTxs.length - continue - } - } - Logger.mainLogger.debug( - 'savedOriginalTxCountBetweenCycles', - savedOriginalTxCountBetweenCycles, - 'originalTxCountToSyncBetweenCycles', - originalTxCountToSyncBetweenCycles - ) - if (savedOriginalTxCountBetweenCycles > originalTxCountToSyncBetweenCycles) { - Logger.mainLogger.debug( - `It has downloaded more originalTxsData than it has in cycles between ${startCycle} and ${endCycle} !` - ) - } - totalSavedOriginalTxCount += downloadedOriginalTxs.length - page++ - } else { - Logger.mainLogger.debug('Invalid Original-Txs 
download response') - continue - } - } - Logger.mainLogger.debug(`Download Original-Txs completed for ${startCycle} - ${endCycle}`) - startCycle = endCycle + 1 - endCycle += MAX_BETWEEN_CYCLES_PER_REQUEST - } else { - originalTxCountToSyncBetweenCycles = response.originalTxs - if (originalTxCountToSyncBetweenCycles === 0) { - startCycle = endCycle + 1 - endCycle += MAX_BETWEEN_CYCLES_PER_REQUEST - continue - } - Logger.mainLogger.debug('Invalid Original-Txs download response') - continue - } - } +export async function unsubscribeDataSenderWithDataSenders(publicKey: NodeList.ConsensusNodeInfo['publicKey']): Promise { + unsubscribeDataSender(publicKey, dataSenders) } -export const syncCyclesAndTxsData = async ( - lastStoredCycleCount = 0, - lastStoredReceiptCount = 0, - lastStoredOriginalTxCount = 0 -): Promise => { - const MAX_RETRIES = 3 - let retryCount = 0 - - // Get the last updated cycle from tracker if not provided - if (lastStoredCycleCount === 0) { - const trackedCycle = getLastUpdatedCycle() - if (trackedCycle > 0) { - Logger.mainLogger.info(`[syncCyclesAndTxsData] Using last updated cycle from tracker: ${trackedCycle}`) - lastStoredCycleCount = Math.max(trackedCycle - config.checkpoint.syncCycleBuffer, 0) - } - } - - let response: ArchiverTotalDataResponse = await getTotalDataFromArchivers() - if (!response || response.totalCycles < 0 || response.totalReceipts < 0) { - return - } - const { totalCycles, totalReceipts } = response - Logger.mainLogger.debug('totalCycles', totalCycles, 'lastStoredCycleCount', lastStoredCycleCount) - Logger.mainLogger.debug('totalReceipts', totalReceipts, 'lastStoredReceiptCount', lastStoredReceiptCount) - // Logger.mainLogger.debug( - // 'totalOriginalTxs', - // totalOriginalTxs, - // 'lastStoredOriginalTxCount', - // lastStoredOriginalTxCount - // ) - if ( - totalCycles === lastStoredCycleCount && - totalReceipts === lastStoredReceiptCount - // && totalOriginalTxs === lastStoredOriginalTxCount - ) { - Logger.mainLogger.debug('The archiver has synced the lastest cycle ,receipts and originalTxs data!') - return - } - let totalReceiptsToSync = totalReceipts - // let totalOriginalTxsToSync = totalOriginalTxs - let totalCyclesToSync = totalCycles - let completeForReceipt = false - // let completeForOriginalTx = false - let completeForCycle = false - let startReceipt = lastStoredReceiptCount - // let startOriginalTx = lastStoredOriginalTxCount - let startCycle = lastStoredCycleCount - let endReceipt = startReceipt + MAX_RECEIPTS_PER_REQUEST - // let endOriginalTx = startOriginalTx + MAX_ORIGINAL_TXS_PER_REQUEST - let endCycle = startCycle + MAX_CYCLES_PER_REQUEST - - if (totalCycles === lastStoredCycleCount) completeForCycle = true - if (totalReceipts === lastStoredReceiptCount) completeForReceipt = true - // if (totalOriginalTxs === lastStoredOriginalTxCount) completeForOriginalTx = true - - while ( - !completeForReceipt || - !completeForCycle - // || !completeForOriginalTx - ) { - if ( - endReceipt >= totalReceiptsToSync || - endCycle >= totalCyclesToSync - // || endOriginalTx >= totalOriginalTxsToSync - ) { - response = await getTotalDataFromArchivers() - if ( - response && - response.totalReceipts && - response.totalCycles - // && response.totalOriginalTxs - ) { - if (response.totalReceipts !== totalReceiptsToSync) { - completeForReceipt = false - totalReceiptsToSync = response.totalReceipts - } - // if (response.totalOriginalTxs !== totalOriginalTxsToSync) { - // completeForOriginalTx = false - // totalOriginalTxsToSync = response.totalOriginalTxs - 
// } - if (response.totalCycles !== totalCyclesToSync) { - completeForCycle = false - totalCyclesToSync = response.totalCycles - } - if (totalReceiptsToSync < startReceipt) { - completeForReceipt = true - } - // if (totalOriginalTxsToSync < startOriginalTx) { - // completeForOriginalTx = true - // } - if (totalCyclesToSync < startCycle) { - completeForCycle = true - } - Logger.mainLogger.debug( - 'totalReceiptsToSync', - totalReceiptsToSync, - // 'totalOriginalTxsToSync', - // totalOriginalTxsToSync, - 'totalCyclesToSync', - totalCyclesToSync - ) - } - } - if (!completeForReceipt) { - Logger.mainLogger.debug(`Downloading receipts from ${startReceipt} to ${endReceipt}`) - let success = false - retryCount = 0 - - while (!success && retryCount < MAX_RETRIES) { - const res = (await queryFromArchivers( - RequestDataType.RECEIPT, - { - start: startReceipt, - end: endReceipt, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverReceiptResponse - - if (res && res.receipts) { - const downloadedReceipts = res.receipts as ReceiptDB.Receipt[] - Logger.mainLogger.debug(`Downloaded receipts`, downloadedReceipts.length) - await storeReceiptData(downloadedReceipts, '', false, false, true) - success = true - if (downloadedReceipts.length < MAX_ORIGINAL_TXS_PER_REQUEST) { - startReceipt += downloadedReceipts.length + 1 - endReceipt += downloadedReceipts.length + MAX_ORIGINAL_TXS_PER_REQUEST - } - } else { - Logger.mainLogger.debug(`Invalid download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) - retryCount++ - if (retryCount >= MAX_RETRIES) { - Logger.mainLogger.error('Max retries reached for receipt download') - } - } - } - if (success) { - startReceipt = endReceipt + 1 - endReceipt += MAX_ORIGINAL_TXS_PER_REQUEST - } - } - - // if (!completeForOriginalTx) { - // Logger.mainLogger.debug(`Downloading Original-Txs from ${startOriginalTx} to ${endOriginalTx}`) - // let success = false - // retryCount = 0 - - // while (!success && retryCount < MAX_RETRIES) { - // const res = (await queryFromArchivers( - // RequestDataType.ORIGINALTX, - // { - // start: startOriginalTx, - // end: endOriginalTx, - // }, - // QUERY_TIMEOUT_MAX - // )) as ArchiverOriginalTxResponse - - // if (res && res.originalTxs) { - // const downloadedOriginalTxs = res.originalTxs as OriginalTxDB.OriginalTxData[] - // Logger.mainLogger.debug(`Downloaded Original-Txs: `, downloadedOriginalTxs.length) - // await storeOriginalTxData(downloadedOriginalTxs) - // success = true - - // if (downloadedOriginalTxs.length < MAX_ORIGINAL_TXS_PER_REQUEST) { - // startOriginalTx += downloadedOriginalTxs.length + 1 - // endOriginalTx += downloadedOriginalTxs.length + MAX_ORIGINAL_TXS_PER_REQUEST - // break - // } - // } else { - // Logger.mainLogger.debug(`Invalid Original-Tx download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) - // retryCount++ - // if (retryCount >= MAX_RETRIES) { - // Logger.mainLogger.error('Max retries reached for Original-Tx download') - // startOriginalTx = endOriginalTx + 1 - // endOriginalTx += MAX_ORIGINAL_TXS_PER_REQUEST - // } - // continue - // } - // } - // if (success) { - // startOriginalTx = endOriginalTx + 1 - // endOriginalTx += MAX_ORIGINAL_TXS_PER_REQUEST - // } - // } - - if (!completeForCycle) { - Logger.mainLogger.debug(`Downloading cycles from ${startCycle} to ${endCycle}`) - let success = false - retryCount = 0 - - while (!success && retryCount < MAX_RETRIES) { - const res = (await queryFromArchivers( - RequestDataType.CYCLE, - { - start: startCycle, - end: endCycle, - }, - QUERY_TIMEOUT_MAX - )) as 
ArchiverCycleResponse - if (res && res.cycleInfo) { - const cycles = res.cycleInfo - Logger.mainLogger.debug(`Downloaded cycles`, cycles.length) - for (const cycle of cycles) { - if (!validateCycleData(cycle)) { - Logger.mainLogger.debug('Found invalid cycle data') - continue - } - await processCycles([cycle]) - } - success = true - - // Update the cycle tracker with the highest cycle we've processed - const highestCycle = cycles.reduce((max, cycle) => Math.max(max, cycle.counter), 0) - if (highestCycle > 0) { - updateLastUpdatedCycle(highestCycle) - Logger.mainLogger.debug(`[syncCyclesAndTxsData] Updated cycle tracker to cycle ${highestCycle}`) - } - - if (cycles.length < MAX_CYCLES_PER_REQUEST) { - startCycle += cycles.length + 1 - endCycle += cycles.length + MAX_CYCLES_PER_REQUEST - } - } else { - Logger.mainLogger.debug(`Invalid cycle download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) - retryCount++ - if (retryCount >= MAX_RETRIES) { - Logger.mainLogger.error('Max retries reached for cycle download') - } - } - } - if (success) { - startCycle = endCycle + 1 - endCycle += MAX_CYCLES_PER_REQUEST - } - } - } - Logger.mainLogger.debug('Sync Cycle, Receipt & Original-Tx data completed!') +export async function replaceDataSenderWithDataSenders(publicKey: NodeList.ConsensusNodeInfo['publicKey']): Promise { + return replaceDataSender(publicKey, dataSenders) } -export const syncCyclesAndTxsDataBetweenCycles = async (lastStoredCycle = 0, cycleToSyncTo = 0): Promise => { - Logger.mainLogger.debug(`Syncing cycles and txs data between cycles ${lastStoredCycle} and ${cycleToSyncTo}`) - await syncCyclesBetweenCycles(lastStoredCycle, cycleToSyncTo) - await syncReceiptsByCycle(lastStoredCycle, cycleToSyncTo) - // await syncOriginalTxsByCycle(lastStoredCycle, cycleToSyncTo) -} - -// // simple method to validate old data; it's not good when there are multiple archivers, the receipts saving order may not be the same -// export async function compareWithOldReceiptsData( -// archiver: State.ArchiverNodeInfo, -// lastReceiptCount = 0 -// ) { -// let downloadedReceipts -// const response: any = await P2P.getJson( -// `http://${archiver.ip}:${archiver.port}/receipt?start=${ -// lastReceiptCount - 10 > 0 ? lastReceiptCount - 10 : 0 -// }&end=${lastReceiptCount}` -// ) -// if (response && response.receipts) { -// downloadedReceipts = response.receipts -// } else { -// throw Error( -// `Can't fetch data from receipt ${ -// lastReceiptCount - 10 > 0 ? lastReceiptCount - 10 : 0 -// } to receipt ${lastReceiptCount} from archiver ${archiver}` -// ) -// } -// let oldReceipts = await ReceiptDB.queryReceipts( -// lastReceiptCount - 10 > 0 ? lastReceiptCount - 10 : 0, -// lastReceiptCount -// ) -// // downloadedReceipts.sort((a, b) => -// // a.cycleRecord.counter > b.cycleRecord.counter ? 1 : -1 -// // ); -// // oldReceipts.sort((a, b) => -// // a.cycleRecord.counter > b.cycleRecord.counter ? 
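The cycle branch above follows a validate-then-process discipline: each downloaded record is checked with validateCycleData, invalid records are skipped rather than aborting the batch, and the tracker is advanced to the highest counter actually processed. A hedged standalone sketch of that shape (validate/process/checkpoint stand in for the real functions):

    // Hypothetical shape of the cycle-ingest step: validate, process, checkpoint.
    async function ingestCycles(
      cycles: { counter: number }[],
      validate: (c: { counter: number }) => boolean,
      process: (c: { counter: number }) => Promise<void>,
      checkpoint: (counter: number) => void
    ): Promise<void> {
      let highest = 0
      for (const cycle of cycles) {
        if (!validate(cycle)) continue // skip invalid records instead of aborting the batch
        await process(cycle)
        highest = Math.max(highest, cycle.counter)
      }
      if (highest > 0) checkpoint(highest) // persist the furthest cycle actually processed
    }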
1 : -1 -// // ); -// let success = false -// let receiptsToMatchCount = 10 -// for (let i = 0; i < downloadedReceipts.length; i++) { -// let downloadedReceipt = downloadedReceipts[i] -// const oldReceipt = oldReceipts[i] -// if (oldReceipt.counter) delete oldReceipt.counter -// console.log(downloadedReceipt.receiptId, oldReceipt.receiptId) -// if (downloadedReceipt.receiptId !== oldReceipt.receiptId) { -// return { -// success, -// receiptsToMatchCount, -// } -// } -// success = true -// receiptsToMatchCount-- -// } -// return { success, receiptsToMatchCount } -// } -export async function compareWithOldOriginalTxsData(lastStoredOriginalTxCycle = 0): Promise { - const numberOfCyclesTocompare = 10 - let success = false - let matchedCycle = 0 - const endCycle = lastStoredOriginalTxCycle - const startCycle = endCycle - numberOfCyclesTocompare > 0 ? endCycle - numberOfCyclesTocompare : 0 - const response = (await queryFromArchivers( - RequestDataType.ORIGINALTX, - { - startCycle, - endCycle, - type: 'tally', - }, - QUERY_TIMEOUT_MAX - )) as ArchiverOriginalTxResponse - - if (!response || !response.originalTxs) { - Logger.mainLogger.error(`Can't fetch original tx data from cycle ${startCycle} to cycle ${endCycle} from archivers`) - return { success, matchedCycle } - } - const downloadedOriginalTxsByCycles = response.originalTxs as OriginalTxDB.OriginalTxDataCount[] - - const oldOriginalTxCountByCycle = await OriginalTxDB.queryOriginalTxDataCountByCycles(startCycle, endCycle) - - for (let i = 0; i < downloadedOriginalTxsByCycles.length; i++) { - // eslint-disable-next-line security/detect-object-injection - const downloadedOriginalTx = downloadedOriginalTxsByCycles[i] - // eslint-disable-next-line security/detect-object-injection - const oldOriginalTx = oldOriginalTxCountByCycle[i] - Logger.mainLogger.debug(downloadedOriginalTx, oldOriginalTx) - if ( - !downloadedOriginalTx || - !oldOriginalTx || - downloadedOriginalTx.cycle !== oldOriginalTx.cycle || - downloadedOriginalTx.originalTxDataCount !== oldOriginalTx.originalTxDataCount - ) { - return { - success, - matchedCycle, - } - } - success = true - matchedCycle = downloadedOriginalTx.cycle - } - success = true - return { success, matchedCycle } +export async function subscribeNodeForDataTransferWithDataSenders(): Promise { + return subscribeNodeForDataTransfer(dataSenders) } -export async function compareWithOldReceiptsData(lastStoredReceiptCycle = 0): Promise { - const numberOfCyclesTocompare = 10 - let success = false - let matchedCycle = 0 - const endCycle = lastStoredReceiptCycle - const startCycle = endCycle - numberOfCyclesTocompare > 0 ? 
endCycle - numberOfCyclesTocompare : 0 - const response = (await queryFromArchivers( - RequestDataType.RECEIPT, - { - startCycle, - endCycle, - type: 'tally', - }, - QUERY_TIMEOUT_MAX - )) as ArchiverReceiptResponse - - if (!response || !response.receipts) { - Logger.mainLogger.error(`Can't fetch receipts data from cycle ${startCycle} to cycle ${endCycle} from archivers`) - return { success, matchedCycle } - } - const downloadedReceiptCountByCycles = response.receipts as ReceiptDB.ReceiptCount[] - - const oldReceiptCountByCycle = await ReceiptDB.queryReceiptCountByCycles(startCycle, endCycle) - for (let i = 0; i < downloadedReceiptCountByCycles.length; i++) { - // eslint-disable-next-line security/detect-object-injection - const downloadedReceipt = downloadedReceiptCountByCycles[i] - // eslint-disable-next-line security/detect-object-injection - const oldReceipt = oldReceiptCountByCycle[i] - Logger.mainLogger.debug(downloadedReceipt, oldReceipt) - if ( - !downloadedReceipt || - !oldReceipt || - downloadedReceipt.cycle !== oldReceipt.cycle || - downloadedReceipt.receiptCount !== oldReceipt.receiptCount - ) { - return { - success, - matchedCycle, - } - } - success = true - matchedCycle = downloadedReceipt.cycle - } - success = true - return { success, matchedCycle } +export async function createDataTransferConnectionWithDataSenders(newSenderInfo: NodeList.ConsensusNodeInfo): Promise { + return createDataTransferConnection(newSenderInfo, dataSenders) } -export async function compareWithOldCyclesData(lastCycleCounter = 0): Promise { - try { - const numberOfCyclesTocompare = 10 - const start = lastCycleCounter - numberOfCyclesTocompare - const end = lastCycleCounter - const response = (await queryFromArchivers( - RequestDataType.CYCLE, - { - start, - end, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverCycleResponse - if (!response && !response.cycleInfo) { - throw Error(`Can't fetch data from cycle ${start} to cycle ${end} from archivers`) - } - const downloadedCycles = response.cycleInfo - const oldCycles = await CycleDB.queryCycleRecordsBetween(start, end) - let success = false - let matchedCycle = 0 - for (let i = 0; i < downloadedCycles.length; i++) { - // eslint-disable-next-line security/detect-object-injection - const downloadedCycle = downloadedCycles[i] - // eslint-disable-next-line security/detect-object-injection - const oldCycle = oldCycles[i] - if ( - !downloadedCycle || - !oldCycle || - StringUtils.safeStringify(downloadedCycle) !== StringUtils.safeStringify(oldCycle) - ) { - if (config.VERBOSE) { - Logger.mainLogger.error('Mismatched cycle Number', downloadedCycle.counter, oldCycle.counter) - } - return { - success, - matchedCycle, - } - } - success = true - matchedCycle = downloadedCycle.counter - } - return { success, matchedCycle } - } catch (error) { - Logger.mainLogger.error('compareWithOldCyclesData error: ' + error) - return { success: false, matchedCycle: 0 } - } +export async function subscribeConsensorsByConsensusRadiusWithDataSenders(): Promise { + return subscribeConsensorsByConsensusRadius(dataSenders) } -async function downloadOldCycles( - cycleToSyncTo: P2PTypes.CycleCreatorTypes.CycleData, - lastStoredCycleCount: number +export async function subscribeNodeFromThisSubsetWithDataSenders( + nodeList: NodeList.ConsensusNodeInfo[], + roundIndex: number ): Promise { - let endCycle = cycleToSyncTo.counter - 1 - Logger.mainLogger.debug('endCycle counter', endCycle, 'lastStoredCycleCount', lastStoredCycleCount) - if (endCycle > lastStoredCycleCount) { - 
Logger.mainLogger.debug(`Downloading old cycles from cycles ${lastStoredCycleCount} to cycle ${endCycle}!`) - } - - let savedCycleRecord = cycleToSyncTo - const MAX_RETRY_COUNT = 3 - let retryCount = 0 - while (endCycle > lastStoredCycleCount) { - let startCycle: number = endCycle - MAX_CYCLES_PER_REQUEST - if (startCycle < 0) startCycle = 0 - if (startCycle < lastStoredCycleCount) startCycle = lastStoredCycleCount - Logger.mainLogger.debug(`Getting cycles ${startCycle} - ${endCycle} ...`) - const res = (await queryFromArchivers( - RequestDataType.CYCLE, - { - start: startCycle, - end: endCycle, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverCycleResponse - if (!res || !res.cycleInfo || !Array.isArray(res.cycleInfo) || res.cycleInfo.length === 0) { - Logger.mainLogger.error(`Can't fetch data from cycle ${startCycle} to cycle ${endCycle} from archivers`) - if (retryCount < MAX_RETRY_COUNT) { - retryCount++ - continue - } else { - endCycle = startCycle - 1 - retryCount = 0 - } - } - - const prevCycles = res.cycleInfo as P2PTypes.CycleCreatorTypes.CycleData[] - if (prevCycles) prevCycles.sort((a, b) => (a.counter > b.counter ? -1 : 1)) - - const combineCycles: P2PTypes.CycleCreatorTypes.CycleData[] = [] - for (const prevCycle of prevCycles) { - if (validateCycle(prevCycle, savedCycleRecord) === false) { - Logger.mainLogger.error(`Record ${prevCycle.counter} failed validation`) - Logger.mainLogger.debug('fail', prevCycle, savedCycleRecord) - } - savedCycleRecord = prevCycle - combineCycles.push(prevCycle) - } - await storeCycleData(combineCycles) - endCycle = startCycle - 1 - } -} - -/** - * Syncs cycle data for a specific cycle - * @param cycle The cycle number to sync - * @returns True if successful, false otherwise - */ -export async function syncCycleData(cycle: number): Promise { - const MAX_RETRIES = 3 - let retryCount = 0 - let success = false - - Logger.mainLogger.debug(`syncCycleData: Starting sync for cycle ${cycle}`) - Logger.mainLogger.debug(`syncCycleData: Active nodes count: ${NodeList.activeListByIdSorted.length}`) - - while (!success && retryCount < MAX_RETRIES) { - try { - Logger.mainLogger.debug(`syncCycleData: Attempt ${retryCount + 1} for cycle ${cycle}`) - - const res = (await queryFromArchivers( - RequestDataType.CYCLE, - { - start: cycle, - end: cycle, - }, - QUERY_TIMEOUT_MAX - )) as ArchiverCycleResponse - - if (res && res.cycleInfo && res.cycleInfo.length > 0) { - const cycleData = res.cycleInfo[0] - Logger.mainLogger.debug(`syncCycleData: Received data for cycle ${cycle}, marker: ${cycleData.marker}`) - - if (!validateCycleData(cycleData)) { - Logger.mainLogger.error(`syncCycleData: Invalid cycle data for cycle ${cycle}`) - Logger.mainLogger.error(`syncCycleData: Cycle validation failed, checking marker computation...`) - nestedCountersInstance.countEvent('archiver', 'cycle_validation_failed - ' + cycle) - - // Debug marker computation - const cycleDataCopy = { ...cycleData } - delete cycleDataCopy.marker - const computedMarker = Cycles.computeCycleMarker(cycleDataCopy) - Logger.mainLogger.error( - `syncCycleData: Computed marker: ${computedMarker}, received marker: ${cycleData.marker}` - ) - - retryCount++ - continue - } - - await processCycles([cycleData]) - Logger.mainLogger.debug(`syncCycleData: Successfully synced and processed cycle ${cycle}`) - // Successfully synced cycle data for cycle - success = true - return true - } else { - Logger.mainLogger.error( - `syncCycleData: Failed to get cycle data for cycle ${cycle}, attempt ${retryCount + 1} of ${MAX_RETRIES}` - ) 
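The marker-debug block in syncCycleData above shows how a cycle marker is verified: the marker field is excluded from the hash that produced it, so it must be deleted before recomputing. A minimal illustration of that idea, assuming a stand-in SHA-256-of-JSON hash; the real computeCycleMarker comes from Cycles and uses the shardus crypto utilities:

    import { createHash } from 'crypto'

    // Hypothetical stand-in for computeCycleMarker: hash everything except the marker.
    function computeMarkerSketch(record: Record<string, unknown>): string {
      const { marker, ...rest } = record // strip the marker before hashing
      return createHash('sha256').update(JSON.stringify(rest)).digest('hex')
    }

    function markerMatches(record: Record<string, unknown>): boolean {
      return record.marker === computeMarkerSketch(record)
    }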
- retryCount++ - } - } catch (error) { - Logger.mainLogger.error(`syncCycleData: Error syncing cycle data for cycle ${cycle}: ${error}`) - retryCount++ - } - } - - Logger.mainLogger.error(`syncCycleData: All attempts to sync cycle ${cycle} failed`) - return false -} - -// We want to check that all of the provided certs are actually valid. This means we need to check that -// they all have the same marker, and that that marker is the same as the one of the original record. -// We also want all of the signer to actually be active node. And of course, the certs must pass sign -// verification. If we have already verified a cert in the past, we can skip it. It is possble for a -// malicious node to use valid certs from a honest record to get this function to return true. However, -// it will also have to make sure the inpMarker is the same as the markers as it is in the certs. If it -// does this, then the validateCycleData() function that gets called later will fail -function validateCerts( - certs: P2PTypes.CycleCreatorTypes.CycleCert[], - certSigners: Set, - inpMarker: string, - cycleData: P2PTypes.CycleCreatorTypes.CycleData -) { - nestedCountersInstance.countEvent('validateCerts', 'validation', 1) - Logger.mainLogger.debug(`validateCerts: Validating ${certs.length} certificates against marker ${inpMarker}`) - - for (const cert of certs) { - const cleanCert: P2PTypes.CycleCreatorTypes.CycleCert = { - marker: cert.marker, - sign: cert.sign, - } - if (cleanCert.marker !== inpMarker) { - nestedCountersInstance.countEvent('validateCerts', 'markerMismatch', 1) - - validationTracker.add({ cycle: cycleData }) - - return false - } - if (NodeList.activeListByIdSorted.some((node) => node.publicKey === cleanCert.sign.owner) === false) { - nestedCountersInstance.countEvent('validateCerts', 'badOwner', 1) - Logger.mainLogger.warn(`validateCerts: bad owner ${cleanCert.sign.owner} not found in active nodes`) - return false - } - if (certSigners.has(cert.sign.owner)) { - nestedCountersInstance.countEvent('validateCerts', 'skipExistingSigner', 1) - Logger.mainLogger.debug(`validateCerts: Skipping already verified cert from ${cert.sign.owner}`) - continue - } - if (!Crypto.verify(cleanCert)) { - nestedCountersInstance.countEvent('validateCerts', 'badSignature', 1) - Logger.mainLogger.warn(`validateCerts: bad signature from ${cleanCert.sign.owner}`) - return false - } - nestedCountersInstance.countEvent('validateCerts', 'validCert', 1) - } - - Logger.mainLogger.debug(`validateCerts: All certificates validated successfully`) - return true + return subscribeNodeFromThisSubset(nodeList, roundIndex, dataSenders) } -export function scoreCert(pubKey: string, prevMarker: P2PTypes.CycleCreatorTypes.CycleMarker): number { - try { - const node = NodeList.byPublicKey.get(pubKey) - const id = node.id // get node id from cert pub key - const obj = { id } - const hid = Crypto.hashObj(obj) // Omar - use hash of id so the cert is not made by nodes that are near based on node id - - const out = XOR(prevMarker, hid) - - // will also nerf if foundationNode is undefined, which is will be for already active nodes when we - // first turn on the addFoundationNodeAttribute flag under the current implementation - if (config.nerfNonFoundationCertScores && !node.foundationNode) { - return out & 0x0fffffff - } - - return out - } catch (err) { - Logger.mainLogger.error('scoreCert ERR:', err) - return 0 - } +export function addDataSenderWithDataSenders(sender: DataSender): void { + addDataSender(sender, dataSenders) } -// this function 
is needed since the cycle record is changed after Q3/Q4. Thus, the cycle certs will contain
-// the marker of the cycle as it existed in Q3/Q4. However, the cycle that we ceived at the start of the
-// function has been changed, so its marker has also been changed. If we try to check this new mark against
-// the markers inside the certs, the validation will obviously fail. So we want to revert those changes on a
-// deep copy so that we can get the original record
-function getRecordWithoutPostQ3Changes(cycle: P2PTypes.CycleCreatorTypes.CycleRecord) {
-  Logger.mainLogger.debug(`getRecordWithoutPostQ3Changes: Processing cycle ${cycle.counter}`)
-
-  const cycleCopy = StringUtils.safeJsonParse(StringUtils.safeStringify(cycle))
-  delete cycleCopy.marker
-  delete cycleCopy.certificates
-  cycleCopy.nodeListHash = ''
-  cycleCopy.archiverListHash = ''
-  cycleCopy.standbyNodeListHash = ''
-  cycleCopy.joinedConsensors.forEach((jc) => (jc.syncingTimestamp = 0))
-  return cycleCopy
-}
+// Add clearDataSenders wrapper
+let subsetNodesMapByConsensusRadius: Map<number, NodeList.ConsensusNodeInfo[]> = new Map()
+export async function clearDataSenders(): Promise<void> {
+  await clearDataSendersImpl(dataSenders, socketClients, subsetNodesMapByConsensusRadius, unsubscribeDataSenderWithDataSenders)
+}
\ No newline at end of file
diff --git a/src/Data/Data_old.ts b/src/Data/Data_old.ts
new file mode 100644
index 00000000..2ebe8f89
--- /dev/null
+++ b/src/Data/Data_old.ts
@@ -0,0 +1,2996 @@
+import { EventEmitter } from 'events'
+import { publicKey, SignedObject } from '@shardeum-foundation/lib-crypto-utils'
+import * as Crypto from '../Crypto'
+import * as NodeList from '../NodeList'
+import * as Cycles from './Cycles'
+import {
+  getCurrentCycleCounter,
+  currentCycleDuration,
+  processCycles,
+  validateCycle,
+  validateCycleData,
+  fetchCycleRecords,
+  getNewestCycleFromArchivers,
+  getNewestCycleFromConsensors,
+} from './Cycles'
+import { ChangeSquasher, parse, totalNodeCount, activeNodeCount, applyNodeListChange } from './CycleParser'
+import * as State from '../State'
+import * as P2P from '../P2P'
+import * as Utils from '../Utils'
+import { config, updateConfig } from '../Config'
+import { P2P as P2PTypes } from '@shardeum-foundation/lib-types'
+import * as Logger from '../Logger'
+import { nestedCountersInstance } from '../profiler/nestedCounters'
+import {
+  storeReceiptData,
+  storeCycleData,
+  storeAccountData,
+  storingAccountData,
+  storeOriginalTxData,
+} from './Collector'
+import * as CycleDB from '../dbstore/cycles'
+import * as ReceiptDB from '../dbstore/receipts'
+import * as OriginalTxDB from '../dbstore/originalTxsData'
+import * as StateMetaData from '../archivedCycle/StateMetaData'
+import fetch from 'node-fetch'
+import { syncV2 } from '../sync-v2'
+import { queryFromArchivers, RequestDataType } from '../API'
+import ioclient = require('socket.io-client')
+import { Transaction } from '../dbstore/transactions'
+import { AccountsCopy } from '../dbstore/accounts'
+import { getJson } from '../P2P'
+import { robustQuery } from '../Utils'
+import { Utils as StringUtils } from '@shardeum-foundation/lib-types'
+import { cachedCycleRecords, updateCacheFromDB } from '../cache/cycleRecordsCache'
+import { XOR } from '../utils/general'
+import { customFetch } from '../utils/customHttpFunctions'
+import { ArchiverLogging } from '../profiler/archiverLogging'
+import { Utils as UtilsTypes } from '@shardeum-foundation/lib-types'
+import { logEnvSetup } from '../utils/environment'
+
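The `...WithDataSenders` wrappers added above exist so the newly split modules carry no module-level state of their own: the shared dataSenders map stays in this facade and is injected into each implementation. A reduced sketch of the pattern, with simplified types; unsubscribeImpl stands in for the real split-module implementation:

    // Hypothetical reduction of the dependency-injection pattern used by the split.
    type DataSender = { nodeInfo: { ip: string; port: number }; contactTimeout?: NodeJS.Timeout | null }

    // Shared state owned by the facade module.
    const dataSenders: Map<string, DataSender> = new Map()

    // Implementation module: takes its state as a parameter instead of importing it.
    function unsubscribeImpl(publicKey: string, senders: Map<string, DataSender>): void {
      const sender = senders.get(publicKey)
      if (sender?.contactTimeout) clearTimeout(sender.contactTimeout)
      senders.delete(publicKey)
    }

    // Facade wrapper: binds the shared map so existing callers keep a one-argument API.
    export function unsubscribeDataSenderSketch(publicKey: string): void {
      unsubscribeImpl(publicKey, dataSenders)
    }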
+interface ValidationBreadcrumb {
+ cycle: P2PTypes.CycleCreatorTypes.CycleData
+}
+
+
+class ValidationTracker {
+ private seen = new Set<string>()
+ private breadcrumbs: ValidationBreadcrumb[] = []
+ private readonly MAX_ENTRIES = 1000
+
+ add(breadcrumb: ValidationBreadcrumb): void {
+ const key = `${breadcrumb.cycle.marker}:${breadcrumb.cycle.previous}`
+
+ if (this.seen.has(key)) {
+ return
+ }
+
+ if (this.breadcrumbs.length >= this.MAX_ENTRIES) {
+ const oldest = this.breadcrumbs.shift()!
+ this.seen.delete(`${oldest.cycle.marker}:${oldest.cycle.previous}`)
+ }
+
+ Logger.mainLogger.warn('[ValidationTracker] Certificate validation failed', {
+ cycle: breadcrumb.cycle,
+ })
+
+ this.seen.add(key)
+ this.breadcrumbs.push(breadcrumb)
+ }
+}
+
+const validationTracker = new ValidationTracker()
+
+export const socketClients: Map<string, SocketIOClient.Socket> = new Map()
+export let combineAccountsData = {
+ accounts: [],
+ receipts: [],
+}
+const forwardGenesisAccounts = true
+export let currentConsensusRadius = 0
+export let nodesPerConsensusGroup = 0
+export let nodesPerEdge = 0
+let subsetNodesMapByConsensusRadius: Map<number, NodeList.ConsensusNodeInfo[]> = new Map()
+const maxCyclesInCycleTracker = 5
+const receivedCycleTracker = {}
+const QUERY_TIMEOUT_MAX = 30 // 30 seconds
+const {
+ MAX_ACCOUNTS_PER_REQUEST,
+ MAX_RECEIPTS_PER_REQUEST,
+ MAX_ORIGINAL_TXS_PER_REQUEST,
+ MAX_CYCLES_PER_REQUEST,
+ MAX_BETWEEN_CYCLES_PER_REQUEST,
+} = config.REQUEST_LIMIT
+
+const GENESIS_ACCOUNTS_CYCLE_RANGE = {
+ startCycle: 0,
+ endCycle: 5,
+}
+
+export enum DataRequestTypes {
+ SUBSCRIBE = 'SUBSCRIBE',
+ UNSUBSCRIBE = 'UNSUBSCRIBE',
+}
+
+export interface DataRequest<T extends P2PTypes.SnapshotTypes.ValidTypes> {
+ type: P2PTypes.SnapshotTypes.TypeName
+ lastData: P2PTypes.SnapshotTypes.TypeIndex
+}
+
+interface DataResponse<T extends P2PTypes.SnapshotTypes.ValidTypes> {
+ type: P2PTypes.SnapshotTypes.TypeName
+ data: T[]
+}
+
+export interface CompareResponse {
+ success: boolean
+ matchedCycle: number
+}
+
+interface ArchiverCycleResponse {
+ cycleInfo: P2PTypes.CycleCreatorTypes.CycleData[]
+}
+
+interface ArchiverTransactionResponse {
+ totalTransactions: number
+ transactions: Transaction[]
+}
+
+interface ArchiverAccountResponse {
+ totalAccounts: number
+ accounts: AccountsCopy[]
+}
+
+interface ArchiverTotalDataResponse {
+ totalCycles: number
+ totalAccounts: number
+ totalTransactions: number
+ totalOriginalTxs: number
+ totalReceipts: number
+}
+
+interface ArchiverReceiptResponse {
+ receipts: (ReceiptDB.Receipt | ReceiptDB.ReceiptCount)[] | number
+}
+
+interface ArchiverReceiptCountResponse {
+ receipts: number
+}
+
+interface ArchiverOriginalTxResponse {
+ originalTxs: (OriginalTxDB.OriginalTxData | OriginalTxDB.OriginalTxDataCount)[] | number
+}
+
+interface ArchiverOriginalTxCountResponse {
+ originalTxs: number
+}
+interface IncomingTimes {
+ quarterDuration: number
+ startQ1: number
+ startQ2: number
+ startQ3: number
+ startQ4: number
+ end: number
+}
+
+interface JoinStatus {
+ isJoined: boolean
+}
+
+export type subscriptionCycleData = Omit<P2PTypes.CycleCreatorTypes.CycleData, 'certificates'> & {
+ certificates: P2PTypes.CycleCreatorTypes.CycleCert[]
+}
+
+export function createDataRequest<T extends P2PTypes.SnapshotTypes.ValidTypes>(
+ type: P2PTypes.SnapshotTypes.TypeName,
+ lastData: P2PTypes.SnapshotTypes.TypeIndex,
+ recipientPk: publicKey
+): DataRequest<T> & Crypto.TaggedMessage {
+ return Crypto.tag<DataRequest<T>>(
+ {
+ type,
+ lastData,
+ },
+ recipientPk
+ )
+}
+
+export async function unsubscribeDataSender(publicKey: NodeList.ConsensusNodeInfo['publicKey']): Promise<void> {
+ Logger.mainLogger.debug('Disconnecting previous connection', publicKey)
+ const sender = dataSenders.get(publicKey)
+ if (sender) {
+ // Clear contactTimeout associated with this sender
+ if 
(sender.contactTimeout) { + clearTimeout(sender.contactTimeout) + sender.contactTimeout = null + } + sendDataRequest(sender.nodeInfo, DataRequestTypes.UNSUBSCRIBE) + // Delete sender from dataSenders + dataSenders.delete(publicKey) + } + const socketClient = socketClients.get(publicKey) + if (socketClient) { + socketClient.emit('UNSUBSCRIBE', config.ARCHIVER_PUBLIC_KEY) + socketClient.close() + socketClients.delete(publicKey) + } + nestedCountersInstance.countEvent('archiver', 'remove_data_sender') + Logger.mainLogger.debug('Subscribed dataSenders', dataSenders.size, 'Connected socketClients', socketClients.size) + if (config.VERBOSE) + Logger.mainLogger.debug( + 'Subscribed dataSenders', + dataSenders.keys(), + 'Connected socketClients', + socketClients.keys() + ) +} + +export function initSocketClient(node: NodeList.ConsensusNodeInfo): void { + if (config.VERBOSE) Logger.mainLogger.debug('Node Info to socket connect', node) + + try { + const socketClient = ioclient.connect(`http://${node.ip}:${node.port}`, { + query: { + data: StringUtils.safeStringify( + Crypto.sign({ + publicKey: State.getNodeInfo().publicKey, + timestamp: Date.now(), + intendedConsensor: node.publicKey, + }) + ), + }, + }) + socketClients.set(node.publicKey, socketClient) + + socketClient.on('connect', () => { + Logger.mainLogger.debug(`✅ New Socket Connection to consensus node ${node.ip}:${node.port} is made`) + if (config.VERBOSE) Logger.mainLogger.debug('Connected node', node) + if (config.VERBOSE) Logger.mainLogger.debug('Init socketClients', socketClients.size, dataSenders.size) + }) + + socketClient.once('disconnect', async () => { + Logger.mainLogger.debug(`Connection request is refused by the consensor node ${node.ip}:${node.port}`) + }) + + socketClient.on('DATA', (data: string) => { + const newData: DataResponse & Crypto.TaggedMessage = + StringUtils.safeJsonParse(data) + if (!newData || !newData.responses) return + if (newData.recipient !== State.getNodeInfo().publicKey) { + Logger.mainLogger.debug('This data is not meant for this archiver') + return + } + + // If tag is invalid, dont keepAlive, END + if (Crypto.authenticate(newData) === false) { + Logger.mainLogger.debug('This data cannot be authenticated') + unsubscribeDataSender(node.publicKey) + return + } + + if (config.experimentalSnapshot) { + // Get sender entry + let sender = dataSenders.get(newData.publicKey) + // If no sender entry, remove publicKey from senders, END + if (!sender) { + Logger.mainLogger.error('This sender is not in the subscribed nodes list', newData.publicKey) + // unsubscribeDataSender(newData.publicKey) + return + } + // Clear senders contactTimeout, if it has one + if (sender.contactTimeout) { + if (config.VERBOSE) Logger.mainLogger.debug('Clearing contact timeout.') + clearTimeout(sender.contactTimeout) + sender.contactTimeout = null + nestedCountersInstance.countEvent('archiver', 'clear_contact_timeout') + } + + if (config.VERBOSE) console.log('DATA', sender.nodeInfo.publicKey, sender.nodeInfo.ip, sender.nodeInfo.port) + + if (newData.responses && newData.responses.ORIGINAL_TX_DATA) { + if (config.VERBOSE) + Logger.mainLogger.debug( + 'ORIGINAL_TX_DATA', + sender.nodeInfo.publicKey, + sender.nodeInfo.ip, + sender.nodeInfo.port, + newData.responses.ORIGINAL_TX_DATA.length + ) + // gracefully ignoring since it is now coupled with the receipt flow + // storeOriginalTxData( + // newData.responses.ORIGINAL_TX_DATA, + // sender.nodeInfo.ip + ':' + sender.nodeInfo.port, + // config.saveOnlyGossipData + // ) + } + if 
(newData.responses && newData.responses.RECEIPT) {
+ if (config.VERBOSE)
+ Logger.mainLogger.debug(
+ 'RECEIPT',
+ sender.nodeInfo.publicKey,
+ sender.nodeInfo.ip,
+ sender.nodeInfo.port,
+ newData.responses.RECEIPT.length
+ )
+ storeReceiptData(
+ newData.responses.RECEIPT,
+ sender.nodeInfo.ip + ':' + sender.nodeInfo.port,
+ true,
+ config.saveOnlyGossipData,
+ true
+ )
+ }
+ if (newData.responses && newData.responses.CYCLE) {
+ collectCycleData(newData.responses.CYCLE, sender.nodeInfo.ip + ':' + sender.nodeInfo.port, 'data-sender')
+ }
+ if (newData.responses && newData.responses.ACCOUNT) {
+ if (getCurrentCycleCounter() > GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle) {
+ Logger.mainLogger.error(
+ 'Account data is not meant to be received after the genesis accounts cycle range',
+ getCurrentCycleCounter()
+ )
+ unsubscribeDataSender(sender.nodeInfo.publicKey)
+ return
+ }
+ if (
+ Cycles.currentNetworkMode !== 'forming' ||
+ NodeList.byPublicKey.size > 1 ||
+ !NodeList.byPublicKey.has(sender.nodeInfo.publicKey)
+ ) {
+ Logger.mainLogger.error(
+ 'Account data is not meant to be received by the first validator',
+ `Number of nodes in the network ${NodeList.byPublicKey.size}`
+ )
+ unsubscribeDataSender(sender.nodeInfo.publicKey)
+ return
+ }
+ Logger.mainLogger.debug(`RECEIVED ACCOUNTS DATA FROM ${sender.nodeInfo.ip}:${sender.nodeInfo.port}`)
+ nestedCountersInstance.countEvent('genesis', 'accounts', 1)
+ if (!forwardGenesisAccounts) {
+ Logger.mainLogger.debug('Genesis Accounts To Sync', newData.responses.ACCOUNT)
+ syncGenesisAccountsFromConsensor(newData.responses.ACCOUNT, sender.nodeInfo)
+ } else {
+ if (storingAccountData) {
+ Logger.mainLogger.debug('Storing Account Data')
+ let newCombineAccountsData = { ...combineAccountsData }
+ if (newData.responses.ACCOUNT.accounts)
+ newCombineAccountsData.accounts = [
+ ...newCombineAccountsData.accounts,
+ ...newData.responses.ACCOUNT.accounts,
+ ]
+ if (newData.responses.ACCOUNT.receipts)
+ newCombineAccountsData.receipts = [
+ ...newCombineAccountsData.receipts,
+ ...newData.responses.ACCOUNT.receipts,
+ ]
+ combineAccountsData = { ...newCombineAccountsData }
+ newCombineAccountsData = {
+ accounts: [],
+ receipts: [],
+ }
+ } else storeAccountData(newData.responses.ACCOUNT)
+ }
+ }
+
+ // Set new contactTimeout for sender. 
Postpone sender removal because data is still received from consensor + nestedCountersInstance.countEvent('archiver', 'postpone_contact_timeout') + // To make sure that the sender is still in the subscribed list + sender = dataSenders.get(newData.publicKey) + if (sender) + sender.contactTimeout = createContactTimeout( + sender.nodeInfo.publicKey, + 'This timeout is created after processing data' + ) + } + }) + } catch (error) { + console.error('Error occurred during socket connection:', error) + } +} + +export function collectCycleData( + cycleData: subscriptionCycleData[] | P2PTypes.CycleCreatorTypes.CycleData[], + senderInfo: string, + source: string +): void { + const startTime = Date.now() + const operationId = ArchiverLogging.generateOperationId() + + Logger.mainLogger.debug( + `collectCycleData: Processing ${cycleData.length} cycles from ${senderInfo}, source: ${source}` + ) + + nestedCountersInstance.countEvent('collectCycleData', 'cycles_received', cycleData.length) + nestedCountersInstance.countEvent('collectCycleData', 'source_' + source, 1) + + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: 0, + dataType: 'CYCLE_RECORD', + dataHash: '', + status: 'STARTED', + operationId, + metrics: { + duration: 0, + dataSize: StringUtils.safeStringify(cycleData).length, + }, + }) + + // check if the sender is in the nodelists + if (NodeList.activeListByIdSorted.length > 0) { + const [ip, port] = senderInfo.split(':') + const isInActiveNodes = NodeList.activeListByIdSorted.some( + (node) => node.ip === ip && node.port.toString() === port + ) + const isInActiveArchivers = State.activeArchivers.some( + (archiver) => archiver.ip === ip && archiver.port.toString() === port + ) + if (!isInActiveNodes && !isInActiveArchivers) { + nestedCountersInstance.countEvent('collectCycleData', 'sender_not_active', 1) + Logger.mainLogger.warn(`collectCycleData: Ignoring cycle data from non-active node: ${senderInfo}`) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: 0, + dataType: 'CYCLE_RECORD', + dataHash: '', + status: 'ERROR', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycleData).length, + }, + error: 'Sender not in active nodes or archivers', + }) + return + } + } + + for (const cycle of cycleData) { + Logger.mainLogger.debug(`collectCycleData: Processing cycle ${cycle.counter}, marker: ${cycle.marker}`) + + if (receivedCycleTracker[cycle.counter]?.saved === true) { + nestedCountersInstance.countEvent('collectCycleData', 'cycle_already_saved_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: Cycle ${cycle.counter} already saved, skipping`) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: cycle.marker, + status: 'COMPLETE', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycle).length, + }, + }) + break + } + + nestedCountersInstance.countEvent('collectCycleData', 'process_cycle_' + cycle.mode, 1) + + // since we can trust archivers and archiver only gossip after they have verified the cycleData + // we can just call processCycles here + if (source === 'archiver') { + nestedCountersInstance.countEvent('collectCycleData', 'direct_process_from_archiver', 1) + Logger.mainLogger.debug(`collectCycleData: Processing cycle ${cycle.counter} from archiver 
directly`) + processCycles([cycle as P2PTypes.CycleCreatorTypes.CycleData]) + continue + } + + let receivedCertSigners = [] + if (NodeList.activeListByIdSorted.length > 0) { + const certSigners = receivedCycleTracker[cycle.counter]?.[cycle.marker]?.['certSigners'] ?? new Set() + + try { + // need to get the hash(marker) of the cycle as it was in q3/q4 when the certs were made and compared + Logger.mainLogger.debug(`collectCycleData: Original cycle data: ${UtilsTypes.safeStringify(cycle)}`) + const cycleCopy = getRecordWithoutPostQ3Changes(cycle) + const computedMarker = Cycles.computeCycleMarker(cycleCopy) + Logger.mainLogger.debug(`collectCycleData: cycle copy ${UtilsTypes.safeStringify(cycleCopy)}`) + Logger.mainLogger.debug( + `collectCycleData: Computed marker for cycle ${cycle.counter}: ${computedMarker}, original marker: ${cycle.marker}` + ) + Logger.mainLogger.debug( + `collectCycleData: Validating ${(cycle as subscriptionCycleData).certificates?.length || 0} certificates for cycle ${cycle.counter}` + ) + + const validateCertsResult = validateCerts( + (cycle as subscriptionCycleData).certificates, + certSigners, + computedMarker, + cycleCopy as P2PTypes.CycleCreatorTypes.CycleData + ) + + if (validateCertsResult === false) { + nestedCountersInstance.countEvent('collectCycleData', 'certificate_validation_failed_' + cycle.mode, 1) + Logger.mainLogger.warn( + `collectCycleData: Certificate validation failed for cycle ${cycle.counter} from ${senderInfo} in ${cycle.mode} mode` + ) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: cycle.marker, + status: 'ERROR', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycle).length, + }, + error: 'Certificate validation failed', + }) + break + } + + nestedCountersInstance.countEvent('collectCycleData', 'certificate_validation_success_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: Certificate validation successful for cycle ${cycle.counter}`) + } catch (error) { + nestedCountersInstance.countEvent('collectCycleData', 'certificate_validation_error_' + cycle.mode, 1) + Logger.mainLogger.error( + `collectCycleData: Error during certificate validation for cycle ${cycle.counter}: ${error}` + ) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: cycle.marker, + status: 'ERROR', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycle).length, + }, + error: `Certificate validation error: ${error.message}`, + }) + break + } + } + + receivedCertSigners = (cycle as subscriptionCycleData).certificates.map((cert) => cert.sign.owner) + Logger.mainLogger.debug( + `collectCycleData: Received ${receivedCertSigners.length} certificate signers for cycle ${cycle.counter}` + ) + delete (cycle as subscriptionCycleData).certificates + + if (receivedCycleTracker[cycle.counter]) { + if (receivedCycleTracker[cycle.counter][cycle.marker]) { + nestedCountersInstance.countEvent('collectCycleData', 'add_signers_to_existing_marker_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: Adding signers to existing marker for cycle ${cycle.counter}`) + for (const signer of receivedCertSigners) + receivedCycleTracker[cycle.counter][cycle.marker]['certSigners'].add(signer) + } else { + if (!validateCycleData(cycle)) { + 
nestedCountersInstance.countEvent('collectCycleData', 'cycle_data_validation_failed_' + cycle.mode, 1) + Logger.mainLogger.warn( + `collectCycleData: Cycle data validation failed for cycle ${cycle.counter} with marker ${cycle.marker}` + ) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: cycle.marker, + status: 'ERROR', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycle).length, + }, + error: 'Cycle data validation failed', + }) + continue + } + nestedCountersInstance.countEvent('collectCycleData', 'create_new_marker_entry_' + cycle.mode, 1) + Logger.mainLogger.debug( + `collectCycleData: Creating new marker entry for cycle ${cycle.counter} with marker ${cycle.marker}` + ) + receivedCycleTracker[cycle.counter][cycle.marker] = { + cycleInfo: cycle, + certSigners: new Set(receivedCertSigners), + } + Logger.mainLogger.debug('Different Cycle Record received', cycle.counter) + } + receivedCycleTracker[cycle.counter]['received']++ + Logger.mainLogger.debug( + `collectCycleData: Cycle ${cycle.counter} received count: ${receivedCycleTracker[cycle.counter]['received']}` + ) + } else { + if (!validateCycleData(cycle)) { + nestedCountersInstance.countEvent('collectCycleData', 'cycle_data_validation_failed_' + cycle.mode, 1) + Logger.mainLogger.warn( + `collectCycleData: Cycle data validation failed for cycle ${cycle.counter} with marker ${cycle.marker}` + ) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: cycle.marker, + status: 'ERROR', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycle).length, + }, + error: 'Cycle data validation failed', + }) + continue + } + nestedCountersInstance.countEvent('collectCycleData', 'create_new_cycle_tracker_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: Creating new cycle tracker entry for cycle ${cycle.counter}`) + receivedCycleTracker[cycle.counter] = { + [cycle.marker]: { + cycleInfo: cycle, + certSigners: new Set(receivedCertSigners), + }, + received: 1, + saved: false, + } + } + if (config.VERBOSE) Logger.mainLogger.debug('Cycle received', cycle.counter, receivedCycleTracker[cycle.counter]) + + if (NodeList.activeListByIdSorted.length === 0) { + nestedCountersInstance.countEvent('collectCycleData', 'no_active_nodes_direct_process_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: No active nodes, processing cycle ${cycle.counter} directly`) + processCycles([receivedCycleTracker[cycle.counter][cycle.marker].cycleInfo]) + continue + } + + const requiredSenders = dataSenders.size ? 
Math.ceil(dataSenders.size / 2) : 1 + Logger.mainLogger.debug( + `collectCycleData: Cycle ${cycle.counter} requires ${requiredSenders} senders, current count: ${receivedCycleTracker[cycle.counter]['received']}` + ) + + if (receivedCycleTracker[cycle.counter]['received'] >= requiredSenders) { + nestedCountersInstance.countEvent('collectCycleData', 'enough_senders_process_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: Cycle ${cycle.counter} has enough senders, processing`) + + let bestScore = 0 + let bestMarker = '' + let prevMarker = '' + + // If the cache is empty, update the cache from the db + // This change is to prevent the case where the archiver is not running when the cycle is created + // or the archiver is restarted and the cycle is not in the cache / fetching prev marker from empty cache + if (cachedCycleRecords.length === 0) { + updateCacheFromDB() + .then(() => { + // Verify if cachedCycleRecords[0] is the previous cycle + if (cachedCycleRecords.length > 0 && cycle.counter - cachedCycleRecords[0].counter > 1) { + Logger.mainLogger.debug(`updateCacheFromDB: No previous marker found for cycle ${cycle.counter}`) + } + processCycleWithPrevMarker() + }) + .catch((error) => { + Logger.mainLogger.error(`updateCacheFromDB: Error updating cache from db: ${error}`) + }) + } else { + processCycleWithPrevMarker() + } + + function processCycleWithPrevMarker() { + if (cachedCycleRecords.length > 0 && cycle.counter - cachedCycleRecords[0].counter === 1) { + prevMarker = cachedCycleRecords[0].marker + Logger.mainLogger.debug(`collectCycleData: Previous marker for scoring: ${prevMarker}`) + } else { + Logger.mainLogger.debug(`collectCycleData: No previous marker found for cycle ${cycle.counter}`) + return + } + // find the marker with largest sum of its top 3 cert scores + const markers = Object.entries(receivedCycleTracker[cycle.counter]) + .filter(([key]) => key !== 'saved' && key !== 'received') + .map(([, value]) => value) + + Logger.mainLogger.debug( + `collectCycleData: Found ${markers.length} different markers for cycle ${cycle.counter}` + ) + + for (const marker of markers) { + const scores = [] + for (const signer of marker['certSigners']) { + const score = scoreCert(signer as string, prevMarker) + scores.push(score) + Logger.mainLogger.debug(`collectCycleData: Cert from ${signer} scored ${score}`) + } + // get sum of top 3 scores: sort scores in desc order, then slice off first 3 elements, and add them + const sum = scores + .sort((a, b) => b - a) + .slice(0, 3) + .reduce((sum, score) => (sum += score), 0) + + Logger.mainLogger.debug(`collectCycleData: Marker ${marker['cycleInfo'].marker} scored ${sum}`) + + if (sum > bestScore) { + bestScore = sum + bestMarker = marker['cycleInfo'].marker + Logger.mainLogger.debug(`collectCycleData: New best marker: ${bestMarker} with score ${bestScore}`) + } + } + + Logger.mainLogger.debug( + `collectCycleData: Processing cycle ${cycle.counter} with best marker ${bestMarker}, score: ${bestScore}` + ) + processCycles([receivedCycleTracker[cycle.counter][bestMarker].cycleInfo]) + receivedCycleTracker[cycle.counter]['saved'] = true + + nestedCountersInstance.countEvent('collectCycleData', 'cycle_processed_successfully_' + cycle.mode, 1) + + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: bestMarker, + status: 'COMPLETE', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: 
StringUtils.safeStringify(receivedCycleTracker[cycle.counter][bestMarker].cycleInfo).length, + }, + }) + } + } + } + + if (Object.keys(receivedCycleTracker).length > maxCyclesInCycleTracker) { + nestedCountersInstance.countEvent('collectCycleData', 'cleanup_old_cycles', 1) + Logger.mainLogger.debug( + `collectCycleData: Cleaning up old cycles, current count: ${Object.keys(receivedCycleTracker).length}` + ) + for (const counter of Object.keys(receivedCycleTracker)) { + // Clear cycles that are older than last maxCyclesInCycleTracker cycles + if (parseInt(counter) < getCurrentCycleCounter() - maxCyclesInCycleTracker) { + let totalTimes = receivedCycleTracker[counter]['received'] + let logCycle = false + + const markers = Object.entries(receivedCycleTracker[counter]) + .filter(([key]) => key !== 'saved' && key !== 'received') + .map(([, value]) => value) + + // If there is more than one marker for this cycle, output the cycle log + if (markers.length > 1) { + logCycle = true + nestedCountersInstance.countEvent('collectCycleData', 'multiple_markers_for_cycle', 1) + } + + for (const marker of markers) { + Logger.mainLogger.debug( + 'Cycle', + counter, + marker, + /* eslint-disable security/detect-object-injection */ + logCycle ? StringUtils.safeStringify([...receivedCycleTracker[counter][marker]['certSigners']]) : '', + logCycle ? receivedCycleTracker[counter][marker] : '' + ) + } + if (logCycle) Logger.mainLogger.debug(`Cycle ${counter} has ${markers.length} different markers!`) + Logger.mainLogger.debug(`Received ${totalTimes} times for cycle counter ${counter}`) + // eslint-disable-next-line security/detect-object-injection + delete receivedCycleTracker[counter] + } + } + } +} + +export function clearCombinedAccountsData(): void { + combineAccountsData = { + accounts: [], + receipts: [], + } +} + +export interface DataSender { + nodeInfo: NodeList.ConsensusNodeInfo + types: (keyof typeof P2PTypes.SnapshotTypes.TypeNames)[] + contactTimeout?: NodeJS.Timeout | null + replaceTimeout?: NodeJS.Timeout | null +} + +export const dataSenders: Map = new Map() + +export const emitter = new EventEmitter() + +export async function replaceDataSender(publicKey: NodeList.ConsensusNodeInfo['publicKey']): Promise { + nestedCountersInstance.countEvent('archiver', 'replace_data_sender') + if (NodeList.getActiveNodeCount() < 2) { + Logger.mainLogger.debug('There is only one active node in the network. Unable to replace data sender') + return + } + Logger.mainLogger.debug(`replaceDataSender: replacing ${publicKey}`) + + if (!socketClients.has(publicKey) || !dataSenders.has(publicKey)) { + Logger.mainLogger.debug( + 'This data sender is not in the subscribed list! 
Unsubscribing it',
+ publicKey,
+ socketClients.has(publicKey),
+ dataSenders.has(publicKey)
+ )
+ unsubscribeDataSender(publicKey)
+ return
+ }
+ unsubscribeDataSender(publicKey)
+ // eslint-disable-next-line security/detect-object-injection
+ const node = NodeList.byPublicKey.get(publicKey)
+ if (node) {
+ const nodeIndex = NodeList.activeListByIdSorted.findIndex((node) => node.publicKey === publicKey)
+ if (nodeIndex > -1) {
+ const subsetIndex = Math.floor(nodeIndex / currentConsensusRadius)
+ const subsetNodesList = subsetNodesMapByConsensusRadius.get(subsetIndex)
+ if (!subsetNodesList) {
+ Logger.mainLogger.error(`There are no nodes at index ${subsetIndex} of subsetNodesMapByConsensusRadius!`)
+ return
+ }
+ subscribeNodeFromThisSubset(subsetNodesList, subsetIndex)
+ }
+ }
+}
+
+export async function subscribeNodeForDataTransfer(): Promise<void> {
+ if (config.passiveMode) {
+ Logger.mainLogger.debug('Archiver is in passive mode. Skipping data transfer subscription.')
+ return
+ }
+
+ if (config.experimentalSnapshot) {
+ await subscribeConsensorsByConsensusRadius()
+ } else {
+ await StateMetaData.subscribeRandomNodeForDataTransfer()
+ }
+}
+
+/**
+ * Sets a 10s contact timeout
+ * Removes sender from dataSenders on timeout
+ * Selects a new dataSender
+ */
+export function createContactTimeout(publicKey: NodeList.ConsensusNodeInfo['publicKey'], msg = ''): NodeJS.Timeout {
+ const CONTACT_TIMEOUT_MS = 10 * 1000 // Change contact timeout to 10s
+ if (config.VERBOSE) Logger.mainLogger.debug('Created contact timeout: ' + CONTACT_TIMEOUT_MS, `for ${publicKey}`)
+ nestedCountersInstance.countEvent('archiver', 'contact_timeout_created')
+ return setTimeout(() => {
+ // Logger.mainLogger.debug('nestedCountersInstance', nestedCountersInstance)
+ if (nestedCountersInstance) nestedCountersInstance.countEvent('archiver', 'contact_timeout')
+ Logger.mainLogger.debug('REPLACING sender due to CONTACT timeout', msg, publicKey)
+ replaceDataSender(publicKey)
+ }, CONTACT_TIMEOUT_MS)
+}
+
+export function addDataSender(sender: DataSender): void {
+ dataSenders.set(sender.nodeInfo.publicKey, sender)
+}
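
syncFromNetworkConfig below leans on robustQuery from ../Utils: ask several nodes the same question, bucket the answers with an equality function, and only trust a value once a redundancy quorum agrees on it. A self-contained sketch of that pattern; the names and shapes here are illustrative assumptions, not the real robustQuery implementation.

// Sketch of a robust query: return the first response that at least
// `redundancy` nodes agree on (per `isEqual`), or null if no quorum forms.
async function robustQuerySketch<N, R>(
  nodes: N[],
  queryFn: (node: N) => Promise<R | null>,
  isEqual: (a: R, b: R) => boolean,
  redundancy = 3
): Promise<R | null> {
  const buckets: { value: R; count: number }[] = []
  for (const node of nodes) {
    const res = await queryFn(node).catch(() => null)
    if (res == null) continue // unreachable or failed node, skip it
    const bucket = buckets.find((b) => isEqual(b.value, res))
    if (bucket) bucket.count++
    else buckets.push({ value: res, count: 1 })
    const winner = buckets.find((b) => b.count >= redundancy)
    if (winner) return winner.value // quorum reached, stop early
  }
  return null
}
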
+
+async function syncFromNetworkConfig(): Promise<any> {
+ try {
+ // Define the query function to get the network config from a node
+ const queryFn = async (node): Promise<any> => {
+ const REQUEST_NETCONFIG_TIMEOUT_SECOND = 3 // 3s timeout
+ try {
+ const response = await P2P.getJson(`http://${node.ip}:${node.port}/netconfig`, REQUEST_NETCONFIG_TIMEOUT_SECOND)
+ return response
+ } catch (error) {
+ Logger.mainLogger.error(`Error querying node ${node.ip}:${node.port}: ${error}`)
+ return null
+ }
+ }
+ // Define the equality function to compare two responses
+ const equalityFn = (responseA, responseB): boolean => {
+ return responseA?.config?.sharding?.nodesPerConsensusGroup === responseB?.config?.sharding?.nodesPerConsensusGroup
+ }
+ // Get a list of at most 10 random active nodes, or the first node if no active nodes are available
+ const nodes = NodeList.getActiveNodeCount() > 0 ? NodeList.getRandomActiveNodes(10) : [NodeList.getFirstNode()]
+ // Use robustQuery to get the consensusRadius from multiple nodes
+ const tallyItem = await robustQuery(
+ nodes,
+ queryFn,
+ equalityFn,
+ 3 // Redundancy (minimum 3 nodes should return the same result to reach consensus)
+ )
+ if (tallyItem?.value?.config?.stateManager) {
+ // Updating the Archiver Config as per the latest Network Config
+ const {
+ useNewPOQ: newPOQReceipt,
+ configChangeMaxChangesToKeep,
+ configChangeMaxCyclesToKeep,
+ maxCyclesShardDataToKeep,
+ } = tallyItem.value.config.stateManager
+ // const devPublicKeys = tallyItem.value.config.debug.devPublicKeys
+ // const devPublicKey =
+ // devPublicKeys &&
+ // Object.keys(devPublicKeys).length >= 3 &&
+ // Object.keys(devPublicKeys).find((key) => devPublicKeys[key] === 3)
+ // if (
+ // devPublicKey &&
+ // typeof devPublicKey === typeof config.DevPublicKey &&
+ // devPublicKey !== config.DevPublicKey
+ // )
+ // updateConfig({ DevPublicKey: devPublicKey })
+ if (
+ !Utils.isUndefined(newPOQReceipt) &&
+ typeof newPOQReceipt === typeof config.newPOQReceipt &&
+ newPOQReceipt !== config.newPOQReceipt
+ )
+ updateConfig({ newPOQReceipt })
+ if (
+ !Utils.isUndefined(configChangeMaxChangesToKeep) &&
+ typeof configChangeMaxChangesToKeep === typeof config.configChangeMaxChangesToKeep &&
+ configChangeMaxChangesToKeep !== config.configChangeMaxChangesToKeep
+ )
+ updateConfig({ configChangeMaxChangesToKeep })
+ if (
+ !Utils.isUndefined(configChangeMaxCyclesToKeep) &&
+ typeof configChangeMaxCyclesToKeep === typeof config.configChangeMaxCyclesToKeep &&
+ configChangeMaxCyclesToKeep !== config.configChangeMaxCyclesToKeep
+ )
+ updateConfig({ configChangeMaxCyclesToKeep })
+ if (
+ !Utils.isUndefined(maxCyclesShardDataToKeep) &&
+ typeof maxCyclesShardDataToKeep === typeof config.maxCyclesShardDataToKeep &&
+ maxCyclesShardDataToKeep !== config.maxCyclesShardDataToKeep
+ )
+ updateConfig({ maxCyclesShardDataToKeep })
+ return tallyItem
+ }
+ return null
+ } catch (error) {
+ Logger.mainLogger.error('❌ Error in syncFromNetworkConfig: ', error)
+ return null
+ }
+}
+
+export async function getConsensusRadius(): Promise<number> {
+ // If there is no node, return the existing currentConsensusRadius
+ if (NodeList.isEmpty()) return currentConsensusRadius
+
+ const tallyItem = await syncFromNetworkConfig()
+ if (tallyItem?.value?.config) {
+ const nodesPerEdgeFromConfig = tallyItem.value.config.sharding?.nodesPerEdge
+ const nodesPerConsensusGroupFromConfig = tallyItem.value.config.sharding?.nodesPerConsensusGroup
+
+ if (!Number.isInteger(nodesPerConsensusGroupFromConfig) || nodesPerConsensusGroupFromConfig <= 0) {
+ Logger.mainLogger.error('nodesPerConsensusGroup is not a valid number:', nodesPerConsensusGroupFromConfig)
+ return currentConsensusRadius
+ }
+
+ if (!Number.isInteger(nodesPerEdgeFromConfig) || nodesPerEdgeFromConfig <= 0) {
+ Logger.mainLogger.error('nodesPerEdge is not a valid number:', nodesPerEdgeFromConfig)
+ return currentConsensusRadius
+ }
+ if (nodesPerConsensusGroup === nodesPerConsensusGroupFromConfig && nodesPerEdge === nodesPerEdgeFromConfig)
+ return currentConsensusRadius
+ nodesPerConsensusGroup = nodesPerConsensusGroupFromConfig
+ nodesPerEdge = nodesPerEdgeFromConfig
+ // Upgrading consensus size to an odd number
+ if (nodesPerConsensusGroup % 2 === 0) nodesPerConsensusGroup++
+ const consensusRadius = Math.floor((nodesPerConsensusGroup - 1) / 2)
+ // Validation: Ensure consensusRadius is a number and greater than zero
+ if (typeof 
consensusRadius !== 'number' || isNaN(consensusRadius) || consensusRadius <= 0) { + Logger.mainLogger.error('Invalid consensusRadius:', consensusRadius) + return currentConsensusRadius // Return the existing currentConsensusRadius in case of invalid consensusRadius + } + Logger.mainLogger.debug( + 'consensusRadius', + consensusRadius, + 'nodesPerConsensusGroup', + nodesPerConsensusGroup, + 'nodesPerEdge', + nodesPerEdge + ) + return consensusRadius + } + Logger.mainLogger.error('Failed to get consensusRadius from the network') + // If no consensus was reached, return the existing currentConsensusRadius + return currentConsensusRadius +} + +export async function createDataTransferConnection(newSenderInfo: NodeList.ConsensusNodeInfo): Promise { + // // Verify node before subscribing for data transfer + // const status = await verifyNode(newSenderInfo) + // if (!status) return false + // Subscribe this node for dataRequest + const response = await sendDataRequest(newSenderInfo, DataRequestTypes.SUBSCRIBE) + if (response) { + initSocketClient(newSenderInfo) + // Add new dataSender to dataSenders + const newSender: DataSender = { + nodeInfo: newSenderInfo, + types: [P2PTypes.SnapshotTypes.TypeNames.CYCLE, P2PTypes.SnapshotTypes.TypeNames.STATE_METADATA], + contactTimeout: createContactTimeout( + newSenderInfo.publicKey, + 'This timeout is created during newSender selection' + ), + } + addDataSender(newSender) + Logger.mainLogger.debug(`added new sender ${newSenderInfo.publicKey} to dataSenders`) + } + return response +} + +function shouldSubscribeToMoreConsensors(): boolean { + return config.subscribeToMoreConsensors && currentConsensusRadius > 5 +} + +export async function createNodesGroupByConsensusRadius(): Promise { + const consensusRadius = await getConsensusRadius() + if (consensusRadius === 0) { + Logger.mainLogger.error('Consensus radius is 0, unable to create nodes group.') + return // Early return to prevent further execution + } + currentConsensusRadius = consensusRadius + const activeList = [...NodeList.activeListByIdSorted] + if (config.VERBOSE) Logger.mainLogger.debug('activeList', activeList.length, activeList) + let totalNumberOfNodesToSubscribe = Math.ceil(activeList.length / consensusRadius) + if (shouldSubscribeToMoreConsensors()) { + totalNumberOfNodesToSubscribe += totalNumberOfNodesToSubscribe * config.extraConsensorsToSubscribe + } + Logger.mainLogger.debug('totalNumberOfNodesToSubscribe', totalNumberOfNodesToSubscribe) + subsetNodesMapByConsensusRadius = new Map() + let round = 0 + for (let i = 0; i < activeList.length; i += consensusRadius) { + const subsetList: NodeList.ConsensusNodeInfo[] = activeList.slice(i, i + consensusRadius) + subsetNodesMapByConsensusRadius.set(round, subsetList) + round++ + } + if (config.VERBOSE) Logger.mainLogger.debug('subsetNodesMapByConsensusRadius', subsetNodesMapByConsensusRadius) +} + +export async function subscribeConsensorsByConsensusRadius(): Promise { + await createNodesGroupByConsensusRadius() + for (const [i, subsetList] of subsetNodesMapByConsensusRadius) { + if (config.VERBOSE) Logger.mainLogger.debug('Round', i, 'subsetList', subsetList, dataSenders.keys()) + subscribeNodeFromThisSubset(subsetList, i) + } +} + +export async function subscribeNodeFromThisSubset( + nodeList: NodeList.ConsensusNodeInfo[], + roundIndex: number +): Promise { + // First check if there is any subscribed node from this subset + const subscribedNodesFromThisSubset = [] + for (const node of nodeList) { + if (dataSenders.has(node.publicKey)) { + if 
(config.VERBOSE) + Logger.mainLogger.debug('This node from the subset is in the subscribed list!', node.publicKey) + subscribedNodesFromThisSubset.push(node.publicKey) + } + } + let numberOfNodesToSubsribe = 1 + if (shouldSubscribeToMoreConsensors()) { + numberOfNodesToSubsribe += config.extraConsensorsToSubscribe + nestedCountersInstance.countEvent( + 'nodeSubscription', + 'add extra consensor(s): ' + config.extraConsensorsToSubscribe + ) + } else { + nestedCountersInstance.countEvent('nodeSubscription', 'add consensor: ') + } + if (subscribedNodesFromThisSubset.length > numberOfNodesToSubsribe) { + // If there is more than one subscribed node from this subset, unsubscribe the extra ones + for (const publicKey of subscribedNodesFromThisSubset.splice(numberOfNodesToSubsribe)) { + Logger.mainLogger.debug('Unsubscribing extra node from this subset', publicKey) + unsubscribeDataSender(publicKey) + } + } + if (config.VERBOSE) Logger.mainLogger.debug('Subscribed nodes from this subset', subscribedNodesFromThisSubset) + if (subscribedNodesFromThisSubset.length === numberOfNodesToSubsribe) return + Logger.mainLogger.debug( + `Subscribing node(s) from this subset! numberOfNodesToSubsribe: ${numberOfNodesToSubsribe} roundIndex: ${roundIndex}` + ) + // Pick a new dataSender from this subset + let subsetList = [...nodeList] + // Pick a random dataSender + let newSenderInfo = nodeList[Math.floor(Math.random() * nodeList.length)] + let connectionStatus = false + let retry = 0 + const MAX_RETRY_SUBSCRIPTION = 3 * numberOfNodesToSubsribe + while (retry < MAX_RETRY_SUBSCRIPTION && subscribedNodesFromThisSubset.length < numberOfNodesToSubsribe) { + if (!dataSenders.has(newSenderInfo.publicKey)) { + connectionStatus = await createDataTransferConnection(newSenderInfo) + if (connectionStatus) { + // Check if the newSender is in the subscribed nodes of this subset + if (!subscribedNodesFromThisSubset.includes(newSenderInfo.publicKey)) { + subscribedNodesFromThisSubset.push(newSenderInfo.publicKey) + Logger.mainLogger.debug( + `Added new sender to the subscribed nodes of this subset. publicKey:${newSenderInfo.publicKey}, numberOfNodesToSubsribe:${numberOfNodesToSubsribe}, roundIndex${roundIndex}` + ) + } + } + } else { + // Add the newSender to the subscribed nodes of this subset + if (!subscribedNodesFromThisSubset.includes(newSenderInfo.publicKey)) { + subscribedNodesFromThisSubset.push(newSenderInfo.publicKey) + Logger.mainLogger.debug( + `accounting for existing? sender to the subscribed nodes of this subset. 
publicKey:${newSenderInfo.publicKey}, numberOfNodesToSubsribe:${numberOfNodesToSubsribe}, roundIndex${roundIndex}` + ) + } + } + subsetList = subsetList.filter((node) => node.publicKey !== newSenderInfo.publicKey) + if (subsetList.length > 0) { + newSenderInfo = subsetList[Math.floor(Math.random() * subsetList.length)] + } else { + subsetList = [...nodeList] + retry++ + } + } +} + +// This function is used for both subscribe and unsubscribe for data request +export async function sendDataRequest( + nodeInfo: NodeList.ConsensusNodeInfo, + dataRequestType: DataRequestTypes +): Promise { + const dataRequest = { + dataRequestCycle: getCurrentCycleCounter(), + dataRequestType, + publicKey: State.getNodeInfo().publicKey, + nodeInfo: State.getNodeInfo(), + } + const taggedDataRequest = Crypto.tag(dataRequest, nodeInfo.publicKey) + Logger.mainLogger.info(`Sending ${dataRequestType} data request to consensor.`, nodeInfo.ip + ':' + nodeInfo.port) + let reply = false + const REQUEST_DATA_TIMEOUT_SECOND = 2 // 2s timeout + const response = await P2P.postJson( + `http://${nodeInfo.ip}:${nodeInfo.port}/requestdata`, + taggedDataRequest, + REQUEST_DATA_TIMEOUT_SECOND + ) + Logger.mainLogger.debug('/requestdata response', response, nodeInfo.ip + ':' + nodeInfo.port) + if (response && response.success) reply = response.success + return reply +} + +export const clearDataSenders = async (): Promise => { + for (const [publicKey] of dataSenders) { + unsubscribeDataSender(publicKey) + } + await Utils.sleep(2000) // Wait for 2s to make sure all dataSenders are unsubscribed + dataSenders.clear() + socketClients.clear() + subsetNodesMapByConsensusRadius.clear() +} + +export function calcIncomingTimes(record: P2PTypes.CycleCreatorTypes.CycleRecord): IncomingTimes { + const SECOND = 1000 + const cycleDuration = record.duration * SECOND + const quarterDuration = cycleDuration / 4 + const start = record.start * SECOND + cycleDuration + const startQ1 = start + const startQ2 = start + quarterDuration + const startQ3 = start + 2 * quarterDuration + const startQ4 = start + 3 * quarterDuration + const end = start + cycleDuration + return { quarterDuration, startQ1, startQ2, startQ3, startQ4, end } +} + +export async function joinNetwork(nodeList: NodeList.ConsensusNodeInfo[], isFirstTime: boolean): Promise { + if (config.passiveMode) { + Logger.mainLogger.debug('joinNetwork-skipped passive mode') + return true + } + + + Logger.mainLogger.debug('Is firstTime', isFirstTime) + if (!isFirstTime) { + const isJoined: boolean = await checkJoinStatus(nodeList) + if (isJoined) { + return isJoined + } + } + Logger.mainLogger.debug('nodeList To Submit Join Request', nodeList) + // try to get latestCycleRecord with a robust query + const latestCycle = await getNewestCycleFromConsensors(nodeList) + + // Figure out when Q1 is from the latestCycle + const { startQ1 } = calcIncomingTimes(latestCycle) + const shuffledNodes = [...nodeList] + Utils.shuffleArray(shuffledNodes) + + // Wait until a Q1 then send join request to active nodes + let untilQ1 = startQ1 - Date.now() + while (untilQ1 < 0) { + untilQ1 += latestCycle.duration * 1000 + } + + Logger.mainLogger.debug(`Waiting ${untilQ1 + 500} ms for Q1 before sending join...`) + await Utils.sleep(untilQ1 + 500) // Not too early + + // Create a fresh join request, so that the request timestamp range is acceptable + const request = P2P.createArchiverJoinRequest() + await submitJoin(nodeList, request) + + // Wait approx. one cycle then check again + Logger.mainLogger.debug('Waiting approx. 
one cycle then checking again...')
+ await Utils.sleep(latestCycle.duration * 1000 + 500)
+ return false
+}
+
+export async function submitJoin(
+ nodes: NodeList.ConsensusNodeInfo[],
+ joinRequest: P2P.ArchiverJoinRequest & SignedObject
+): Promise<void> {
+ if (config.passiveMode) {
+ Logger.mainLogger.debug('submitJoin-skipped passive mode')
+ return
+ }
+ // Send the join request to a handful of the active nodes all at once
+ const selectedNodes = Utils.getRandom(nodes, Math.min(nodes.length, 5))
+ Logger.mainLogger.debug(`Sending join request to ${selectedNodes.map((n) => `${n.ip}:${n.port}`)}`)
+ for (const node of selectedNodes) {
+ const response = await P2P.postJson(`http://${node.ip}:${node.port}/joinarchiver`, joinRequest)
+ Logger.mainLogger.debug('Join request response:', response)
+ }
+}
+
+export async function sendLeaveRequest(nodes: NodeList.ConsensusNodeInfo[]): Promise<void> {
+ if (config.passiveMode) {
+ Logger.mainLogger.debug('sendLeaveRequest-skipped passive mode')
+ return
+ }
+
+ const leaveRequest = P2P.createArchiverLeaveRequest()
+ Logger.mainLogger.debug(`Sending leave request to ${nodes.map((n) => `${n.ip}:${n.port}`)}`)
+
+ const promises = nodes.map((node) =>
+ customFetch(`http://${node.ip}:${node.port}/leavingarchivers`, {
+ method: 'post',
+ body: StringUtils.safeStringify(leaveRequest),
+ headers: { 'Content-Type': 'application/json' },
+ timeout: 2 * 1000, // 2s timeout
+ }).then((res) => res.json())
+ )
+
+ await Promise.allSettled(promises)
+ .then((responses) => {
+ let i = 0
+ let isLeaveRequestSent = false
+ for (const response of responses) {
+ // eslint-disable-next-line security/detect-object-injection
+ const node = nodes[i]
+ if (response.status === 'fulfilled') {
+ const res = response.value
+ if (res.success) isLeaveRequestSent = true
+ Logger.mainLogger.debug(`Leave request response from ${node.ip}:${node.port}:`, res)
+ } else Logger.mainLogger.debug(`Node is not responding ${node.ip}:${node.port}`)
+ i++
+ }
+ Logger.mainLogger.debug('isLeaveRequestSent', isLeaveRequestSent)
+ })
+ .catch((error) => {
+ // Handle any errors that occurred
+ console.error(error)
+ })
+}
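
joinNetwork above and sendActiveRequest below share one timing trick: derive Q1's wall-clock start for the next cycle from the latest cycle record via calcIncomingTimes, and if that instant is already in the past, roll it forward by whole cycle durations. The arithmetic in isolation, as a sketch; field units follow the cycle record used above (start and duration in seconds):

// Given a cycle record, compute how long to wait so the next request lands
// just after the start of a quarter 1.
function msUntilNextQ1(record: { start: number; duration: number }, now = Date.now()): number {
  const SECOND = 1000
  const cycleDuration = record.duration * SECOND
  const startQ1 = record.start * SECOND + cycleDuration // Q1 of the next cycle
  let untilQ1 = startQ1 - now
  while (untilQ1 < 0) {
    untilQ1 += cycleDuration // already past; roll forward one full cycle
  }
  return untilQ1 + 500 // small buffer so we are not too early
}
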
+
+export async function sendActiveRequest(): Promise<void> {
+ if (config.passiveMode) {
+ Logger.mainLogger.debug('sendActiveRequest-skipped passive mode')
+ return
+ }
+
+ Logger.mainLogger.debug('Sending Active Request to the network!')
+ const latestCycleInfo = await CycleDB.queryLatestCycleRecords(1)
+ const latestCycle = latestCycleInfo[0]
+ // Figure out when Q1 is from the latestCycle
+ const { startQ1 } = calcIncomingTimes(latestCycle)
+
+ // Wait until a Q1 then send active request to active nodes
+ let untilQ1 = startQ1 - Date.now()
+ while (untilQ1 < 0) {
+ untilQ1 += latestCycle.duration * 1000
+ }
+
+ Logger.mainLogger.debug(`Waiting ${untilQ1 + 500} ms for Q1 before sending active...`)
+ await Utils.sleep(untilQ1 + 500) // Not too early
+
+ const activeRequest = P2P.createArchiverActiveRequest()
+ // Send the active request to a handful of the active nodes all at once
+ const nodes = NodeList.getRandomActiveNodes(5)
+ Logger.mainLogger.debug(`Sending active request to ${nodes.map((n) => `${n.ip}:${n.port}`)}`)
+
+ const promises = nodes.map((node) =>
+ customFetch(`http://${node.ip}:${node.port}/activearchiver`, {
+ method: 'post',
+ body: StringUtils.safeStringify(activeRequest),
+ headers: { 'Content-Type': 'application/json' },
+ timeout: 2 * 1000, // 2s timeout
+ }).then((res) => res.json())
+ )
+
+ await Promise.allSettled(promises)
+ .then((responses) => {
+ let i = 0
+ for (const response of responses) {
+ // eslint-disable-next-line security/detect-object-injection
+ const node = nodes[i]
+ if (response.status === 'fulfilled') {
+ const res = response.value
+ Logger.mainLogger.debug(`Active request response from ${node.ip}:${node.port}:`, res)
+ } else Logger.mainLogger.debug(`Node is not responding ${node.ip}:${node.port}`)
+ i++
+ }
+ })
+ .catch((error) => {
+ // Handle any errors that occurred
+ console.error(error)
+ })
+
+ // Wait approx. one cycle then check again
+ Logger.mainLogger.debug('Waiting approx. one cycle then checking again...')
+ await Utils.sleep(latestCycle.duration * 1000 + 500)
+}
+
+export async function getCycleDuration(): Promise<number> {
+ const response = (await queryFromArchivers(RequestDataType.CYCLE, { count: 1 })) as ArchiverCycleResponse
+ if (response && response.cycleInfo) {
+ return response.cycleInfo[0].duration
+ }
+ return 0
+}
+
+/*
+ checkJoinStatus checks if the current archiver node is joined to a network.
+ This queries the /joinedArchiver endpoint on the nodes and returns the joining status based on the majority response.
+*/
+export async function checkJoinStatus(activeNodes: NodeList.ConsensusNodeInfo[]): Promise<boolean> {
+ if (config.passiveMode) {
+ Logger.mainLogger.debug('checkJoinStatus-skipped passive mode')
+ return false
+ }
+
+ Logger.mainLogger.debug('checkJoinStatus: Checking join status')
+ const ourNodeInfo = State.getNodeInfo()
+
+ const queryFn = async (node: NodeList.ConsensusNodeInfo): Promise<JoinStatus> => {
+ const url = `http://${node.ip}:${node.port}/joinedArchiver/${ourNodeInfo.publicKey}`
+ try {
+ return (await getJson(url)) as JoinStatus
+ } catch (e) {
+ Logger.mainLogger.error(`Error querying node ${node.ip}:${node.port}: ${e}`)
+ throw e
+ }
+ }
+
+ try {
+ const joinStatus = await robustQuery(activeNodes, queryFn)
+ Logger.mainLogger.debug(`checkJoinStatus: Join status: ${joinStatus.value.isJoined}`)
+ return joinStatus.value.isJoined
+ } catch (e) {
+ Logger.mainLogger.error(`Error in checkJoinStatus: ${e}`)
+ return false
+ }
+}
+
+// This will be used once the activeArchivers field is added to the cycle record
+export async function checkActiveStatus(): Promise<boolean> {
+ if (!config.checkActiveStatus) {
+ console.log('checkActiveStatus-skipped: flag disabled')
+ return false
+ }
+
+ Logger.mainLogger.debug('Checking active status')
+ const ourNodeInfo = State.getNodeInfo()
+ try {
+ const latestCycle = await getNewestCycleFromArchivers()
+
+ if (latestCycle && latestCycle['activeArchivers']) {
+ const activeArchivers = latestCycle['activeArchivers']
+ Logger.mainLogger.debug('cycle counter', latestCycle.counter)
+ Logger.mainLogger.debug('Active archivers', activeArchivers)
+
+ const isActive = activeArchivers.some((a: State.ArchiverNodeInfo) => a.publicKey === ourNodeInfo.publicKey)
+ Logger.mainLogger.debug('isActive', isActive)
+ return isActive
+ } else {
+ return false
+ }
+ } catch (e) {
+ Logger.mainLogger.error(e)
+ return false
+ }
+}
+
+export async function getTotalDataFromArchivers(): Promise<ArchiverTotalDataResponse | null> {
+ const res = (await queryFromArchivers(
+ RequestDataType.TOTALDATA,
+ {},
+ QUERY_TIMEOUT_MAX
+ )) as ArchiverTotalDataResponse | null
+ // @ts-ignore
+ if (!res || (res.success !== undefined && res.success === false)) {
+ return null
+ }
+ return res
+}
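
Both genesis sync routines below follow one pagination shape: fetch page after page until a short page, one smaller than the per-request maximum, signals the end. The loop distilled into a sketch; fetchPage and store are hypothetical stand-ins for the queryFromArchivers and storeAccountData calls.

// Generic page-until-short-page loop, with a small retry cap on bad responses.
async function drainPages<T>(
  fetchPage: (page: number) => Promise<T[] | null>,
  pageSize: number,
  store: (items: T[]) => Promise<void>
): Promise<number> {
  let total = 0
  let failures = 0
  for (let page = 1; ; ) {
    const items = await fetchPage(page)
    if (!items) {
      // invalid response; retry the same page a few times, then give up
      if (++failures > 3) break
      continue
    }
    failures = 0
    await store(items)
    total += items.length
    if (items.length < pageSize) break // short page => no more data
    page++
  }
  return total
}
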
+
+export async function syncGenesisAccountsFromArchiver(): Promise<void> {
+ let complete = false
+ let startAccount = 0
+ let endAccount = startAccount + MAX_ACCOUNTS_PER_REQUEST
+ let totalGenesisAccounts = 0
+ // const totalExistingGenesisAccounts =
+ // await AccountDB.queryAccountCountBetweenCycles(0, 5);
+ // if (totalExistingGenesisAccounts > 0) {
+ // // Let's assume it has synced data for now, update to sync account count between them
+ // return;
+ // }
+ const res = (await queryFromArchivers(
+ RequestDataType.ACCOUNT,
+ { startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle, endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle },
+ QUERY_TIMEOUT_MAX
+ )) as ArchiverAccountResponse
+ if (config.VERBOSE) Logger.mainLogger.error('Genesis Total Accounts Response', StringUtils.safeStringify(res))
+ if (res && (res.totalAccounts || res.totalAccounts === 0)) {
+ totalGenesisAccounts = res.totalAccounts
+ Logger.mainLogger.debug('TotalGenesis Accounts', totalGenesisAccounts)
+ } else {
+ Logger.mainLogger.error('Genesis Total Accounts Query', 'Invalid download response')
+ return
+ }
+ if (totalGenesisAccounts <= 0) return
+ let page = 1
+ while (!complete) {
+ Logger.mainLogger.debug(`Downloading accounts from ${startAccount} to ${endAccount}`)
+ const response = (await queryFromArchivers(
+ RequestDataType.ACCOUNT,
+ {
+ startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle,
+ endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle,
+ page,
+ },
+ QUERY_TIMEOUT_MAX
+ )) as ArchiverAccountResponse
+ if (response && response.accounts) {
+ if (response.accounts.length < MAX_ACCOUNTS_PER_REQUEST) {
+ complete = true
+ Logger.mainLogger.debug('Download completed for accounts')
+ }
+ Logger.mainLogger.debug(`Downloaded accounts`, response.accounts.length)
+ await storeAccountData({ accounts: response.accounts })
+ startAccount = endAccount + 1
+ endAccount += MAX_ACCOUNTS_PER_REQUEST
+ page++
+ } else {
+ Logger.mainLogger.debug('Genesis Accounts Query', 'Invalid download response')
+ }
+ // await sleep(1000);
+ }
+ Logger.mainLogger.debug('Sync genesis accounts completed!')
+}
+
+export async function syncGenesisTransactionsFromArchiver(): Promise<void> {
+ let complete = false
+ let startTransaction = 0
+ let endTransaction = startTransaction + MAX_ACCOUNTS_PER_REQUEST // Same as the number of accounts per request
+ let totalGenesisTransactions = 0
+
+ const res = (await queryFromArchivers(
+ RequestDataType.TRANSACTION,
+ {
+ startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle,
+ endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle,
+ },
+ QUERY_TIMEOUT_MAX
+ )) as ArchiverTransactionResponse
+ if (config.VERBOSE) Logger.mainLogger.error('Genesis Total Transaction Response', StringUtils.safeStringify(res))
+ if (res && (res.totalTransactions || res.totalTransactions === 0)) {
+ totalGenesisTransactions = res.totalTransactions
+ Logger.mainLogger.debug('TotalGenesis Transactions', totalGenesisTransactions)
+ } else {
+ Logger.mainLogger.error('Genesis Total Transaction Query', 'Invalid download response')
+ return
+ }
+ if (totalGenesisTransactions <= 0) return
+ let page = 1
+ while (!complete) {
+ Logger.mainLogger.debug(`Downloading transactions from ${startTransaction} to ${endTransaction}`)
+ const response = (await queryFromArchivers(
+ RequestDataType.TRANSACTION,
+ {
+ startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle,
+ endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle,
+ page,
+ },
+ QUERY_TIMEOUT_MAX
+ )) as ArchiverTransactionResponse
+ if (response && response.transactions) {
+ if (response.transactions.length < MAX_ACCOUNTS_PER_REQUEST) {
+ complete = true
+ Logger.mainLogger.debug('Download completed for transactions')
+ }
+ Logger.mainLogger.debug(`Downloaded transactions`, response.transactions.length)
+ await storeAccountData({ receipts: 
response.transactions }) + startTransaction = endTransaction + 1 + endTransaction += MAX_ACCOUNTS_PER_REQUEST + page++ + } else { + Logger.mainLogger.debug('Genesis Transactions Query', 'Invalid download response') + } + // await sleep(1000); + } + Logger.mainLogger.debug('Sync genesis transactions completed!') +} + +export async function syncGenesisAccountsFromConsensor( + totalGenesisAccounts = 0, + firstConsensor: NodeList.ConsensusNodeInfo +): Promise { + if (totalGenesisAccounts <= 0) return + let startAccount = 0 + // let combineAccountsData = []; + let totalDownloadedAccounts = 0 + while (startAccount <= totalGenesisAccounts) { + Logger.mainLogger.debug(`Downloading accounts from ${startAccount}`) + const response = (await P2P.getJson( + `http://${firstConsensor.ip}:${firstConsensor.port}/genesis_accounts?start=${startAccount}`, + QUERY_TIMEOUT_MAX + )) as ArchiverAccountResponse + if (response && response.accounts) { + if (response.accounts.length < MAX_ACCOUNTS_PER_REQUEST) { + Logger.mainLogger.debug('Download completed for accounts') + } + Logger.mainLogger.debug(`Downloaded accounts`, response.accounts.length) + // TODO - update to include receipts data also + await storeAccountData({ accounts: response.accounts }) + // combineAccountsData = [...combineAccountsData, ...response.accounts]; + totalDownloadedAccounts += response.accounts.length + startAccount += MAX_ACCOUNTS_PER_REQUEST + } else { + Logger.mainLogger.debug('Genesis Accounts Query', 'Invalid download response') + } + // await sleep(1000); + } + Logger.mainLogger.debug(`Total downloaded accounts`, totalDownloadedAccounts) + // await storeAccountData(combineAccountsData); + Logger.mainLogger.debug('Sync genesis accounts completed!') +} + +export async function buildNodeListFromStoredCycle( + lastStoredCycle: P2PTypes.CycleCreatorTypes.CycleData +): Promise { + Logger.mainLogger.debug('lastStoredCycle', lastStoredCycle) + Logger.mainLogger.debug('buildNodeListFromStoredCycle:') + Logger.mainLogger.debug(`Syncing till cycle ${lastStoredCycle.counter}...`) + const cyclesToGet = 2 * Math.floor(Math.sqrt(lastStoredCycle.active)) + 2 + Logger.mainLogger.debug(`Cycles to get is ${cyclesToGet}`) + + const CycleChain = [] + const squasher = new ChangeSquasher() + + CycleChain.unshift(lastStoredCycle) + squasher.addChange(parse(CycleChain[0])) + + do { + // Get prevCycles from the network + let end: number = CycleChain[0].counter - 1 + let start: number = end - cyclesToGet + if (start < 0) start = 0 + if (end < start) end = start + Logger.mainLogger.debug(`Getting cycles ${start} - ${end}...`) + const prevCycles = await CycleDB.queryCycleRecordsBetween(start, end) + + // If prevCycles is empty, start over + if (prevCycles.length < 1) throw new Error('Got empty previous cycles') + + prevCycles.sort((a, b) => (a.counter > b.counter ? 
-1 : 1)) + + // Add prevCycles to our cycle chain + let prepended = 0 + for (const prevCycle of prevCycles) { + // Prepend the cycle to our cycle chain + CycleChain.unshift(prevCycle) + squasher.addChange(parse(prevCycle)) + prepended++ + + if ( + squasher.final.updated.length >= activeNodeCount(lastStoredCycle) && + squasher.final.added.length >= totalNodeCount(lastStoredCycle) + ) { + break + } + } + + Logger.mainLogger.debug( + `Got ${squasher.final.updated.length} active nodes, need ${activeNodeCount(lastStoredCycle)}` + ) + Logger.mainLogger.debug(`Got ${squasher.final.added.length} total nodes, need ${totalNodeCount(lastStoredCycle)}`) + if (squasher.final.added.length < totalNodeCount(lastStoredCycle)) + Logger.mainLogger.debug('Short on nodes. Need to get more cycles. Cycle:' + lastStoredCycle.counter) + + // If you weren't able to prepend any of the prevCycles, start over + if (prepended < 1) throw new Error('Unable to prepend any previous cycles') + } while ( + squasher.final.updated.length < activeNodeCount(lastStoredCycle) || + squasher.final.added.length < totalNodeCount(lastStoredCycle) + ) + + applyNodeListChange(squasher.final) + Logger.mainLogger.debug('NodeList after sync', NodeList.getActiveList()) + Cycles.setCurrentCycleCounter(lastStoredCycle.counter) + Cycles.setCurrentCycleMarker(lastStoredCycle.marker) + Cycles.setCurrentCycleDuration(lastStoredCycle.duration) + Logger.mainLogger.debug('Latest cycle after sync', lastStoredCycle.counter) +} + +export async function syncCyclesAndNodeList(lastStoredCycleCount = 0): Promise { + // Get the networks newest cycle as the anchor point for sync + Logger.mainLogger.debug('Getting newest cycle...') + const cycleToSyncTo = await getNewestCycleFromArchivers() + Logger.mainLogger.debug('cycleToSyncTo', cycleToSyncTo) + Logger.mainLogger.debug(`Syncing till cycle ${cycleToSyncTo.counter}...`) + + const cyclesToGet = 2 * Math.floor(Math.sqrt(cycleToSyncTo.active)) + 2 + Logger.mainLogger.debug(`Cycles to get is ${cyclesToGet}`) + + const CycleChain = [] + const squasher = new ChangeSquasher() + + CycleChain.unshift(cycleToSyncTo) + squasher.addChange(parse(CycleChain[0])) + + do { + // Get prevCycles from the network + let end: number = CycleChain[0].counter - 1 + let start: number = end - cyclesToGet + if (start < 0) start = 0 + if (end < start) end = start + Logger.mainLogger.debug(`Getting cycles ${start} - ${end}...`) + const prevCycles = await fetchCycleRecords(start, end) + + // If prevCycles is empty, start over + if (prevCycles.length < 1) throw new Error('Got empty previous cycles') + + prevCycles.sort((a, b) => (a.counter > b.counter ? 
-1 : 1)) + + // Add prevCycles to our cycle chain + let prepended = 0 + for (const prevCycle of prevCycles) { + // Stop prepending prevCycles if one of them is invalid + if (validateCycle(prevCycle, CycleChain[0]) === false) { + Logger.mainLogger.error(`Record ${prevCycle.counter} failed validation`) + break + } + // Prepend the cycle to our cycle chain + CycleChain.unshift(prevCycle) + squasher.addChange(parse(prevCycle)) + prepended++ + + if ( + squasher.final.updated.length >= activeNodeCount(cycleToSyncTo) && + squasher.final.added.length >= totalNodeCount(cycleToSyncTo) + ) { + break + } + } + + Logger.mainLogger.debug(`Got ${squasher.final.updated.length} active nodes, need ${activeNodeCount(cycleToSyncTo)}`) + Logger.mainLogger.debug(`Got ${squasher.final.added.length} total nodes, need ${totalNodeCount(cycleToSyncTo)}`) + if (squasher.final.added.length < totalNodeCount(cycleToSyncTo)) + Logger.mainLogger.debug('Short on nodes. Need to get more cycles. Cycle:' + cycleToSyncTo.counter) + + // If you weren't able to prepend any of the prevCycles, start over + if (prepended < 1) throw new Error('Unable to prepend any previous cycles') + } while ( + squasher.final.updated.length < activeNodeCount(cycleToSyncTo) || + squasher.final.added.length < totalNodeCount(cycleToSyncTo) + ) + + applyNodeListChange(squasher.final) + Logger.mainLogger.debug('NodeList after sync', NodeList.getActiveList()) + + for (let i = 0; i < CycleChain.length; i++) { + // eslint-disable-next-line security/detect-object-injection + const record = CycleChain[i] + Cycles.CycleChain.set(record.counter, { ...record }) + if (i === CycleChain.length - 1) await storeCycleData(CycleChain) + Cycles.setCurrentCycleCounter(record.counter) + Cycles.setCurrentCycleMarker(record.marker) + } + Logger.mainLogger.debug('Cycle chain is synced. Size of CycleChain', Cycles.CycleChain.size) + + // Download old cycle Records + let endCycle = CycleChain[0].counter - 1 + Logger.mainLogger.debug('endCycle counter', endCycle, 'lastStoredCycleCount', lastStoredCycleCount) + if (endCycle > lastStoredCycleCount) { + Logger.mainLogger.debug(`Downloading old cycles from cycles ${lastStoredCycleCount} to cycle ${endCycle}!`) + } + let savedCycleRecord = CycleChain[0] + while (endCycle > lastStoredCycleCount) { + let nextEnd: number = endCycle - MAX_CYCLES_PER_REQUEST + if (nextEnd < 0) nextEnd = 0 + Logger.mainLogger.debug(`Getting cycles ${nextEnd} - ${endCycle} ...`) + const prevCycles = await fetchCycleRecords(nextEnd, endCycle) + + // If prevCycles is empty, start over + if (!prevCycles || prevCycles.length < 1) throw new Error('Got empty previous cycles') + prevCycles.sort((a, b) => (a.counter > b.counter ? 
-1 : 1)) + + // Add prevCycles to our cycle chain + const combineCycles = [] + for (const prevCycle of prevCycles) { + // Stop saving prevCycles if one of them is invalid + if (validateCycle(prevCycle, savedCycleRecord) === false) { + Logger.mainLogger.error(`Record ${prevCycle.counter} failed validation`) + Logger.mainLogger.debug('fail', prevCycle, savedCycleRecord) + break + } + savedCycleRecord = prevCycle + combineCycles.push(prevCycle) + } + await storeCycleData(combineCycles) + endCycle = nextEnd - 1 + } +} + +export async function syncCyclesAndNodeListV2( + activeArchivers: State.ArchiverNodeInfo[], + lastStoredCycleCount = 0 +): Promise { + // Sync validator list and get the latest cycle from the network + Logger.mainLogger.debug('Syncing validators and latest cycle...') + const syncResult = await syncV2(activeArchivers) + let cycleToSyncTo: P2PTypes.CycleCreatorTypes.CycleData + if (syncResult.isOk()) { + cycleToSyncTo = syncResult.value + } else { + throw syncResult.error + } + + Logger.mainLogger.debug('cycleToSyncTo', cycleToSyncTo) + Logger.mainLogger.debug(`Syncing till cycle ${cycleToSyncTo.counter}...`) + + currentConsensusRadius = await getConsensusRadius() + await processCycles([cycleToSyncTo]) + + // Download old cycle Records + await downloadOldCycles(cycleToSyncTo, lastStoredCycleCount) + + return true +} + +export async function syncCyclesBetweenCycles(lastStoredCycle = 0, cycleToSyncTo = 0): Promise { + const MAX_RETRIES = 3 + let retryCount = 0 + + let startCycle = lastStoredCycle + let endCycle = startCycle + MAX_CYCLES_PER_REQUEST + + while (cycleToSyncTo > startCycle) { + if (endCycle > cycleToSyncTo) endCycle = cycleToSyncTo + Logger.mainLogger.debug(`Downloading cycles from ${startCycle} to ${endCycle}`) + + let success = false + retryCount = 0 + + while (!success && retryCount < MAX_RETRIES) { + const res = (await queryFromArchivers( + RequestDataType.CYCLE, + { + start: startCycle, + end: endCycle, + }, + QUERY_TIMEOUT_MAX + )) as ArchiverCycleResponse + + if (res && res.cycleInfo) { + const cycles = res.cycleInfo as P2PTypes.CycleCreatorTypes.CycleData[] + Logger.mainLogger.debug(`Downloaded cycles`, cycles.length) + + let validCyclesCount = 0 + for (const cycle of cycles) { + if (!validateCycleData(cycle)) { + Logger.mainLogger.debug('Found invalid cycle data') + continue + } + await processCycles([cycle]) + validCyclesCount++ + } + + success = true + + if (cycles.length < MAX_CYCLES_PER_REQUEST || validCyclesCount === 0) { + startCycle += Math.max(cycles.length, 1) + endCycle = startCycle + MAX_CYCLES_PER_REQUEST + if (startCycle >= cycleToSyncTo) { + Logger.mainLogger.debug('Sync cycles completed!') + return true + } + break + } + } else { + Logger.mainLogger.debug(`Invalid cycle download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) + retryCount++ + if (retryCount >= MAX_RETRIES) { + Logger.mainLogger.error('Max retries reached for cycle download') + return false + } + } + } + + if (success) { + startCycle = endCycle + 1 + endCycle += MAX_CYCLES_PER_REQUEST + } + } + + return true +} + +import { getLastUpdatedCycle, updateLastUpdatedCycle } from '../utils/cycleTracker' + +export async function syncReceipts(): Promise { + const MAX_RETRIES = 3 + let retryCount = 0 + + // Get the last updated cycle from tracker file + const lastUpdatedCycle = getLastUpdatedCycle() + Logger.mainLogger.debug(`[syncReceipts] Last updated cycle from tracker: ${lastUpdatedCycle}`) + + // If we have a valid last updated cycle, use it as the starting point + let 
startCycle = 0
+  if (lastUpdatedCycle > 0) {
+    Logger.mainLogger.info(`[syncReceipts] Starting receipt sync from last updated cycle: ${lastUpdatedCycle}`)
+    startCycle = Math.max(lastUpdatedCycle - config.checkpoint.syncCycleBuffer, 0)
+    await syncReceiptsByCycle(startCycle)
+    return
+  }
+
+  let response: ArchiverTotalDataResponse = await getTotalDataFromArchivers()
+  if (!response || response.totalReceipts < 0) {
+    return
+  }
+
+  let { totalReceipts } = response
+  if (totalReceipts < 1) return
+
+  let complete = false
+  let start = 0
+  let end = start + MAX_RECEIPTS_PER_REQUEST
+
+  while (!complete) {
+    if (end >= totalReceipts) {
+      response = await getTotalDataFromArchivers()
+      if (response && response.totalReceipts > 0) {
+        if (response.totalReceipts > totalReceipts) totalReceipts = response.totalReceipts
+        Logger.mainLogger.debug('totalReceiptsToSync', totalReceipts)
+      }
+    }
+
+    Logger.mainLogger.debug(`Downloading receipts from ${start} to ${end}`)
+    let success = false
+    retryCount = 0
+
+    while (!success && retryCount < MAX_RETRIES) {
+      const res = (await queryFromArchivers(
+        RequestDataType.RECEIPT,
+        {
+          start: start,
+          end: end,
+        },
+        QUERY_TIMEOUT_MAX
+      )) as ArchiverReceiptResponse
+
+      if (res && res.receipts) {
+        const downloadedReceipts = res.receipts as ReceiptDB.Receipt[]
+        Logger.mainLogger.debug(`Downloaded receipts`, downloadedReceipts.length)
+        await storeReceiptData(downloadedReceipts, '', false, false, true)
+        success = true
+
+        if (downloadedReceipts.length < MAX_RECEIPTS_PER_REQUEST) {
+          start += downloadedReceipts.length
+          end = start + MAX_RECEIPTS_PER_REQUEST
+          response = await getTotalDataFromArchivers()
+          if (response && response.totalReceipts > 0) {
+            if (response.totalReceipts > totalReceipts) totalReceipts = response.totalReceipts
+            if (start >= totalReceipts) {
+              complete = true
+              Logger.mainLogger.debug('Download receipts completed')
+            }
+          }
+        }
+      } else {
+        Logger.mainLogger.debug(`Invalid download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`)
+        retryCount++
+        if (retryCount >= MAX_RETRIES) {
+          Logger.mainLogger.error('Max retries reached for receipt download')
+          start = end + 1
+          end += MAX_RECEIPTS_PER_REQUEST
+          // Check if we've passed total receipts after incrementing
+          if (start >= totalReceipts) {
+            complete = true
+          }
+        }
+      }
+    }
+
+    if (success) {
+      start = end + 1
+      end += MAX_RECEIPTS_PER_REQUEST
+    }
+  }
+
+  Logger.mainLogger.debug('Sync receipts data completed!')
+}
+
+class ArchiverSelector {
+  private archivers: ArchiverWithRetries[]
+  private currentIndex: number = 0
+  private readonly maxRetries: number = 3
+
+  constructor() {
+    this.archivers = State.otherArchivers.map((archiver) => ({
+      archiver,
+      retriesLeft: this.maxRetries,
+    }))
+    Utils.shuffleArray(this.archivers)
+  }
+
+  getCurrentArchiver(): State.ArchiverNodeInfo | null {
+    if (this.currentIndex >= this.archivers.length) {
+      return null
+    }
+    return this.archivers[this.currentIndex].archiver
+  }
+
+  markCurrentArchiverFailed(): State.ArchiverNodeInfo | null {
+    if (this.currentIndex >= this.archivers.length) {
+      return null
+    }
+
+    this.archivers[this.currentIndex].retriesLeft--
+
+    if (this.archivers[this.currentIndex].retriesLeft <= 0) {
+      this.currentIndex++
+    }
+
+    return this.getCurrentArchiver()
+  }
+
+  hasMoreArchivers(): boolean {
+    return this.currentIndex < this.archivers.length
+  }
+}
+
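+// Illustrative usage sketch (not part of this change): ArchiverSelector hands out the same
+// archiver until its per-archiver retry budget is spent, then rotates to the next entry in the
+// shuffled list. `tryDownload` is a hypothetical stand-in for any fetch attempt.
+// async function downloadWithFailover(tryDownload: (a: State.ArchiverNodeInfo) => Promise<boolean>): Promise<boolean> {
+//   const selector = new ArchiverSelector()
+//   let archiver = selector.getCurrentArchiver()
+//   while (archiver) {
+//     if (await tryDownload(archiver)) return true
+//     archiver = selector.markCurrentArchiverFailed()
+//   }
+//   return false // every archiver exhausted its retries
+// }
+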
+export async function syncReceiptsByCycle(lastStoredReceiptCycle = 0, cycleToSyncTo = 0): Promise<boolean> {
+  // Get the last updated cycle from tracker if not provided
+  if (lastStoredReceiptCycle === 0) {
+    const trackedCycle = getLastUpdatedCycle()
+    if (trackedCycle > 0) {
+      Logger.mainLogger.info(`[syncReceiptsByCycle] Using last updated cycle from tracker: ${trackedCycle}`)
+      lastStoredReceiptCycle = Math.max(trackedCycle - config.checkpoint.syncCycleBuffer, 0)
+    }
+  }
+
+  let totalCycles = cycleToSyncTo
+  let totalReceipts = 0
+  if (cycleToSyncTo === 0) {
+    const response: ArchiverTotalDataResponse = await getTotalDataFromArchivers()
+    if (!response || response.totalReceipts < 0) {
+      return false
+    }
+    totalCycles = response.totalCycles
+    totalReceipts = response.totalReceipts
+  }
+  let startCycle = lastStoredReceiptCycle
+  let endCycle = startCycle + MAX_BETWEEN_CYCLES_PER_REQUEST
+  let receiptsCountToSyncBetweenCycles = 0
+  let savedReceiptsCountBetweenCycles = 0
+  let totalSavedReceiptsCount = 0
+  let archiverSelector = new ArchiverSelector()
+
+  while (true) {
+    if (endCycle > totalCycles) {
+      endCycle = totalCycles
+      totalSavedReceiptsCount = await ReceiptDB.queryReceiptCount()
+    }
+    if (cycleToSyncTo > 0) {
+      if (startCycle > cycleToSyncTo) {
+        Logger.mainLogger.debug(`Sync receipts data completed!`)
+        return true
+      }
+    } else {
+      if (totalSavedReceiptsCount >= totalReceipts) {
+        const res: ArchiverTotalDataResponse = await getTotalDataFromArchivers()
+        if (res && res.totalReceipts > 0) {
+          if (res.totalReceipts > totalReceipts) totalReceipts = res.totalReceipts
+          if (res.totalCycles > totalCycles) totalCycles = res.totalCycles
+          Logger.mainLogger.debug('totalReceiptsToSync', totalReceipts, 'totalSavedReceipts', totalSavedReceiptsCount)
+          if (totalSavedReceiptsCount === totalReceipts) {
+            Logger.mainLogger.debug('Sync receipts data completed!')
+            return true
+          }
+        }
+      }
+    }
+    if (startCycle > endCycle) {
+      Logger.mainLogger.error(
+        `Got some issues in syncing receipts. 
Receipts query startCycle ${startCycle} is greater than endCycle ${endCycle}` + ) + return false + } + + const currentArchiver = archiverSelector.getCurrentArchiver() + if (!currentArchiver || !archiverSelector.hasMoreArchivers()) { + Logger.mainLogger.error('All archivers exhausted') + return false + } + + Logger.mainLogger.debug( + `Downloading receipts from cycle ${startCycle} to cycle ${endCycle} using archiver ${currentArchiver.ip}:${currentArchiver.port}` + ) + let response = (await queryFromArchivers( + RequestDataType.RECEIPT, + { + startCycle, + endCycle, + type: 'count', + archiver: currentArchiver, + }, + QUERY_TIMEOUT_MAX + )) as ArchiverReceiptCountResponse + + if (response && response.receipts > 0) { + receiptsCountToSyncBetweenCycles = response.receipts + let page = 1 + savedReceiptsCountBetweenCycles = 0 + while (savedReceiptsCountBetweenCycles < receiptsCountToSyncBetweenCycles) { + const res = (await queryFromArchivers( + RequestDataType.RECEIPT, + { + startCycle, + endCycle, + page, + archiver: currentArchiver, + }, + QUERY_TIMEOUT_MAX + )) as ArchiverReceiptResponse + if (res && res.receipts && Array.isArray(res.receipts) && res.receipts.length > 0) { + const downloadedReceipts = res.receipts as ReceiptDB.Receipt[] + Logger.mainLogger.debug(`Downloaded receipts`, downloadedReceipts.length) + await storeReceiptData(downloadedReceipts, '', false, false, true) + savedReceiptsCountBetweenCycles += downloadedReceipts.length + if (savedReceiptsCountBetweenCycles > receiptsCountToSyncBetweenCycles) { + response = (await queryFromArchivers( + RequestDataType.RECEIPT, + { + startCycle, + endCycle, + type: 'count', + archiver: currentArchiver, + }, + QUERY_TIMEOUT_MAX + )) as ArchiverReceiptCountResponse + if (response && response.receipts) receiptsCountToSyncBetweenCycles = response.receipts + if (receiptsCountToSyncBetweenCycles > savedReceiptsCountBetweenCycles) { + savedReceiptsCountBetweenCycles -= downloadedReceipts.length + continue + } + } + Logger.mainLogger.debug( + 'savedReceiptsCountBetweenCycles', + savedReceiptsCountBetweenCycles, + 'receiptsCountToSyncBetweenCycles', + receiptsCountToSyncBetweenCycles + ) + if (savedReceiptsCountBetweenCycles > receiptsCountToSyncBetweenCycles) { + Logger.mainLogger.debug( + `It has downloaded more receipts than it has in cycles between ${startCycle} and ${endCycle} !` + ) + } + totalSavedReceiptsCount += downloadedReceipts.length + page++ + } else { + Logger.mainLogger.debug('Invalid or empty download response') + const nextArchiver = archiverSelector.markCurrentArchiverFailed() + if (nextArchiver) { + Logger.mainLogger.debug(`Switching to next archiver: ${nextArchiver.ip}:${nextArchiver.port}`) + continue + } + if (!archiverSelector.hasMoreArchivers()) { + Logger.mainLogger.error('All archivers exhausted') + return false + } + } + } + Logger.mainLogger.debug(`Download receipts completed for ${startCycle} - ${endCycle}`) + // Update checkpoint status for completed cycles + + // Update the cycle tracker with the latest cycle we've processed + updateLastUpdatedCycle(endCycle) + Logger.mainLogger.debug(`[syncReceiptsByCycle] Updated cycle tracker to cycle ${endCycle}`) + + startCycle = endCycle + 1 + endCycle += MAX_BETWEEN_CYCLES_PER_REQUEST + archiverSelector = new ArchiverSelector() + } else { + receiptsCountToSyncBetweenCycles = response?.receipts || 0 + if (receiptsCountToSyncBetweenCycles === 0) { + startCycle = endCycle + 1 + endCycle += MAX_BETWEEN_CYCLES_PER_REQUEST + + archiverSelector = new ArchiverSelector() + continue + 
} + Logger.mainLogger.debug('Invalid download response') + const nextArchiver = archiverSelector.markCurrentArchiverFailed() + if (nextArchiver) { + Logger.mainLogger.debug(`Switching to next archiver: ${nextArchiver.ip}:${nextArchiver.port}`) + continue + } + if (!archiverSelector.hasMoreArchivers()) { + Logger.mainLogger.error('All archivers exhausted') + return false + } + } + } +} + +export const syncOriginalTxs = async (): Promise => { + const MAX_RETRIES = 3 + let retryCount = 0 + + let response: ArchiverTotalDataResponse = await getTotalDataFromArchivers() + if (!response || response.totalOriginalTxs < 0) { + return + } + + let { totalOriginalTxs } = response + if (totalOriginalTxs < 1) return + + let complete = false + let start = 0 + let end = start + MAX_ORIGINAL_TXS_PER_REQUEST + + while (!complete) { + if (end >= totalOriginalTxs) { + // If the number of new original txs to sync is within MAX_ORIGINAL_TXS_PER_REQUEST => Update to the latest totalOriginalTxs. + response = await getTotalDataFromArchivers() + if (response && response.totalOriginalTxs > 0) { + if (response.totalOriginalTxs > totalOriginalTxs) totalOriginalTxs = response.totalOriginalTxs + Logger.mainLogger.debug('totalOriginalTxs: ', totalOriginalTxs) + } + } + + Logger.mainLogger.debug(`Downloading Original-Txs from ${start} to ${end}`) + let success = false + retryCount = 0 + + while (!success && retryCount < MAX_RETRIES) { + const res: any = await queryFromArchivers( + RequestDataType.ORIGINALTX, + { + start: start, + end: end, + }, + QUERY_TIMEOUT_MAX + ) + + if (res && res.originalTxs) { + const downloadedOriginalTxs = res.originalTxs + Logger.mainLogger.debug('Downloaded Original-Txs: ', downloadedOriginalTxs.length) + await storeOriginalTxData(downloadedOriginalTxs) + success = true + + if (downloadedOriginalTxs.length < MAX_ORIGINAL_TXS_PER_REQUEST) { + start += downloadedOriginalTxs.length + end = start + MAX_ORIGINAL_TXS_PER_REQUEST + response = await getTotalDataFromArchivers() + if (response && response.totalOriginalTxs > 0) { + if (response.totalOriginalTxs > totalOriginalTxs) totalOriginalTxs = response.totalOriginalTxs + if (start >= totalOriginalTxs) { + complete = true + Logger.mainLogger.debug('Download Original-Txs Completed!') + } + } + break + } + } else { + Logger.mainLogger.debug(`Invalid Original-Tx download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) + retryCount++ + if (retryCount >= MAX_RETRIES) { + Logger.mainLogger.error('Max retries reached for Original-Tx download') + start = end + 1 + end += MAX_ORIGINAL_TXS_PER_REQUEST + if (start >= totalOriginalTxs) { + complete = true + } + } + } + } + + if (success) { + start = end + 1 + end += MAX_ORIGINAL_TXS_PER_REQUEST + } + } + + Logger.mainLogger.debug('Sync Original-Txs Data Completed!') +} + +export const syncOriginalTxsByCycle = async (lastStoredOriginalTxCycle = 0, cycleToSyncTo = 0): Promise => { + let totalCycles = cycleToSyncTo + let totalOriginalTxs = 0 + if (cycleToSyncTo === 0) { + const response: ArchiverTotalDataResponse = await getTotalDataFromArchivers() + if (!response || response.totalOriginalTxs < 1) { + return + } + totalCycles = response.totalCycles + totalOriginalTxs = response.totalOriginalTxs + } + const complete = false + let startCycle = lastStoredOriginalTxCycle + let endCycle = startCycle + MAX_BETWEEN_CYCLES_PER_REQUEST + let originalTxCountToSyncBetweenCycles = 0 + let savedOriginalTxCountBetweenCycles = 0 + let totalSavedOriginalTxCount = 0 + while (!complete) { + if (endCycle > totalCycles) { + 
endCycle = totalCycles
+      totalSavedOriginalTxCount = await OriginalTxDB.queryOriginalTxDataCount()
+    }
+    if (cycleToSyncTo > 0) {
+      if (startCycle > cycleToSyncTo) {
+        Logger.mainLogger.debug(`Sync originalTXs data completed!`)
+        break
+      }
+    } else {
+      if (totalSavedOriginalTxCount >= totalOriginalTxs) {
+        const res: ArchiverTotalDataResponse = await getTotalDataFromArchivers()
+        if (res && res.totalOriginalTxs > 0) {
+          if (res.totalOriginalTxs > totalOriginalTxs) totalOriginalTxs = res.totalOriginalTxs
+          if (res.totalCycles > totalCycles) totalCycles = res.totalCycles
+          Logger.mainLogger.debug(
+            'totalOriginalTxsToSync: ',
+            totalOriginalTxs,
+            'totalSavedOriginalTxs: ',
+            totalSavedOriginalTxCount
+          )
+          if (totalSavedOriginalTxCount === totalOriginalTxs) {
+            Logger.mainLogger.debug('Sync Original-Tx data completed!')
+            break
+          }
+        }
+      }
+    }
+    if (startCycle > endCycle) {
+      Logger.mainLogger.error(
+        `Got some issues in syncing Original-Tx data. Original-Tx query startCycle ${startCycle} is greater than endCycle ${endCycle}`
+      )
+      break
+    }
+    Logger.mainLogger.debug(`Downloading Original-Tx data from cycle ${startCycle} to cycle ${endCycle}`)
+    let response = (await queryFromArchivers(
+      RequestDataType.ORIGINALTX,
+      {
+        startCycle,
+        endCycle,
+        type: 'count',
+      },
+      QUERY_TIMEOUT_MAX
+    )) as ArchiverOriginalTxsCountResponse
+    if (response && response.originalTxs > 0) {
+      originalTxCountToSyncBetweenCycles = response.originalTxs
+      let page = 1
+      savedOriginalTxCountBetweenCycles = 0
+      while (savedOriginalTxCountBetweenCycles < originalTxCountToSyncBetweenCycles) {
+        const res = (await queryFromArchivers(
+          RequestDataType.ORIGINALTX,
+          {
+            startCycle,
+            endCycle,
+            page,
+          },
+          QUERY_TIMEOUT_MAX
+        )) as ArchiverOriginalTxResponse
+        if (res && res.originalTxs) {
+          const downloadedOriginalTxs = res.originalTxs as OriginalTxDB.OriginalTxData[]
+          Logger.mainLogger.debug('Downloaded Original-Txs: ', downloadedOriginalTxs.length)
+          await storeOriginalTxData(downloadedOriginalTxs)
+          savedOriginalTxCountBetweenCycles += downloadedOriginalTxs.length
+          if (savedOriginalTxCountBetweenCycles > originalTxCountToSyncBetweenCycles) {
+            response = (await queryFromArchivers(
+              RequestDataType.ORIGINALTX,
+              {
+                startCycle,
+                endCycle,
+                type: 'count',
+              },
+              QUERY_TIMEOUT_MAX
+            )) as ArchiverOriginalTxsCountResponse
+            if (response && response.originalTxs) originalTxCountToSyncBetweenCycles = response.originalTxs
+            if (originalTxCountToSyncBetweenCycles > savedOriginalTxCountBetweenCycles) {
+              savedOriginalTxCountBetweenCycles -= downloadedOriginalTxs.length
+              continue
+            }
+          }
+          Logger.mainLogger.debug(
+            'savedOriginalTxCountBetweenCycles',
+            savedOriginalTxCountBetweenCycles,
+            'originalTxCountToSyncBetweenCycles',
+            originalTxCountToSyncBetweenCycles
+          )
+          if (savedOriginalTxCountBetweenCycles > originalTxCountToSyncBetweenCycles) {
+            Logger.mainLogger.debug(
+              `Downloaded more originalTxsData than expected for cycles between ${startCycle} and ${endCycle}!`
+            )
+          }
+          totalSavedOriginalTxCount += downloadedOriginalTxs.length
+          page++
+        } else {
+          Logger.mainLogger.debug('Invalid Original-Txs download response')
+          continue
+        }
+      }
+      Logger.mainLogger.debug(`Download Original-Txs completed for ${startCycle} - ${endCycle}`)
+      startCycle = endCycle + 1
+      endCycle += MAX_BETWEEN_CYCLES_PER_REQUEST
+    } else {
+      originalTxCountToSyncBetweenCycles = response?.originalTxs || 0
+      if (originalTxCountToSyncBetweenCycles === 0) {
+        startCycle = endCycle + 1
+        endCycle += MAX_BETWEEN_CYCLES_PER_REQUEST
+        continue
+      }
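+      // Note: unlike the receipts path above, this branch keeps no ArchiverSelector failover or
+      // retry cap, so a persistently invalid count response will retry the same window indefinitely.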
+      Logger.mainLogger.debug('Invalid Original-Txs download response')
+      continue
+    }
+  }
+}
+
+export const syncCyclesAndTxsData = async (
+  lastStoredCycleCount = 0,
+  lastStoredReceiptCount = 0,
+  lastStoredOriginalTxCount = 0
+): Promise<void> => {
+  const MAX_RETRIES = 3
+  let retryCount = 0
+
+  // Get the last updated cycle from tracker if not provided
+  if (lastStoredCycleCount === 0) {
+    const trackedCycle = getLastUpdatedCycle()
+    if (trackedCycle > 0) {
+      Logger.mainLogger.info(`[syncCyclesAndTxsData] Using last updated cycle from tracker: ${trackedCycle}`)
+      lastStoredCycleCount = Math.max(trackedCycle - config.checkpoint.syncCycleBuffer, 0)
+    }
+  }
+
+  let response: ArchiverTotalDataResponse = await getTotalDataFromArchivers()
+  if (!response || response.totalCycles < 0 || response.totalReceipts < 0) {
+    return
+  }
+  const { totalCycles, totalReceipts } = response
+  Logger.mainLogger.debug('totalCycles', totalCycles, 'lastStoredCycleCount', lastStoredCycleCount)
+  Logger.mainLogger.debug('totalReceipts', totalReceipts, 'lastStoredReceiptCount', lastStoredReceiptCount)
+  // Logger.mainLogger.debug(
+  //   'totalOriginalTxs',
+  //   totalOriginalTxs,
+  //   'lastStoredOriginalTxCount',
+  //   lastStoredOriginalTxCount
+  // )
+  if (
+    totalCycles === lastStoredCycleCount &&
+    totalReceipts === lastStoredReceiptCount
+    // && totalOriginalTxs === lastStoredOriginalTxCount
+  ) {
+    Logger.mainLogger.debug('The archiver has synced the latest cycle, receipts and originalTxs data!')
+    return
+  }
+  let totalReceiptsToSync = totalReceipts
+  // let totalOriginalTxsToSync = totalOriginalTxs
+  let totalCyclesToSync = totalCycles
+  let completeForReceipt = false
+  // let completeForOriginalTx = false
+  let completeForCycle = false
+  let startReceipt = lastStoredReceiptCount
+  // let startOriginalTx = lastStoredOriginalTxCount
+  let startCycle = lastStoredCycleCount
+  let endReceipt = startReceipt + MAX_RECEIPTS_PER_REQUEST
+  // let endOriginalTx = startOriginalTx + MAX_ORIGINAL_TXS_PER_REQUEST
+  let endCycle = startCycle + MAX_CYCLES_PER_REQUEST
+
+  if (totalCycles === lastStoredCycleCount) completeForCycle = true
+  if (totalReceipts === lastStoredReceiptCount) completeForReceipt = true
+  // if (totalOriginalTxs === lastStoredOriginalTxCount) completeForOriginalTx = true
+
+  while (
+    !completeForReceipt ||
+    !completeForCycle
+    // || !completeForOriginalTx
+  ) {
+    if (
+      endReceipt >= totalReceiptsToSync ||
+      endCycle >= totalCyclesToSync
+      // || endOriginalTx >= totalOriginalTxsToSync
+    ) {
+      response = await getTotalDataFromArchivers()
+      if (
+        response &&
+        response.totalReceipts &&
+        response.totalCycles
+        // && response.totalOriginalTxs
+      ) {
+        if (response.totalReceipts !== totalReceiptsToSync) {
+          completeForReceipt = false
+          totalReceiptsToSync = response.totalReceipts
+        }
+        // if (response.totalOriginalTxs !== totalOriginalTxsToSync) {
+        //   completeForOriginalTx = false
+        //   totalOriginalTxsToSync = response.totalOriginalTxs
+        // }
+        if (response.totalCycles !== totalCyclesToSync) {
+          completeForCycle = false
+          totalCyclesToSync = response.totalCycles
+        }
+        if (totalReceiptsToSync < startReceipt) {
+          completeForReceipt = true
+        }
+        // if (totalOriginalTxsToSync < startOriginalTx) {
+        //   completeForOriginalTx = true
+        // }
+        if (totalCyclesToSync < startCycle) {
+          completeForCycle = true
+        }
+        Logger.mainLogger.debug(
+          'totalReceiptsToSync',
+          totalReceiptsToSync,
+          // 'totalOriginalTxsToSync',
+          // totalOriginalTxsToSync,
+          'totalCyclesToSync',
+          totalCyclesToSync
+        )
+      }
+    }
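+    // The refresh above stretches the sync targets mid-run: if the network produced more receipts
+    // or cycles since the last check, the matching branch below is re-armed; a cursor that has
+    // already passed its target marks that branch complete.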
+    if (!completeForReceipt) {
+      Logger.mainLogger.debug(`Downloading receipts from ${startReceipt} to ${endReceipt}`)
+      let success = false
+      retryCount = 0
+
+      while (!success && retryCount < MAX_RETRIES) {
+        const res = (await queryFromArchivers(
+          RequestDataType.RECEIPT,
+          {
+            start: startReceipt,
+            end: endReceipt,
+          },
+          QUERY_TIMEOUT_MAX
+        )) as ArchiverReceiptResponse
+
+        if (res && res.receipts) {
+          const downloadedReceipts = res.receipts as ReceiptDB.Receipt[]
+          Logger.mainLogger.debug(`Downloaded receipts`, downloadedReceipts.length)
+          await storeReceiptData(downloadedReceipts, '', false, false, true)
+          success = true
+          if (downloadedReceipts.length < MAX_RECEIPTS_PER_REQUEST) {
+            startReceipt += downloadedReceipts.length + 1
+            endReceipt += downloadedReceipts.length + MAX_RECEIPTS_PER_REQUEST
+          }
+        } else {
+          Logger.mainLogger.debug(`Invalid download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`)
+          retryCount++
+          if (retryCount >= MAX_RETRIES) {
+            Logger.mainLogger.error('Max retries reached for receipt download')
+          }
+        }
+      }
+      if (success) {
+        startReceipt = endReceipt + 1
+        endReceipt += MAX_RECEIPTS_PER_REQUEST
+      }
+    }
+
+    // if (!completeForOriginalTx) {
+    //   Logger.mainLogger.debug(`Downloading Original-Txs from ${startOriginalTx} to ${endOriginalTx}`)
+    //   let success = false
+    //   retryCount = 0
+
+    //   while (!success && retryCount < MAX_RETRIES) {
+    //     const res = (await queryFromArchivers(
+    //       RequestDataType.ORIGINALTX,
+    //       {
+    //         start: startOriginalTx,
+    //         end: endOriginalTx,
+    //       },
+    //       QUERY_TIMEOUT_MAX
+    //     )) as ArchiverOriginalTxResponse
+
+    //     if (res && res.originalTxs) {
+    //       const downloadedOriginalTxs = res.originalTxs as OriginalTxDB.OriginalTxData[]
+    //       Logger.mainLogger.debug(`Downloaded Original-Txs: `, downloadedOriginalTxs.length)
+    //       await storeOriginalTxData(downloadedOriginalTxs)
+    //       success = true
+
+    //       if (downloadedOriginalTxs.length < MAX_ORIGINAL_TXS_PER_REQUEST) {
+    //         startOriginalTx += downloadedOriginalTxs.length + 1
+    //         endOriginalTx += downloadedOriginalTxs.length + MAX_ORIGINAL_TXS_PER_REQUEST
+    //         break
+    //       }
+    //     } else {
+    //       Logger.mainLogger.debug(`Invalid Original-Tx download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`)
+    //       retryCount++
+    //       if (retryCount >= MAX_RETRIES) {
+    //         Logger.mainLogger.error('Max retries reached for Original-Tx download')
+    //         startOriginalTx = endOriginalTx + 1
+    //         endOriginalTx += MAX_ORIGINAL_TXS_PER_REQUEST
+    //       }
+    //       continue
+    //     }
+    //   }
+    //   if (success) {
+    //     startOriginalTx = endOriginalTx + 1
+    //     endOriginalTx += MAX_ORIGINAL_TXS_PER_REQUEST
+    //   }
+    // }
+
+    if (!completeForCycle) {
+      Logger.mainLogger.debug(`Downloading cycles from ${startCycle} to ${endCycle}`)
+      let success = false
+      retryCount = 0
+
+      while (!success && retryCount < MAX_RETRIES) {
+        const res = (await queryFromArchivers(
+          RequestDataType.CYCLE,
+          {
+            start: startCycle,
+            end: endCycle,
+          },
+          QUERY_TIMEOUT_MAX
+        )) as ArchiverCycleResponse
+        if (res && res.cycleInfo) {
+          const cycles = res.cycleInfo
+          Logger.mainLogger.debug(`Downloaded cycles`, cycles.length)
+          for (const cycle of cycles) {
+            if (!validateCycleData(cycle)) {
+              Logger.mainLogger.debug('Found invalid cycle data')
+              continue
+            }
+            await processCycles([cycle])
+          }
+          success = true
+
+          // Update the cycle tracker with the highest cycle we've processed
+          const highestCycle = cycles.reduce((max, cycle) => Math.max(max, cycle.counter), 0)
+          if (highestCycle > 0) {
+            updateLastUpdatedCycle(highestCycle)
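+            // Persisting the highest processed cycle lets a restarted archiver resume from
+            // (tracked cycle - config.checkpoint.syncCycleBuffer) instead of re-syncing from zero.
+            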
Logger.mainLogger.debug(`[syncCyclesAndTxsData] Updated cycle tracker to cycle ${highestCycle}`) + } + + if (cycles.length < MAX_CYCLES_PER_REQUEST) { + startCycle += cycles.length + 1 + endCycle += cycles.length + MAX_CYCLES_PER_REQUEST + } + } else { + Logger.mainLogger.debug(`Invalid cycle download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) + retryCount++ + if (retryCount >= MAX_RETRIES) { + Logger.mainLogger.error('Max retries reached for cycle download') + } + } + } + if (success) { + startCycle = endCycle + 1 + endCycle += MAX_CYCLES_PER_REQUEST + } + } + } + Logger.mainLogger.debug('Sync Cycle, Receipt & Original-Tx data completed!') +} + +export const syncCyclesAndTxsDataBetweenCycles = async (lastStoredCycle = 0, cycleToSyncTo = 0): Promise => { + Logger.mainLogger.debug(`Syncing cycles and txs data between cycles ${lastStoredCycle} and ${cycleToSyncTo}`) + await syncCyclesBetweenCycles(lastStoredCycle, cycleToSyncTo) + await syncReceiptsByCycle(lastStoredCycle, cycleToSyncTo) + // await syncOriginalTxsByCycle(lastStoredCycle, cycleToSyncTo) +} + +// // simple method to validate old data; it's not good when there are multiple archivers, the receipts saving order may not be the same +// export async function compareWithOldReceiptsData( +// archiver: State.ArchiverNodeInfo, +// lastReceiptCount = 0 +// ) { +// let downloadedReceipts +// const response: any = await P2P.getJson( +// `http://${archiver.ip}:${archiver.port}/receipt?start=${ +// lastReceiptCount - 10 > 0 ? lastReceiptCount - 10 : 0 +// }&end=${lastReceiptCount}` +// ) +// if (response && response.receipts) { +// downloadedReceipts = response.receipts +// } else { +// throw Error( +// `Can't fetch data from receipt ${ +// lastReceiptCount - 10 > 0 ? lastReceiptCount - 10 : 0 +// } to receipt ${lastReceiptCount} from archiver ${archiver}` +// ) +// } +// let oldReceipts = await ReceiptDB.queryReceipts( +// lastReceiptCount - 10 > 0 ? lastReceiptCount - 10 : 0, +// lastReceiptCount +// ) +// // downloadedReceipts.sort((a, b) => +// // a.cycleRecord.counter > b.cycleRecord.counter ? 1 : -1 +// // ); +// // oldReceipts.sort((a, b) => +// // a.cycleRecord.counter > b.cycleRecord.counter ? 1 : -1 +// // ); +// let success = false +// let receiptsToMatchCount = 10 +// for (let i = 0; i < downloadedReceipts.length; i++) { +// let downloadedReceipt = downloadedReceipts[i] +// const oldReceipt = oldReceipts[i] +// if (oldReceipt.counter) delete oldReceipt.counter +// console.log(downloadedReceipt.receiptId, oldReceipt.receiptId) +// if (downloadedReceipt.receiptId !== oldReceipt.receiptId) { +// return { +// success, +// receiptsToMatchCount, +// } +// } +// success = true +// receiptsToMatchCount-- +// } +// return { success, receiptsToMatchCount } +// } +export async function compareWithOldOriginalTxsData(lastStoredOriginalTxCycle = 0): Promise { + const numberOfCyclesTocompare = 10 + let success = false + let matchedCycle = 0 + const endCycle = lastStoredOriginalTxCycle + const startCycle = endCycle - numberOfCyclesTocompare > 0 ? 
endCycle - numberOfCyclesTocompare : 0
+  const response = (await queryFromArchivers(
+    RequestDataType.ORIGINALTX,
+    {
+      startCycle,
+      endCycle,
+      type: 'tally',
+    },
+    QUERY_TIMEOUT_MAX
+  )) as ArchiverOriginalTxResponse
+
+  if (!response || !response.originalTxs) {
+    Logger.mainLogger.error(`Can't fetch original tx data from cycle ${startCycle} to cycle ${endCycle} from archivers`)
+    return { success, matchedCycle }
+  }
+  const downloadedOriginalTxsByCycles = response.originalTxs as OriginalTxDB.OriginalTxDataCount[]
+
+  const oldOriginalTxCountByCycle = await OriginalTxDB.queryOriginalTxDataCountByCycles(startCycle, endCycle)
+
+  for (let i = 0; i < downloadedOriginalTxsByCycles.length; i++) {
+    // eslint-disable-next-line security/detect-object-injection
+    const downloadedOriginalTx = downloadedOriginalTxsByCycles[i]
+    // eslint-disable-next-line security/detect-object-injection
+    const oldOriginalTx = oldOriginalTxCountByCycle[i]
+    Logger.mainLogger.debug(downloadedOriginalTx, oldOriginalTx)
+    if (
+      !downloadedOriginalTx ||
+      !oldOriginalTx ||
+      downloadedOriginalTx.cycle !== oldOriginalTx.cycle ||
+      downloadedOriginalTx.originalTxDataCount !== oldOriginalTx.originalTxDataCount
+    ) {
+      return {
+        success,
+        matchedCycle,
+      }
+    }
+    success = true
+    matchedCycle = downloadedOriginalTx.cycle
+  }
+  success = true
+  return { success, matchedCycle }
+}
+
+export async function compareWithOldReceiptsData(lastStoredReceiptCycle = 0): Promise<{ success: boolean; matchedCycle: number }> {
+  const numberOfCyclesTocompare = 10
+  let success = false
+  let matchedCycle = 0
+  const endCycle = lastStoredReceiptCycle
+  const startCycle = endCycle - numberOfCyclesTocompare > 0 ? endCycle - numberOfCyclesTocompare : 0
+  const response = (await queryFromArchivers(
+    RequestDataType.RECEIPT,
+    {
+      startCycle,
+      endCycle,
+      type: 'tally',
+    },
+    QUERY_TIMEOUT_MAX
+  )) as ArchiverReceiptResponse
+
+  if (!response || !response.receipts) {
+    Logger.mainLogger.error(`Can't fetch receipts data from cycle ${startCycle} to cycle ${endCycle} from archivers`)
+    return { success, matchedCycle }
+  }
+  const downloadedReceiptCountByCycles = response.receipts as ReceiptDB.ReceiptCount[]
+
+  const oldReceiptCountByCycle = await ReceiptDB.queryReceiptCountByCycles(startCycle, endCycle)
+  for (let i = 0; i < downloadedReceiptCountByCycles.length; i++) {
+    // eslint-disable-next-line security/detect-object-injection
+    const downloadedReceipt = downloadedReceiptCountByCycles[i]
+    // eslint-disable-next-line security/detect-object-injection
+    const oldReceipt = oldReceiptCountByCycle[i]
+    Logger.mainLogger.debug(downloadedReceipt, oldReceipt)
+    if (
+      !downloadedReceipt ||
+      !oldReceipt ||
+      downloadedReceipt.cycle !== oldReceipt.cycle ||
+      downloadedReceipt.receiptCount !== oldReceipt.receiptCount
+    ) {
+      return {
+        success,
+        matchedCycle,
+      }
+    }
+    success = true
+    matchedCycle = downloadedReceipt.cycle
+  }
+  success = true
+  return { success, matchedCycle }
+}
+
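+// Illustrative sketch (hypothetical helper, not wired in): the tally comparisons above return the
+// last cycle whose local counts matched the network's, which is a natural resume point for syncing.
+// async function pickReceiptResumeCycle(lastStoredReceiptCycle: number): Promise<number> {
+//   const { success, matchedCycle } = await compareWithOldReceiptsData(lastStoredReceiptCycle)
+//   // No match at all means the local data cannot be trusted; fall back to a full re-sync
+//   return success ? matchedCycle + 1 : 0
+// }
+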
+export async function compareWithOldCyclesData(lastCycleCounter = 0): Promise<{ success: boolean; matchedCycle: number }> {
+  try {
+    const numberOfCyclesTocompare = 10
+    const start = lastCycleCounter - numberOfCyclesTocompare
+    const end = lastCycleCounter
+    const response = (await queryFromArchivers(
+      RequestDataType.CYCLE,
+      {
+        start,
+        end,
+      },
+      QUERY_TIMEOUT_MAX
+    )) as ArchiverCycleResponse
+    if (!response || !response.cycleInfo) {
+      throw Error(`Can't fetch data from cycle ${start} to cycle ${end} from archivers`)
+    }
+    const downloadedCycles = response.cycleInfo
+    const oldCycles = await CycleDB.queryCycleRecordsBetween(start, end)
+    let success = false
+    let matchedCycle = 0
+    for (let i = 0; i < downloadedCycles.length; i++) {
+      // eslint-disable-next-line security/detect-object-injection
+      const downloadedCycle = downloadedCycles[i]
+      // eslint-disable-next-line security/detect-object-injection
+      const oldCycle = oldCycles[i]
+      if (
+        !downloadedCycle ||
+        !oldCycle ||
+        StringUtils.safeStringify(downloadedCycle) !== StringUtils.safeStringify(oldCycle)
+      ) {
+        if (config.VERBOSE) {
+          Logger.mainLogger.error('Mismatched cycle Number', downloadedCycle?.counter, oldCycle?.counter)
+        }
+        return {
+          success,
+          matchedCycle,
+        }
+      }
+      success = true
+      matchedCycle = downloadedCycle.counter
+    }
+    return { success, matchedCycle }
+  } catch (error) {
+    Logger.mainLogger.error('compareWithOldCyclesData error: ' + error)
+    return { success: false, matchedCycle: 0 }
+  }
+}
+
+async function downloadOldCycles(
+  cycleToSyncTo: P2PTypes.CycleCreatorTypes.CycleData,
+  lastStoredCycleCount: number
+): Promise<void> {
+  let endCycle = cycleToSyncTo.counter - 1
+  Logger.mainLogger.debug('endCycle counter', endCycle, 'lastStoredCycleCount', lastStoredCycleCount)
+  if (endCycle > lastStoredCycleCount) {
+    Logger.mainLogger.debug(`Downloading old cycles from cycles ${lastStoredCycleCount} to cycle ${endCycle}!`)
+  }
+
+  let savedCycleRecord = cycleToSyncTo
+  const MAX_RETRY_COUNT = 3
+  let retryCount = 0
+  while (endCycle > lastStoredCycleCount) {
+    let startCycle: number = endCycle - MAX_CYCLES_PER_REQUEST
+    if (startCycle < 0) startCycle = 0
+    if (startCycle < lastStoredCycleCount) startCycle = lastStoredCycleCount
+    Logger.mainLogger.debug(`Getting cycles ${startCycle} - ${endCycle} ...`)
+    const res = (await queryFromArchivers(
+      RequestDataType.CYCLE,
+      {
+        start: startCycle,
+        end: endCycle,
+      },
+      QUERY_TIMEOUT_MAX
+    )) as ArchiverCycleResponse
+    if (!res || !res.cycleInfo || !Array.isArray(res.cycleInfo) || res.cycleInfo.length === 0) {
+      Logger.mainLogger.error(`Can't fetch data from cycle ${startCycle} to cycle ${endCycle} from archivers`)
+      if (retryCount < MAX_RETRY_COUNT) {
+        retryCount++
+        continue
+      } else {
+        endCycle = startCycle - 1
+        retryCount = 0
+        continue
+      }
+    }
+
+    const prevCycles = res.cycleInfo as P2PTypes.CycleCreatorTypes.CycleData[]
+    if (prevCycles) prevCycles.sort((a, b) => (a.counter > b.counter ? -1 : 1))
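+    // prevCycles is sorted newest-first so each record is validated against the previously saved
+    // (newer) one, walking the chain backwards toward lastStoredCycleCount.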
+
+    const combineCycles: P2PTypes.CycleCreatorTypes.CycleData[] = []
+    for (const prevCycle of prevCycles) {
+      if (validateCycle(prevCycle, savedCycleRecord) === false) {
+        Logger.mainLogger.error(`Record ${prevCycle.counter} failed validation`)
+        Logger.mainLogger.debug('fail', prevCycle, savedCycleRecord)
+        break
+      }
+      savedCycleRecord = prevCycle
+      combineCycles.push(prevCycle)
+    }
+    await storeCycleData(combineCycles)
+    endCycle = startCycle - 1
+  }
+}
+
+/**
+ * Syncs cycle data for a specific cycle
+ * @param cycle The cycle number to sync
+ * @returns True if successful, false otherwise
+ */
+export async function syncCycleData(cycle: number): Promise<boolean> {
+  const MAX_RETRIES = 3
+  let retryCount = 0
+  let success = false
+
+  Logger.mainLogger.debug(`syncCycleData: Starting sync for cycle ${cycle}`)
+  Logger.mainLogger.debug(`syncCycleData: Active nodes count: ${NodeList.activeListByIdSorted.length}`)
+
+  while (!success && retryCount < MAX_RETRIES) {
+    try {
+      Logger.mainLogger.debug(`syncCycleData: Attempt ${retryCount + 1} for cycle ${cycle}`)
+
+      const res = (await queryFromArchivers(
+        RequestDataType.CYCLE,
+        {
+          start: cycle,
+          end: cycle,
+        },
+        QUERY_TIMEOUT_MAX
+      )) as ArchiverCycleResponse
+
+      if (res && res.cycleInfo && res.cycleInfo.length > 0) {
+        const cycleData = res.cycleInfo[0]
+        Logger.mainLogger.debug(`syncCycleData: Received data for cycle ${cycle}, marker: ${cycleData.marker}`)
+
+        if (!validateCycleData(cycleData)) {
+          Logger.mainLogger.error(`syncCycleData: Invalid cycle data for cycle ${cycle}`)
+          Logger.mainLogger.error(`syncCycleData: Cycle validation failed, checking marker computation...`)
+          nestedCountersInstance.countEvent('archiver', 'cycle_validation_failed - ' + cycle)
+
+          // Debug marker computation
+          const cycleDataCopy = { ...cycleData }
+          delete (cycleDataCopy as any).marker
+          const computedMarker = Cycles.computeCycleMarker(cycleDataCopy)
+          Logger.mainLogger.error(
+            `syncCycleData: Computed marker: ${computedMarker}, received marker: ${cycleData.marker}`
+          )
+
+          retryCount++
+          continue
+        }
+
+        await processCycles([cycleData])
+        Logger.mainLogger.debug(`syncCycleData: Successfully synced and processed cycle ${cycle}`)
+        // Successfully synced cycle data for cycle
+        success = true
+        return true
+      } else {
+        Logger.mainLogger.error(
+          `syncCycleData: Failed to get cycle data for cycle ${cycle}, attempt ${retryCount + 1} of ${MAX_RETRIES}`
+        )
+        retryCount++
+      }
+    } catch (error) {
+      Logger.mainLogger.error(`syncCycleData: Error syncing cycle data for cycle ${cycle}: ${error}`)
+      retryCount++
+    }
+  }
+
+  Logger.mainLogger.error(`syncCycleData: All attempts to sync cycle ${cycle} failed`)
+  return false
+}
+
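+// Illustrative sketch (assumes the helpers above): the marker check that syncCycleData's debug
+// path and validateCycleData both perform, shown in isolation.
+// function markerMatches(cycle: P2PTypes.CycleCreatorTypes.CycleData): boolean {
+//   const copy = { ...cycle }
+//   delete (copy as any).marker // the marker is computed over the record without itself
+//   return Cycles.computeCycleMarker(copy) === cycle.marker
+// }
+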
+// We want to check that all of the provided certs are actually valid. This means we need to check that
+// they all have the same marker, and that that marker is the same as the one of the original record.
+// We also want all of the signers to actually be active nodes. And of course, the certs must pass sign
+// verification. If we have already verified a cert in the past, we can skip it. It is possible for a
+// malicious node to use valid certs from an honest record to get this function to return true. However,
+// it will also have to make sure the inpMarker is the same as the markers in the certs. If it
+// does this, then the validateCycleData() function that gets called later will fail
+function validateCerts(
+  certs: P2PTypes.CycleCreatorTypes.CycleCert[],
+  certSigners: Set<string>,
+  inpMarker: string,
+  cycleData: P2PTypes.CycleCreatorTypes.CycleData
+) {
+  nestedCountersInstance.countEvent('validateCerts', 'validation', 1)
+  Logger.mainLogger.debug(`validateCerts: Validating ${certs.length} certificates against marker ${inpMarker}`)
+
+  for (const cert of certs) {
+    const cleanCert: P2PTypes.CycleCreatorTypes.CycleCert = {
+      marker: cert.marker,
+      sign: cert.sign,
+    }
+    if (cleanCert.marker !== inpMarker) {
+      nestedCountersInstance.countEvent('validateCerts', 'markerMismatch', 1)
+
+      validationTracker.add({ cycle: cycleData })
+
+      return false
+    }
+    if (NodeList.activeListByIdSorted.some((node) => node.publicKey === cleanCert.sign.owner) === false) {
+      nestedCountersInstance.countEvent('validateCerts', 'badOwner', 1)
+      Logger.mainLogger.warn(`validateCerts: bad owner ${cleanCert.sign.owner} not found in active nodes`)
+      return false
+    }
+    if (certSigners.has(cert.sign.owner)) {
+      nestedCountersInstance.countEvent('validateCerts', 'skipExistingSigner', 1)
+      Logger.mainLogger.debug(`validateCerts: Skipping already verified cert from ${cert.sign.owner}`)
+      continue
+    }
+    if (!Crypto.verify(cleanCert)) {
+      nestedCountersInstance.countEvent('validateCerts', 'badSignature', 1)
+      Logger.mainLogger.warn(`validateCerts: bad signature from ${cleanCert.sign.owner}`)
+      return false
+    }
+    nestedCountersInstance.countEvent('validateCerts', 'validCert', 1)
+  }
+
+  Logger.mainLogger.debug(`validateCerts: All certificates validated successfully`)
+  return true
+}
+
+export function scoreCert(pubKey: string, prevMarker: P2PTypes.CycleCreatorTypes.CycleMarker): number {
+  try {
+    const node = NodeList.byPublicKey.get(pubKey)
+    const id = node.id // get node id from cert pub key
+    const obj = { id }
+    const hid = Crypto.hashObj(obj) // Omar - use hash of id so the cert is not made by nodes that are near based on node id
+
+    const out = XOR(prevMarker, hid)
+
+    // will also nerf if foundationNode is undefined, which it will be for already active nodes when we
+    // first turn on the addFoundationNodeAttribute flag under the current implementation
+    if (config.nerfNonFoundationCertScores && !node.foundationNode) {
+      return out & 0x0fffffff
+    }
+
+    return out
+  } catch (err) {
+    Logger.mainLogger.error('scoreCert ERR:', err)
+    return 0
+  }
+}
+
+// this function is needed since the cycle record is changed after Q3/Q4. Thus, the cycle certs will contain
+// the marker of the cycle as it existed in Q3/Q4. However, the cycle that we received at the start of the
+// function has been changed, so its marker has also been changed. If we try to check this new marker against
+// the markers inside the certs, the validation will obviously fail. 
So we want to revert those changes on a +// deep copy so that we can get the original record +function getRecordWithoutPostQ3Changes(cycle: P2PTypes.CycleCreatorTypes.CycleRecord) { + Logger.mainLogger.debug(`getRecordWithoutPostQ3Changes: Processing cycle ${cycle.counter}`) + + const cycleCopy = StringUtils.safeJsonParse(StringUtils.safeStringify(cycle)) + delete cycleCopy.marker + delete cycleCopy.certificates + cycleCopy.nodeListHash = '' + cycleCopy.archiverListHash = '' + cycleCopy.standbyNodeListHash = '' + cycleCopy.joinedConsensors.forEach((jc) => (jc.syncingTimestamp = 0)) + return cycleCopy +} diff --git a/src/Data/accountData.ts b/src/Data/accountData.ts new file mode 100644 index 00000000..ba57769a --- /dev/null +++ b/src/Data/accountData.ts @@ -0,0 +1,41 @@ +import { CombinedAccountsData } from './types' +import * as ReceiptDB from '../dbstore/receipts' + +let combineAccountsData: CombinedAccountsData = { + accounts: [], + receipts: [], +} + +export function clearCombinedAccountsData(): void { + combineAccountsData = { + accounts: [], + receipts: [], + } +} + +export function addToCombinedAccountsData(data: { accounts?: any[]; receipts?: ReceiptDB.Receipt[] }): void { + let newCombineAccountsData = { ...combineAccountsData } + if (data.accounts) + newCombineAccountsData.accounts = [ + ...newCombineAccountsData.accounts, + ...data.accounts, + ] + if (data.receipts) + newCombineAccountsData.receipts = [ + ...newCombineAccountsData.receipts, + ...data.receipts, + ] + combineAccountsData = { ...newCombineAccountsData } +} + +export function getCombinedAccountsData(): CombinedAccountsData { + return combineAccountsData +} + +// Placeholder function - will be imported from dataSync.ts +export async function syncGenesisAccountsFromConsensor( + totalGenesisAccounts: any, + firstConsensor: any +): Promise { + // This function is defined in dataSync.ts +} \ No newline at end of file diff --git a/src/Data/cycleData.ts b/src/Data/cycleData.ts new file mode 100644 index 00000000..27e0bdba --- /dev/null +++ b/src/Data/cycleData.ts @@ -0,0 +1,554 @@ +import { P2P as P2PTypes } from '@shardeum-foundation/lib-types' +import * as Logger from '../Logger' +import * as NodeList from '../NodeList' +import * as State from '../State' +import * as Cycles from './Cycles' +import * as Crypto from '../Crypto' +import { Utils as StringUtils } from '@shardeum-foundation/lib-types' +import { Utils as UtilsTypes } from '@shardeum-foundation/lib-types' +import { nestedCountersInstance } from '../profiler/nestedCounters' +import { config } from '../Config' +import { ArchiverLogging } from '../profiler/archiverLogging' +import { subscriptionCycleData, DataSender } from './types' +import { storeCycleData, processCycles } from './dataSync' +import { cachedCycleRecords, updateCacheFromDB } from '../cache/cycleRecordsCache' +import { getCurrentCycleCounter } from './Cycles' +import { XOR } from '../utils/general' +import { validateCycleData } from './Cycles' +import { validationTracker } from './socketClient' + +interface CycleMarkerData { + cycleInfo: P2PTypes.CycleCreatorTypes.CycleData | subscriptionCycleData + certSigners: Set +} + +interface ReceivedCycleEntry { + [marker: string]: CycleMarkerData +} + +interface ReceivedCycleTracker { + [counter: number]: ReceivedCycleEntry & { + received?: number + saved?: boolean + } +} + +const receivedCycleTracker: ReceivedCycleTracker = {} +const maxCyclesInCycleTracker = 500 + +export function collectCycleData( + cycleData: subscriptionCycleData[] | 
P2PTypes.CycleCreatorTypes.CycleData[], + senderInfo: string, + source: string, + dataSenders?: Map +): void { + const startTime = Date.now() + const operationId = ArchiverLogging.generateOperationId() + + Logger.mainLogger.debug( + `collectCycleData: Processing ${cycleData.length} cycles from ${senderInfo}, source: ${source}` + ) + + nestedCountersInstance.countEvent('collectCycleData', 'cycles_received', cycleData.length) + nestedCountersInstance.countEvent('collectCycleData', 'source_' + source, 1) + + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: 0, + dataType: 'CYCLE_RECORD', + dataHash: '', + status: 'STARTED', + operationId, + metrics: { + duration: 0, + dataSize: StringUtils.safeStringify(cycleData).length, + }, + }) + + if (NodeList.activeListByIdSorted.length > 0) { + const [ip, port] = senderInfo.split(':') + const isInActiveNodes = NodeList.activeListByIdSorted.some( + (node) => node.ip === ip && node.port.toString() === port + ) + const isInActiveArchivers = State.activeArchivers.some( + (archiver) => archiver.ip === ip && archiver.port.toString() === port + ) + if (!isInActiveNodes && !isInActiveArchivers) { + nestedCountersInstance.countEvent('collectCycleData', 'sender_not_active', 1) + Logger.mainLogger.warn(`collectCycleData: Ignoring cycle data from non-active node: ${senderInfo}`) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: 0, + dataType: 'CYCLE_RECORD', + dataHash: '', + status: 'ERROR', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycleData).length, + }, + error: 'Sender not in active nodes or archivers', + }) + return + } + } + + for (const cycle of cycleData) { + Logger.mainLogger.debug(`collectCycleData: Processing cycle ${cycle.counter}, marker: ${cycle.marker}`) + + if (receivedCycleTracker[cycle.counter]?.saved === true) { + nestedCountersInstance.countEvent('collectCycleData', 'cycle_already_saved_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: Cycle ${cycle.counter} already saved, skipping`) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: cycle.marker, + status: 'COMPLETE', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycle).length, + }, + }) + break + } + + nestedCountersInstance.countEvent('collectCycleData', 'process_cycle_' + cycle.mode, 1) + + if (source === 'archiver') { + nestedCountersInstance.countEvent('collectCycleData', 'direct_process_from_archiver', 1) + Logger.mainLogger.debug(`collectCycleData: Processing cycle ${cycle.counter} from archiver directly`) + processCycles([cycle as P2PTypes.CycleCreatorTypes.CycleData]) + continue + } + + let receivedCertSigners = [] + if (NodeList.activeListByIdSorted.length > 0) { + const certSigners = receivedCycleTracker[cycle.counter]?.[cycle.marker]?.['certSigners'] ?? 
new Set() + + try { + Logger.mainLogger.debug(`collectCycleData: Original cycle data: ${UtilsTypes.safeStringify(cycle)}`) + const cycleCopy = getRecordWithoutPostQ3Changes(cycle) + const computedMarker = Cycles.computeCycleMarker(cycleCopy) + Logger.mainLogger.debug(`collectCycleData: cycle copy ${UtilsTypes.safeStringify(cycleCopy)}`) + Logger.mainLogger.debug( + `collectCycleData: Computed marker for cycle ${cycle.counter}: ${computedMarker}, original marker: ${cycle.marker}` + ) + Logger.mainLogger.debug( + `collectCycleData: Validating ${(cycle as subscriptionCycleData).certificates?.length || 0} certificates for cycle ${cycle.counter}` + ) + + const validateCertsResult = validateCerts( + (cycle as subscriptionCycleData).certificates, + certSigners, + computedMarker, + cycleCopy as P2PTypes.CycleCreatorTypes.CycleData + ) + + if (validateCertsResult === false) { + nestedCountersInstance.countEvent('collectCycleData', 'certificate_validation_failed_' + cycle.mode, 1) + Logger.mainLogger.warn( + `collectCycleData: Certificate validation failed for cycle ${cycle.counter} from ${senderInfo} in ${cycle.mode} mode` + ) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: cycle.marker, + status: 'ERROR', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycle).length, + }, + error: 'Certificate validation failed', + }) + break + } + + nestedCountersInstance.countEvent('collectCycleData', 'certificate_validation_success_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: Certificate validation successful for cycle ${cycle.counter}`) + } catch (error) { + nestedCountersInstance.countEvent('collectCycleData', 'certificate_validation_error_' + cycle.mode, 1) + Logger.mainLogger.error( + `collectCycleData: Error during certificate validation for cycle ${cycle.counter}: ${error}` + ) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: cycle.marker, + status: 'ERROR', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycle).length, + }, + error: `Certificate validation error: ${error.message}`, + }) + break + } + } + + receivedCertSigners = (cycle as subscriptionCycleData).certificates.map((cert) => cert.sign.owner) + Logger.mainLogger.debug( + `collectCycleData: Received ${receivedCertSigners.length} certificate signers for cycle ${cycle.counter}` + ) + delete (cycle as subscriptionCycleData).certificates + + if (receivedCycleTracker[cycle.counter]) { + if (receivedCycleTracker[cycle.counter][cycle.marker]) { + nestedCountersInstance.countEvent('collectCycleData', 'add_signers_to_existing_marker_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: Adding signers to existing marker for cycle ${cycle.counter}`) + for (const signer of receivedCertSigners) + receivedCycleTracker[cycle.counter][cycle.marker]['certSigners'].add(signer) + } else { + if (!validateCycleData(cycle)) { + nestedCountersInstance.countEvent('collectCycleData', 'cycle_data_validation_failed_' + cycle.mode, 1) + Logger.mainLogger.warn( + `collectCycleData: Cycle data validation failed for cycle ${cycle.counter} with marker ${cycle.marker}` + ) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 
'CYCLE_RECORD', + dataHash: cycle.marker, + status: 'ERROR', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycle).length, + }, + error: 'Cycle data validation failed', + }) + continue + } + nestedCountersInstance.countEvent('collectCycleData', 'create_new_marker_entry_' + cycle.mode, 1) + Logger.mainLogger.debug( + `collectCycleData: Creating new marker entry for cycle ${cycle.counter} with marker ${cycle.marker}` + ) + receivedCycleTracker[cycle.counter][cycle.marker] = { + cycleInfo: cycle, + certSigners: new Set(receivedCertSigners), + } + Logger.mainLogger.debug('Different Cycle Record received', cycle.counter) + } + receivedCycleTracker[cycle.counter]['received']++ + Logger.mainLogger.debug( + `collectCycleData: Cycle ${cycle.counter} received count: ${receivedCycleTracker[cycle.counter]['received']}` + ) + } else { + if (!validateCycleData(cycle)) { + nestedCountersInstance.countEvent('collectCycleData', 'cycle_data_validation_failed_' + cycle.mode, 1) + Logger.mainLogger.warn( + `collectCycleData: Cycle data validation failed for cycle ${cycle.counter} with marker ${cycle.marker}` + ) + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: cycle.marker, + status: 'ERROR', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(cycle).length, + }, + error: 'Cycle data validation failed', + }) + continue + } + nestedCountersInstance.countEvent('collectCycleData', 'create_new_cycle_tracker_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: Creating new cycle tracker entry for cycle ${cycle.counter}`) + const cycleEntry = {} as ReceivedCycleEntry & { received?: number; saved?: boolean } + cycleEntry[cycle.marker] = { + cycleInfo: cycle, + certSigners: new Set(receivedCertSigners), + } + cycleEntry.received = 1 + cycleEntry.saved = false + receivedCycleTracker[cycle.counter] = cycleEntry + } + if (config.VERBOSE) Logger.mainLogger.debug('Cycle received', cycle.counter, receivedCycleTracker[cycle.counter]) + + if (NodeList.activeListByIdSorted.length === 0) { + nestedCountersInstance.countEvent('collectCycleData', 'no_active_nodes_direct_process_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: No active nodes, processing cycle ${cycle.counter} directly`) + processCycles([receivedCycleTracker[cycle.counter][cycle.marker].cycleInfo as P2PTypes.CycleCreatorTypes.CycleData]) + continue + } + + const requiredSenders = dataSenders && dataSenders.size ? 
Math.ceil(dataSenders.size / 2) : 1 + Logger.mainLogger.debug( + `collectCycleData: Cycle ${cycle.counter} requires ${requiredSenders} senders, current count: ${receivedCycleTracker[cycle.counter]['received']}` + ) + + if (receivedCycleTracker[cycle.counter]['received'] >= requiredSenders) { + nestedCountersInstance.countEvent('collectCycleData', 'enough_senders_process_' + cycle.mode, 1) + Logger.mainLogger.debug(`collectCycleData: Cycle ${cycle.counter} has enough senders, processing`) + + let bestScore = 0 + let bestMarker = '' + let prevMarker = '' + + if (cachedCycleRecords.length === 0) { + updateCacheFromDB() + .then(() => { + if (cachedCycleRecords.length > 0 && cycle.counter - cachedCycleRecords[0].counter > 1) { + Logger.mainLogger.debug(`updateCacheFromDB: No previous marker found for cycle ${cycle.counter}`) + } + processCycleWithPrevMarker() + }) + .catch((error) => { + Logger.mainLogger.error(`updateCacheFromDB: Error updating cache from db: ${error}`) + }) + } else { + processCycleWithPrevMarker() + } + + function processCycleWithPrevMarker() { + if (cachedCycleRecords.length > 0 && cycle.counter - cachedCycleRecords[0].counter === 1) { + prevMarker = cachedCycleRecords[0].marker + Logger.mainLogger.debug(`collectCycleData: Previous marker for scoring: ${prevMarker}`) + } else { + Logger.mainLogger.debug(`collectCycleData: No previous marker found for cycle ${cycle.counter}`) + return + } + const markers = Object.entries(receivedCycleTracker[cycle.counter]) + .filter(([key]) => key !== 'saved' && key !== 'received') + .map(([, value]) => value) + + Logger.mainLogger.debug( + `collectCycleData: Found ${markers.length} different markers for cycle ${cycle.counter}` + ) + + for (const marker of markers) { + const scores = [] + for (const signer of marker['certSigners']) { + const score = scoreCert(signer as string, prevMarker) + scores.push(score) + Logger.mainLogger.debug(`collectCycleData: Cert from ${signer} scored ${score}`) + } + const sum = scores + .sort((a, b) => b - a) + .slice(0, 3) + .reduce((sum, score) => (sum += score), 0) + + Logger.mainLogger.debug(`collectCycleData: Marker ${marker['cycleInfo'].marker} scored ${sum}`) + + if (sum > bestScore) { + bestScore = sum + bestMarker = marker['cycleInfo'].marker + Logger.mainLogger.debug(`collectCycleData: New best marker: ${bestMarker} with score ${bestScore}`) + } + } + + Logger.mainLogger.debug( + `collectCycleData: Processing cycle ${cycle.counter} with best marker ${bestMarker}, score: ${bestScore}` + ) + processCycles([receivedCycleTracker[cycle.counter][bestMarker].cycleInfo as P2PTypes.CycleCreatorTypes.CycleData]) + receivedCycleTracker[cycle.counter]['saved'] = true + + nestedCountersInstance.countEvent('collectCycleData', 'cycle_processed_successfully_' + cycle.mode, 1) + + ArchiverLogging.logDataSync({ + sourceArchiver: senderInfo, + targetArchiver: config.ARCHIVER_IP, + cycle: cycle.counter, + dataType: 'CYCLE_RECORD', + dataHash: bestMarker, + status: 'COMPLETE', + operationId, + metrics: { + duration: Date.now() - startTime, + dataSize: StringUtils.safeStringify(receivedCycleTracker[cycle.counter][bestMarker].cycleInfo).length, + }, + }) + } + } + } + + if (Object.keys(receivedCycleTracker).length > maxCyclesInCycleTracker) { + nestedCountersInstance.countEvent('collectCycleData', 'cleanup_old_cycles', 1) + Logger.mainLogger.debug( + `collectCycleData: Cleaning up old cycles, current count: ${Object.keys(receivedCycleTracker).length}` + ) + for (const counter of Object.keys(receivedCycleTracker)) { + if 
(parseInt(counter) < getCurrentCycleCounter() - maxCyclesInCycleTracker) { + let totalTimes = receivedCycleTracker[counter]['received'] + let logCycle = false + + const markers = Object.entries(receivedCycleTracker[counter]) + .filter(([key]) => key !== 'saved' && key !== 'received') + .map(([, value]) => value) + + if (markers.length > 1) { + logCycle = true + nestedCountersInstance.countEvent('collectCycleData', 'multiple_markers_for_cycle', 1) + } + + for (const marker of markers) { + Logger.mainLogger.debug( + 'Cycle', + counter, + marker, + logCycle ? StringUtils.safeStringify([...receivedCycleTracker[counter][marker]['certSigners']]) : '', + logCycle ? receivedCycleTracker[counter][marker] : '' + ) + } + if (logCycle) Logger.mainLogger.debug(`Cycle ${counter} has ${markers.length} different markers!`) + Logger.mainLogger.debug(`Received ${totalTimes} times for cycle counter ${counter}`) + delete receivedCycleTracker[counter] + } + } + } +} + +function validateCerts( + certs: P2PTypes.CycleCreatorTypes.CycleCert[], + certSigners: Set, + inpMarker: string, + cycleData: P2PTypes.CycleCreatorTypes.CycleData +) { + nestedCountersInstance.countEvent('validateCerts', 'validation', 1) + Logger.mainLogger.debug(`validateCerts: Validating ${certs.length} certificates against marker ${inpMarker}`) + + for (const cert of certs) { + const cleanCert: P2PTypes.CycleCreatorTypes.CycleCert = { + marker: cert.marker, + sign: cert.sign, + } + if (cleanCert.marker !== inpMarker) { + nestedCountersInstance.countEvent('validateCerts', 'markerMismatch', 1) + validationTracker.add({ cycle: cycleData }) + return false + } + if (NodeList.activeListByIdSorted.some((node) => node.publicKey === cleanCert.sign.owner) === false) { + nestedCountersInstance.countEvent('validateCerts', 'badOwner', 1) + Logger.mainLogger.warn(`validateCerts: bad owner ${cleanCert.sign.owner} not found in active nodes`) + return false + } + if (certSigners.has(cert.sign.owner)) { + nestedCountersInstance.countEvent('validateCerts', 'skipExistingSigner', 1) + Logger.mainLogger.debug(`validateCerts: Skipping already verified cert from ${cert.sign.owner}`) + continue + } + if (!Crypto.verify(cleanCert)) { + nestedCountersInstance.countEvent('validateCerts', 'badSignature', 1) + Logger.mainLogger.warn(`validateCerts: bad signature from ${cleanCert.sign.owner}`) + return false + } + nestedCountersInstance.countEvent('validateCerts', 'validCert', 1) + } + + Logger.mainLogger.debug(`validateCerts: All certificates validated successfully`) + return true +} + +export function scoreCert(pubKey: string, prevMarker: P2PTypes.CycleCreatorTypes.CycleMarker): number { + try { + const node = NodeList.byPublicKey.get(pubKey) + const id = node.id + const obj = { id } + const hid = Crypto.hashObj(obj) + + const out = XOR(prevMarker, hid) + + if (config.nerfNonFoundationCertScores && !node.foundationNode) { + return out & 0x0fffffff + } + + return out + } catch (err) { + Logger.mainLogger.error('scoreCert ERR:', err) + return 0 + } +} + +function getRecordWithoutPostQ3Changes(cycle: P2PTypes.CycleCreatorTypes.CycleRecord) { + Logger.mainLogger.debug(`getRecordWithoutPostQ3Changes: Processing cycle ${cycle.counter}`) + + const cycleCopy = StringUtils.safeJsonParse(StringUtils.safeStringify(cycle)) + delete cycleCopy.marker + delete cycleCopy.certificates + cycleCopy.nodeListHash = '' + cycleCopy.archiverListHash = '' + cycleCopy.standbyNodeListHash = '' + cycleCopy.joinedConsensors.forEach((jc) => (jc.syncingTimestamp = 0)) + return cycleCopy +} + +export 
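// Annotation (not part of the patch): usage sketch for the retrying cycle fetch
// defined below. syncCycleData fetches one cycle from other archivers, recomputes
// and checks its marker, and only then hands it to processCycles. The gap-filling
// wrapper here is hypothetical, not taken from this changeset:
//
//   async function backfillCycleGap(firstMissing: number, lastMissing: number): Promise<number> {
//     let synced = 0
//     for (let counter = firstMissing; counter <= lastMissing; counter++) {
//       if (await syncCycleData(counter)) synced++
//       else Logger.mainLogger.error(`backfillCycleGap: giving up on cycle ${counter}`)
//     }
//     return synced
//   }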
async function syncCycleData(cycle: number): Promise { + const MAX_RETRIES = 3 + let retryCount = 0 + let success = false + + Logger.mainLogger.debug(`syncCycleData: Starting sync for cycle ${cycle}`) + Logger.mainLogger.debug(`syncCycleData: Active nodes count: ${NodeList.activeListByIdSorted.length}`) + + while (!success && retryCount < MAX_RETRIES) { + try { + Logger.mainLogger.debug(`syncCycleData: Attempt ${retryCount + 1} for cycle ${cycle}`) + + const res = (await queryFromArchivers( + RequestDataType.CYCLE, + { + start: cycle, + end: cycle, + }, + QUERY_TIMEOUT_MAX + )) as ArchiverCycleResponse + + if (res && res.cycleInfo && res.cycleInfo.length > 0) { + const cycleData = res.cycleInfo[0] + Logger.mainLogger.debug(`syncCycleData: Received data for cycle ${cycle}, marker: ${cycleData.marker}`) + + if (!validateCycleData(cycleData)) { + Logger.mainLogger.error(`syncCycleData: Invalid cycle data for cycle ${cycle}`) + Logger.mainLogger.error(`syncCycleData: Cycle validation failed, checking marker computation...`) + nestedCountersInstance.countEvent('archiver', 'cycle_validation_failed - ' + cycle) + + const cycleDataCopy = { ...cycleData } + delete cycleDataCopy.marker + const computedMarker = Cycles.computeCycleMarker(cycleDataCopy) + Logger.mainLogger.error( + `syncCycleData: Computed marker: ${computedMarker}, received marker: ${cycleData.marker}` + ) + + retryCount++ + continue + } + + await processCycles([cycleData]) + Logger.mainLogger.debug(`syncCycleData: Successfully synced and processed cycle ${cycle}`) + success = true + return true + } else { + Logger.mainLogger.error( + `syncCycleData: Failed to get cycle data for cycle ${cycle}, attempt ${retryCount + 1} of ${MAX_RETRIES}` + ) + retryCount++ + } + } catch (error) { + Logger.mainLogger.error(`syncCycleData: Error syncing cycle data for cycle ${cycle}: ${error}`) + retryCount++ + } + } + + Logger.mainLogger.error(`syncCycleData: All attempts to sync cycle ${cycle} failed`) + return false +} + +// Import these from other files to avoid circular dependencies +import { queryFromArchivers } from '../API' +import { RequestDataType, ArchiverCycleResponse } from './types' + +const QUERY_TIMEOUT_MAX = 30 // 30seconds \ No newline at end of file diff --git a/src/Data/dataComparison.ts b/src/Data/dataComparison.ts new file mode 100644 index 00000000..fd3a96f5 --- /dev/null +++ b/src/Data/dataComparison.ts @@ -0,0 +1,154 @@ +import { P2P as P2PTypes } from '@shardeum-foundation/lib-types' +import * as Logger from '../Logger' +import * as CycleDB from '../dbstore/cycles' +import * as ReceiptDB from '../dbstore/receipts' +import * as OriginalTxDB from '../dbstore/originalTxsData' +import { Utils as StringUtils } from '@shardeum-foundation/lib-types' +import { config } from '../Config' +import { + CompareResponse, + ArchiverCycleResponse, + ArchiverReceiptResponse, + ArchiverOriginalTxResponse, + RequestDataType +} from './types' +import { queryFromArchivers } from '../API' + +const QUERY_TIMEOUT_MAX = 30 // 30seconds + +export async function compareWithOldOriginalTxsData(lastStoredOriginalTxCycle = 0): Promise { + const numberOfCyclesTocompare = 10 + let success = false + let matchedCycle = 0 + const endCycle = lastStoredOriginalTxCycle + const startCycle = endCycle - numberOfCyclesTocompare > 0 ? 
endCycle - numberOfCyclesTocompare : 0
+  const response = (await queryFromArchivers(
+    RequestDataType.ORIGINALTX,
+    {
+      startCycle,
+      endCycle,
+      type: 'tally',
+    },
+    QUERY_TIMEOUT_MAX
+  )) as ArchiverOriginalTxResponse
+
+  if (!response || !response.originalTxs) {
+    Logger.mainLogger.error(`Can't fetch original tx data from cycle ${startCycle} to cycle ${endCycle} from archivers`)
+    return { success, matchedCycle }
+  }
+  const downloadedOriginalTxsByCycles = response.originalTxs as OriginalTxDB.OriginalTxDataCount[]
+
+  const oldOriginalTxCountByCycle = await OriginalTxDB.queryOriginalTxDataCountByCycles(startCycle, endCycle)
+
+  for (let i = 0; i < downloadedOriginalTxsByCycles.length; i++) {
+    const downloadedOriginalTx = downloadedOriginalTxsByCycles[i]
+    const oldOriginalTx = oldOriginalTxCountByCycle[i]
+    Logger.mainLogger.debug(downloadedOriginalTx, oldOriginalTx)
+    if (
+      !downloadedOriginalTx ||
+      !oldOriginalTx ||
+      downloadedOriginalTx.cycle !== oldOriginalTx.cycle ||
+      downloadedOriginalTx.originalTxDataCount !== oldOriginalTx.originalTxDataCount
+    ) {
+      return {
+        success,
+        matchedCycle,
+      }
+    }
+    success = true
+    matchedCycle = downloadedOriginalTx.cycle
+  }
+  success = true
+  return { success, matchedCycle }
+}
+
+export async function compareWithOldReceiptsData(lastStoredReceiptCycle = 0): Promise<CompareResponse> {
+  const numberOfCyclesTocompare = 10
+  let success = false
+  let matchedCycle = 0
+  const endCycle = lastStoredReceiptCycle
+  const startCycle = endCycle - numberOfCyclesTocompare > 0 ? endCycle - numberOfCyclesTocompare : 0
+  const response = (await queryFromArchivers(
+    RequestDataType.RECEIPT,
+    {
+      startCycle,
+      endCycle,
+      type: 'tally',
+    },
+    QUERY_TIMEOUT_MAX
+  )) as ArchiverReceiptResponse
+
+  if (!response || !response.receipts) {
+    Logger.mainLogger.error(`Can't fetch receipts data from cycle ${startCycle} to cycle ${endCycle} from archivers`)
+    return { success, matchedCycle }
+  }
+  const downloadedReceiptCountByCycles = response.receipts as ReceiptDB.ReceiptCount[]
+
+  const oldReceiptCountByCycle = await ReceiptDB.queryReceiptCountByCycles(startCycle, endCycle)
+  for (let i = 0; i < downloadedReceiptCountByCycles.length; i++) {
+    const downloadedReceipt = downloadedReceiptCountByCycles[i]
+    const oldReceipt = oldReceiptCountByCycle[i]
+    Logger.mainLogger.debug(downloadedReceipt, oldReceipt)
+    if (
+      !downloadedReceipt ||
+      !oldReceipt ||
+      downloadedReceipt.cycle !== oldReceipt.cycle ||
+      downloadedReceipt.receiptCount !== oldReceipt.receiptCount
+    ) {
+      return {
+        success,
+        matchedCycle,
+      }
+    }
+    success = true
+    matchedCycle = downloadedReceipt.cycle
+  }
+  success = true
+  return { success, matchedCycle }
+}
+
+export async function compareWithOldCyclesData(lastCycleCounter = 0): Promise<CompareResponse> {
+  try {
+    const numberOfCyclesTocompare = 10
+    const start = lastCycleCounter - numberOfCyclesTocompare
+    const end = lastCycleCounter
+    const response = (await queryFromArchivers(
+      RequestDataType.CYCLE,
+      {
+        start,
+        end,
+      },
+      QUERY_TIMEOUT_MAX
+    )) as ArchiverCycleResponse
+    // Guard with || (not &&) so a null response cannot be dereferenced below
+    if (!response || !response.cycleInfo) {
+      throw Error(`Can't fetch data from cycle ${start} to cycle ${end} from archivers`)
+    }
+    const downloadedCycles = response.cycleInfo
+    const oldCycles = await CycleDB.queryCycleRecordsBetween(start, end)
+    let success = false
+    let matchedCycle = 0
+    for (let i = 0; i < downloadedCycles.length; i++) {
+      const downloadedCycle = downloadedCycles[i]
+      const oldCycle = oldCycles[i]
+      if (
+        !downloadedCycle ||
+        !oldCycle ||
+        StringUtils.safeStringify(downloadedCycle) !== StringUtils.safeStringify(oldCycle)
+      ) {
+        if (config.VERBOSE) {
+          // Optional chaining: this branch is also reached when one side is missing
+          Logger.mainLogger.error('Mismatched cycle Number', downloadedCycle?.counter, oldCycle?.counter)
+        }
+        return {
+          success,
+          matchedCycle,
+        }
+      }
+      success = true
+      matchedCycle = downloadedCycle.counter
+    }
+    return { success, matchedCycle }
+  } catch (error) {
+    Logger.mainLogger.error('compareWithOldCyclesData error: ' + error)
+    return { success: false, matchedCycle: 0 }
+  }
+}
\ No newline at end of file
diff --git a/src/Data/dataRequests.ts b/src/Data/dataRequests.ts
new file mode 100644
index 00000000..81fbd1e5
--- /dev/null
+++ b/src/Data/dataRequests.ts
@@ -0,0 +1,42 @@
+import * as P2P from '../P2P'
+import * as Logger from '../Logger'
+import * as NodeList from '../NodeList'
+import { config } from '../Config'
+import { DataRequest, DataRequestTypes } from './types'
+
+export async function sendDataRequest(
+  nodeInfo: NodeList.ConsensusNodeInfo,
+  requestType: DataRequestTypes[keyof DataRequestTypes]
+): Promise<boolean> {
+  const dataRequest = {
+    subscriber_id: config.ARCHIVER_PUBLIC_KEY,
+    data: requestType,
+  }
+  const REQUEST_NETCONFIG_TIMEOUT_SECOND = 2
+  let response = null
+  try {
+    response = await P2P.postJson(
+      `http://${nodeInfo.ip}:${nodeInfo.port}/subscribe`,
+      dataRequest,
+      REQUEST_NETCONFIG_TIMEOUT_SECOND
+    )
+  } catch (error) {
+    if (error.message && error.message.includes('ECONNREFUSED')) {
+      Logger.mainLogger.warn(`Unable to connect to node ${nodeInfo.ip}:${nodeInfo.port}: ${error.message}`)
+    } else {
+      Logger.mainLogger.error(`Error sending data request to node ${nodeInfo.ip}:${nodeInfo.port}: ${error}`)
+    }
+    return false
+  }
+  // Null-check before reading .success: postJson may resolve to null on a bad response
+  if (response && response.success) {
+    Logger.mainLogger.debug(
+      `${requestType} request sent to node ${nodeInfo.publicKey}. Response: ${JSON.stringify(response)}`
+    )
+    return true
+  } else {
+    Logger.mainLogger.warn(
+      `${requestType} request failed for node ${nodeInfo.publicKey}. 
Response: ${JSON.stringify(response)}` + ) + return false + } +} \ No newline at end of file diff --git a/src/Data/dataSync.ts b/src/Data/dataSync.ts new file mode 100644 index 00000000..f49eec8d --- /dev/null +++ b/src/Data/dataSync.ts @@ -0,0 +1,802 @@ +import { P2P as P2PTypes } from '@shardeum-foundation/lib-types' +import * as Logger from '../Logger' +import * as NodeList from '../NodeList' +import * as State from '../State' +import * as Cycles from './Cycles' +import * as Utils from '../Utils' +import * as CycleDB from '../dbstore/cycles' +import * as ReceiptDB from '../dbstore/receipts' +import * as OriginalTxDB from '../dbstore/originalTxsData' +import * as AccountDB from '../dbstore/accounts' +import * as P2P from '../P2P' +import { config } from '../Config' +import { nestedCountersInstance } from '../profiler/nestedCounters' +import { ArchiverLogging } from '../profiler/archiverLogging' +import { + ChangeSquasher, + parse, + applyNodeListChange, + activeNodeCount, + totalNodeCount +} from './CycleParser' +import { validateCycle } from './Cycles' +import { + ArchiverCycleResponse, + ArchiverReceiptResponse, + ArchiverOriginalTxResponse, + ArchiverTotalDataResponse, + ArchiverAccountResponse, + RequestDataType, + ArchiverWithRetries, + StoredReceiptObject +} from './types' +import { queryFromArchivers } from '../API' +import { fetchCycleRecords, getNewestCycleFromArchivers, getCurrentCycleCounter } from './Cycles' +import { getTotalDataFromArchivers } from './missingFunctions' + +const QUERY_TIMEOUT_MAX = 30 // 30seconds +const MAX_CYCLES_PER_REQUEST = config.REQUEST_LIMIT.MAX_CYCLES_PER_REQUEST +const MAX_RECEIPTS_PER_REQUEST = config.REQUEST_LIMIT.MAX_RECEIPTS_PER_REQUEST +const MAX_ORIGINAL_TXS_PER_REQUEST = config.REQUEST_LIMIT.MAX_ORIGINAL_TXS_PER_REQUEST +const MAX_BETWEEN_CYCLES_PER_REQUEST = config.REQUEST_LIMIT.MAX_BETWEEN_CYCLES_PER_REQUEST +const MAX_ACCOUNTS_PER_REQUEST = config.REQUEST_LIMIT.MAX_ACCOUNTS_PER_REQUEST +const GENESIS_ACCOUNTS_CYCLE_RANGE = { + startCycle: 0, + endCycle: 5, +} +import { validateCycleData } from './Cycles' +import { syncV2 } from '../sync-v2' +import { getLastUpdatedCycle, updateLastUpdatedCycle } from '../utils/cycleTracker' +import { getCurrentConsensusRadius } from './networkConfig' +import { Result } from 'neverthrow' + +// Placeholder function declarations that will be implemented by Data.ts +export async function storeReceiptData( + receipts: any[], + senderInfo: string, + validate: boolean, + saveOnlyGossipData: boolean, + gossipReceipt: boolean +): Promise { + // Implementation in Data.ts +} + +export async function storeOriginalTxData( + originalTxs: any[], + senderInfo: string, + saveOnlyGossipData: boolean +): Promise { + // Implementation in Data.ts +} + +export async function storeAccountData(data: { accounts?: any[]; receipts?: any[] }): Promise { + // Implementation in Data.ts +} + +export async function storeCycleData(cycles: P2PTypes.CycleCreatorTypes.CycleData[]): Promise { + // Implementation in Data.ts +} + +export async function processCycles(cycles: P2PTypes.CycleCreatorTypes.CycleData[]): Promise { + // Implementation in Data.ts +} + +export async function syncGenesisAccountsFromConsensor( + totalGenesisAccounts = 0, + firstConsensor: NodeList.ConsensusNodeInfo +): Promise { + if (totalGenesisAccounts <= 0) return + let startAccount = 0 + let totalDownloadedAccounts = 0 + while (startAccount <= totalGenesisAccounts) { + Logger.mainLogger.debug(`Downloading accounts from ${startAccount}`) + const response = (await 
P2P.getJson(
+      `http://${firstConsensor.ip}:${firstConsensor.port}/genesis_accounts?start=${startAccount}`,
+      QUERY_TIMEOUT_MAX
+    )) as ArchiverAccountResponse
+    if (response && response.accounts) {
+      if (response.accounts.length < MAX_ACCOUNTS_PER_REQUEST) {
+        Logger.mainLogger.debug('Download completed for accounts')
+      }
+      Logger.mainLogger.debug(`Downloaded accounts`, response.accounts.length)
+      await storeAccountData({ accounts: response.accounts })
+      totalDownloadedAccounts += response.accounts.length
+      startAccount += MAX_ACCOUNTS_PER_REQUEST
+    } else {
+      Logger.mainLogger.debug('Genesis Accounts Query', 'Invalid download response')
+      // Bail out instead of re-requesting the same offset forever on a bad response
+      break
+    }
+  }
+  Logger.mainLogger.debug(`Total downloaded accounts`, totalDownloadedAccounts)
+  Logger.mainLogger.debug('Sync genesis accounts completed!')
+}
+
+export async function syncGenesisTransactionsFromConsensor(
+  totalGenesisTransactions = 0,
+  firstConsensor: NodeList.ConsensusNodeInfo
+): Promise<void> {
+  if (totalGenesisTransactions <= 0) return
+  let startTransaction = 0
+  let endTransaction = startTransaction + MAX_ACCOUNTS_PER_REQUEST
+  let page = 1
+  let complete = false
+  while (!complete) {
+    Logger.mainLogger.debug(`Downloading transactions from ${startTransaction} to ${endTransaction}`)
+    const response = (await P2P.getJson(
+      `http://${firstConsensor.ip}:${firstConsensor.port}/genesis_transactions?start=${startTransaction}&end=${endTransaction}&page=${page}`,
+      QUERY_TIMEOUT_MAX
+    )) as ArchiverAccountResponse
+    if (response && response.transactions) {
+      if (response.transactions.length < MAX_ACCOUNTS_PER_REQUEST) {
+        complete = true
+        Logger.mainLogger.debug('Download completed for transactions')
+      }
+      Logger.mainLogger.debug(`Downloaded transactions`, response.transactions.length)
+      await storeAccountData({ receipts: response.transactions })
+      startTransaction = endTransaction + 1
+      endTransaction += MAX_ACCOUNTS_PER_REQUEST
+      page++
+    } else {
+      Logger.mainLogger.debug('Genesis Transactions Query', 'Invalid download response')
+      // Bail out so an invalid response cannot loop forever (complete is never set here)
+      break
+    }
+  }
+  Logger.mainLogger.debug('Sync genesis transactions completed!')
+}
+
+export async function buildNodeListFromStoredCycle(
+  lastStoredCycle: P2PTypes.CycleCreatorTypes.CycleData
+): Promise<void> {
+  Logger.mainLogger.debug('lastStoredCycle', lastStoredCycle)
+  Logger.mainLogger.debug('buildNodeListFromStoredCycle:')
+  Logger.mainLogger.debug(`Syncing till cycle ${lastStoredCycle.counter}...`)
+  const cyclesToGet = 2 * Math.floor(Math.sqrt(lastStoredCycle.active)) + 2
+  Logger.mainLogger.debug(`Cycles to get is ${cyclesToGet}`)
+
+  const CycleChain = []
+  const squasher = new ChangeSquasher()
+
+  CycleChain.unshift(lastStoredCycle)
+  squasher.addChange(parse(CycleChain[0]))
+
+  do {
+    let end: number = CycleChain[0].counter - 1
+    let start: number = end - cyclesToGet
+    if (start < 0) start = 0
+    if (end < start) end = start
+    Logger.mainLogger.debug(`Getting cycles ${start} - ${end}...`)
+    const prevCycles = await CycleDB.queryCycleRecordsBetween(start, end)
+
+    if (prevCycles.length < 1) throw new Error('Got empty previous cycles')
+
+    // Sort newest-first so the chain is prepended from the most recent record backwards
+    prevCycles.sort((a, b) => (a.counter > b.counter ? 
-1 : 1)) + + let prepended = 0 + for (const prevCycle of prevCycles) { + CycleChain.unshift(prevCycle) + squasher.addChange(parse(prevCycle)) + prepended++ + + if ( + squasher.final.updated.length >= activeNodeCount(lastStoredCycle) && + squasher.final.added.length >= totalNodeCount(lastStoredCycle) + ) { + break + } + } + + Logger.mainLogger.debug( + `Got ${squasher.final.updated.length} active nodes, need ${activeNodeCount(lastStoredCycle)}` + ) + Logger.mainLogger.debug(`Got ${squasher.final.added.length} total nodes, need ${totalNodeCount(lastStoredCycle)}`) + if (squasher.final.added.length < totalNodeCount(lastStoredCycle)) + Logger.mainLogger.debug('Short on nodes. Need to get more cycles. Cycle:' + lastStoredCycle.counter) + + if (prepended < 1) throw new Error('Unable to prepend any previous cycles') + } while ( + squasher.final.updated.length < activeNodeCount(lastStoredCycle) || + squasher.final.added.length < totalNodeCount(lastStoredCycle) + ) + + applyNodeListChange(squasher.final) + Logger.mainLogger.debug('NodeList after sync', NodeList.getActiveList()) + Cycles.setCurrentCycleCounter(lastStoredCycle.counter) + Cycles.setCurrentCycleMarker(lastStoredCycle.marker) + Cycles.setCurrentCycleDuration(lastStoredCycle.duration) + Logger.mainLogger.debug('Latest cycle after sync', lastStoredCycle.counter) +} + +export async function syncCyclesAndNodeList(lastStoredCycleCount = 0): Promise { + Logger.mainLogger.debug('Getting newest cycle...') + const cycleToSyncTo = await getNewestCycleFromArchivers() + Logger.mainLogger.debug('cycleToSyncTo', cycleToSyncTo) + Logger.mainLogger.debug(`Syncing till cycle ${cycleToSyncTo.counter}...`) + + const cyclesToGet = 2 * Math.floor(Math.sqrt(cycleToSyncTo.active)) + 2 + Logger.mainLogger.debug(`Cycles to get is ${cyclesToGet}`) + + const CycleChain = [] + const squasher = new ChangeSquasher() + + CycleChain.unshift(cycleToSyncTo) + squasher.addChange(parse(CycleChain[0])) + + do { + let end: number = CycleChain[0].counter - 1 + let start: number = end - cyclesToGet + if (start < 0) start = 0 + if (end < start) end = start + Logger.mainLogger.debug(`Getting cycles ${start} - ${end}...`) + const prevCycles = await fetchCycleRecords(start, end) + + if (prevCycles.length < 1) throw new Error('Got empty previous cycles') + + prevCycles.sort((a, b) => (a.counter > b.counter ? -1 : 1)) + + let prepended = 0 + for (const prevCycle of prevCycles) { + if (validateCycle(prevCycle, CycleChain[0]) === false) { + Logger.mainLogger.error(`Record ${prevCycle.counter} failed validation`) + break + } + CycleChain.unshift(prevCycle) + squasher.addChange(parse(prevCycle)) + prepended++ + + if ( + squasher.final.updated.length >= activeNodeCount(cycleToSyncTo) && + squasher.final.added.length >= totalNodeCount(cycleToSyncTo) + ) { + break + } + } + + Logger.mainLogger.debug(`Got ${squasher.final.updated.length} active nodes, need ${activeNodeCount(cycleToSyncTo)}`) + Logger.mainLogger.debug(`Got ${squasher.final.added.length} total nodes, need ${totalNodeCount(cycleToSyncTo)}`) + if (squasher.final.added.length < totalNodeCount(cycleToSyncTo)) + Logger.mainLogger.debug('Short on nodes. Need to get more cycles. 
Cycle:' + cycleToSyncTo.counter) + + if (prepended < 1) throw new Error('Unable to prepend any previous cycles') + } while ( + squasher.final.updated.length < activeNodeCount(cycleToSyncTo) || + squasher.final.added.length < totalNodeCount(cycleToSyncTo) + ) + + applyNodeListChange(squasher.final) + Logger.mainLogger.debug('NodeList after sync', NodeList.getActiveList()) + + for (let i = 0; i < CycleChain.length; i++) { + const record = CycleChain[i] + Cycles.CycleChain.set(record.counter, { ...record }) + if (i === CycleChain.length - 1) await storeCycleData(CycleChain) + Cycles.setCurrentCycleCounter(record.counter) + Cycles.setCurrentCycleMarker(record.marker) + } + Logger.mainLogger.debug('Cycle chain is synced. Size of CycleChain', Cycles.CycleChain.size) + + let endCycle = CycleChain[0].counter - 1 + Logger.mainLogger.debug('endCycle counter', endCycle, 'lastStoredCycleCount', lastStoredCycleCount) + if (endCycle > lastStoredCycleCount) { + Logger.mainLogger.debug(`Downloading old cycles from cycles ${lastStoredCycleCount} to cycle ${endCycle}!`) + } + let savedCycleRecord = CycleChain[0] + while (endCycle > lastStoredCycleCount) { + let nextEnd: number = endCycle - MAX_CYCLES_PER_REQUEST + if (nextEnd < 0) nextEnd = 0 + Logger.mainLogger.debug(`Getting cycles ${nextEnd} - ${endCycle} ...`) + const prevCycles = await fetchCycleRecords(nextEnd, endCycle) + + if (!prevCycles || prevCycles.length < 1) throw new Error('Got empty previous cycles') + prevCycles.sort((a, b) => (a.counter > b.counter ? -1 : 1)) + + const combineCycles = [] + for (const prevCycle of prevCycles) { + if (validateCycle(prevCycle, savedCycleRecord) === false) { + Logger.mainLogger.error(`Record ${prevCycle.counter} failed validation`) + Logger.mainLogger.debug('fail', prevCycle, savedCycleRecord) + break + } + savedCycleRecord = prevCycle + combineCycles.push(prevCycle) + } + await storeCycleData(combineCycles) + endCycle = nextEnd - 1 + } +} + +export async function syncCyclesAndNodeListV2( + activeArchivers: State.ArchiverNodeInfo[], + lastStoredCycleCount = 0 +): Promise { + Logger.mainLogger.debug('Syncing validators and latest cycle...') + const syncResult = await syncV2(activeArchivers) + let cycleToSyncTo: P2PTypes.CycleCreatorTypes.CycleData + if (syncResult.isOk()) { + cycleToSyncTo = syncResult.value + } else { + throw syncResult.error + } + + Logger.mainLogger.debug('cycleToSyncTo', cycleToSyncTo) + Logger.mainLogger.debug(`Syncing till cycle ${cycleToSyncTo.counter}...`) + + await processCycles([cycleToSyncTo]) + + await downloadOldCycles(cycleToSyncTo, lastStoredCycleCount) + + return true +} + +export async function syncCyclesBetweenCycles(lastStoredCycle = 0, cycleToSyncTo = 0): Promise { + const MAX_RETRIES = 3 + let retryCount = 0 + + let startCycle = lastStoredCycle + let endCycle = startCycle + MAX_CYCLES_PER_REQUEST + + while (cycleToSyncTo > startCycle) { + if (endCycle > cycleToSyncTo) endCycle = cycleToSyncTo + Logger.mainLogger.debug(`Downloading cycles from ${startCycle} to ${endCycle}`) + + let success = false + retryCount = 0 + + while (!success && retryCount < MAX_RETRIES) { + const res = (await queryFromArchivers( + RequestDataType.CYCLE, + { + start: startCycle, + end: endCycle, + }, + QUERY_TIMEOUT_MAX + )) as ArchiverCycleResponse + + if (res && res.cycleInfo) { + const cycles = res.cycleInfo as P2PTypes.CycleCreatorTypes.CycleData[] + Logger.mainLogger.debug(`Downloaded cycles`, cycles.length) + + let validCyclesCount = 0 + for (const cycle of cycles) { + if 
(!validateCycleData(cycle)) { + Logger.mainLogger.debug('Found invalid cycle data') + continue + } + await processCycles([cycle]) + validCyclesCount++ + } + + success = true + + if (cycles.length < MAX_CYCLES_PER_REQUEST || validCyclesCount === 0) { + startCycle += Math.max(cycles.length, 1) + endCycle = startCycle + MAX_CYCLES_PER_REQUEST + if (startCycle >= cycleToSyncTo) { + Logger.mainLogger.debug('Sync cycles completed!') + return true + } + break + } + } else { + Logger.mainLogger.debug(`Invalid cycle download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) + retryCount++ + if (retryCount >= MAX_RETRIES) { + Logger.mainLogger.error('Max retries reached for cycle download') + return false + } + } + } + + if (success) { + startCycle = endCycle + 1 + endCycle += MAX_CYCLES_PER_REQUEST + } + } + + return true +} + +export async function syncReceipts(): Promise { + const MAX_RETRIES = 3 + let retryCount = 0 + + const lastUpdatedCycle = getLastUpdatedCycle() + Logger.mainLogger.debug(`[syncReceipts] Last updated cycle from tracker: ${lastUpdatedCycle}`) + + let startCycle = 0 + if (lastUpdatedCycle > 0) { + Logger.mainLogger.info(`[syncReceipts] Starting receipt sync from last updated cycle: ${lastUpdatedCycle}`) + startCycle = Math.max(lastUpdatedCycle - config.checkpoint.syncCycleBuffer, 0) + await syncReceiptsByCycle(startCycle) + return + } + + let response: ArchiverTotalDataResponse = await getTotalDataFromArchivers() + if (!response || response.totalReceipts < 0) { + return + } + + let { totalReceipts } = response + if (totalReceipts < 1) return + + let complete = false + let start = 0 + let end = start + MAX_RECEIPTS_PER_REQUEST + + while (!complete) { + if (end >= totalReceipts) { + response = await getTotalDataFromArchivers() + if (response && response.totalReceipts > 0) { + if (response.totalReceipts > totalReceipts) totalReceipts = response.totalReceipts + Logger.mainLogger.debug('totalReceiptsToSync', totalReceipts) + } + } + + Logger.mainLogger.debug(`Downloading receipts from ${start} to ${end}`) + let success = false + retryCount = 0 + + while (!success && retryCount < MAX_RETRIES) { + const res = (await queryFromArchivers( + RequestDataType.RECEIPT, + { + start: start, + end: end, + }, + QUERY_TIMEOUT_MAX + )) as ArchiverReceiptResponse + + if (res && res.receipts) { + const downloadedReceipts = res.receipts as ReceiptDB.Receipt[] + Logger.mainLogger.debug(`Downloaded receipts`, downloadedReceipts.length) + await storeReceiptData(downloadedReceipts, '', false, false, true) + success = true + + if (downloadedReceipts.length < MAX_RECEIPTS_PER_REQUEST) { + start += downloadedReceipts.length + end = start + MAX_RECEIPTS_PER_REQUEST + response = await getTotalDataFromArchivers() + if (response && response.totalReceipts > 0) { + if (response.totalReceipts > totalReceipts) totalReceipts = response.totalReceipts + if (start >= totalReceipts) { + complete = true + Logger.mainLogger.debug('Download receipts completed') + } + } + } + } else { + Logger.mainLogger.debug(`Invalid download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) + retryCount++ + if (retryCount >= MAX_RETRIES) { + Logger.mainLogger.error('Max retries reached for receipt download') + start = end + 1 + end += MAX_RECEIPTS_PER_REQUEST + if (start >= totalReceipts) { + complete = true + } + } + } + } + + if (success) { + start = end + 1 + end += MAX_RECEIPTS_PER_REQUEST + } + } + + Logger.mainLogger.debug('Sync receipts data completed!') +} + +class ArchiverSelector { + private archivers: 
ArchiverWithRetries[] + private currentIndex: number = 0 + private readonly maxRetries: number = 3 + + constructor() { + this.archivers = State.otherArchivers.map((archiver) => ({ + archiver, + retriesLeft: this.maxRetries, + })) + Utils.shuffleArray(this.archivers) + } + + getCurrentArchiver(): State.ArchiverNodeInfo | null { + if (this.currentIndex >= this.archivers.length) { + return null + } + return this.archivers[this.currentIndex].archiver + } + + markCurrentArchiverFailed(): State.ArchiverNodeInfo | null { + if (this.currentIndex >= this.archivers.length) { + return null + } + + this.archivers[this.currentIndex].retriesLeft-- + + if (this.archivers[this.currentIndex].retriesLeft <= 0) { + this.currentIndex++ + } + + return this.getCurrentArchiver() + } + + hasMoreArchivers(): boolean { + return this.currentIndex < this.archivers.length + } +} + +export async function syncReceiptsByCycle(lastStoredReceiptCycle = 0, cycleToSyncTo = 0): Promise { + if (lastStoredReceiptCycle === 0) { + const trackedCycle = getLastUpdatedCycle() + if (trackedCycle > 0) { + Logger.mainLogger.info(`[syncReceiptsByCycle] Using last updated cycle from tracker: ${trackedCycle}`) + lastStoredReceiptCycle = Math.max(trackedCycle - config.checkpoint.syncCycleBuffer, 0) + } + } + + let totalCycles = cycleToSyncTo + let totalReceipts = 0 + if (cycleToSyncTo === 0) { + const response: ArchiverTotalDataResponse = await getTotalDataFromArchivers() + if (!response || response.totalReceipts < 0) { + return false + } + totalCycles = response.totalCycles + totalReceipts = response.totalReceipts + } + let startCycle = lastStoredReceiptCycle + let endCycle = startCycle + MAX_BETWEEN_CYCLES_PER_REQUEST + let receiptsCountToSyncBetweenCycles = 0 + let savedReceiptsCountBetweenCycles = 0 + let totalSavedReceiptsCount = 0 + let archiverSelector = new ArchiverSelector() + + while (true) { + if (endCycle > totalCycles) { + endCycle = totalCycles + totalSavedReceiptsCount = await ReceiptDB.queryReceiptCount() + } + if (cycleToSyncTo > 0) { + if (startCycle > cycleToSyncTo) { + Logger.mainLogger.debug(`Sync receipts data completed!`) + return true + } + if (endCycle > cycleToSyncTo) endCycle = cycleToSyncTo + } + Logger.mainLogger.debug(`Downloading receipts between cycles ${startCycle} to ${endCycle}`) + + const currentArchiver = archiverSelector.getCurrentArchiver() + if (!currentArchiver) { + Logger.mainLogger.error('No more archivers available for syncing receipts') + return false + } + + const res = (await P2P.getJson( + `http://${currentArchiver.ip}:${currentArchiver.port}/receipt?startCycle=${startCycle}&endCycle=${endCycle}&type=tally`, + QUERY_TIMEOUT_MAX + )) as ArchiverReceiptResponse + + if (res && res.receipts) { + const downloadedReceiptsByCycles = res.receipts as ReceiptDB.ReceiptCount[] + for (const receiptData of downloadedReceiptsByCycles) { + receiptsCountToSyncBetweenCycles += receiptData.receiptCount + } + Logger.mainLogger.debug( + `Total receipts to sync between cycles ${startCycle} to ${endCycle}`, + receiptsCountToSyncBetweenCycles + ) + startCycle = endCycle + 1 + endCycle += MAX_BETWEEN_CYCLES_PER_REQUEST + } else { + Logger.mainLogger.error( + `Failed to download receipts tally between cycles ${startCycle} to ${endCycle} from archiver ${currentArchiver.ip}:${currentArchiver.port}` + ) + archiverSelector.markCurrentArchiverFailed() + if (!archiverSelector.hasMoreArchivers()) { + Logger.mainLogger.error('All archivers failed for syncing receipts') + return false + } + continue + } + + 
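// Annotation (not part of the patch): the branch above only fetches a per-cycle
// receipt *tally* from the selected archiver; the paged loop below downloads the
// actual receipts for the already-tallied window [lastStoredReceiptCycle, startCycle - 1].
// Paging arithmetic, assuming a hypothetical MAX_RECEIPTS_PER_REQUEST of 100:
//   page 1: start = 0,   end = 100
//   page 2: start = 101, end = 200   (start = end + 1; end += MAX_RECEIPTS_PER_REQUEST)
// A page shorter than MAX_RECEIPTS_PER_REQUEST (or empty) marks the window as drained
// and advances the cycle tracker.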
Logger.mainLogger.debug( + `Downloading receipts between cycles ${lastStoredReceiptCycle} to ${startCycle - 1}`, + receiptsCountToSyncBetweenCycles + ) + + const MAX_RETRIES = 3 + let start = 0 + let end = start + MAX_RECEIPTS_PER_REQUEST + let hasMoreReceiptsToDownload = receiptsCountToSyncBetweenCycles > 0 + + while (hasMoreReceiptsToDownload) { + let success = false + let retryCount = 0 + + while (!success && retryCount < MAX_RETRIES) { + Logger.mainLogger.debug(`Downloading receipts from index ${start} to ${end}`) + const res2 = (await P2P.getJson( + `http://${currentArchiver.ip}:${currentArchiver.port}/receipt?startCycle=${lastStoredReceiptCycle}&endCycle=${ + startCycle - 1 + }&start=${start}&end=${end}`, + QUERY_TIMEOUT_MAX + )) as ArchiverReceiptResponse + + if (res2 && res2.receipts) { + const downloadedReceipts = res2.receipts as ReceiptDB.Receipt[] + Logger.mainLogger.debug(`Downloaded receipts`, downloadedReceipts.length) + const storageResult = await storeReceiptData(downloadedReceipts, '', false, false, true) as StoredReceiptObject + savedReceiptsCountBetweenCycles += storageResult.receipts.length + success = true + + if (downloadedReceipts.length === 0 || downloadedReceipts.length < MAX_RECEIPTS_PER_REQUEST) { + updateLastUpdatedCycle(startCycle - 1) + Logger.mainLogger.debug(`[syncReceiptsByCycle] Updated cycle tracker to cycle ${startCycle - 1}`) + hasMoreReceiptsToDownload = false + receiptsCountToSyncBetweenCycles = 0 + savedReceiptsCountBetweenCycles = 0 + break + } + } else { + Logger.mainLogger.debug(`Invalid download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) + retryCount++ + if (retryCount >= MAX_RETRIES) { + Logger.mainLogger.error('Max retries reached for receipt download') + archiverSelector.markCurrentArchiverFailed() + if (!archiverSelector.hasMoreArchivers()) { + Logger.mainLogger.error('All archivers failed for syncing receipts') + return false + } + } + } + } + if (success) { + start = end + 1 + end += MAX_RECEIPTS_PER_REQUEST + } + } + + if (cycleToSyncTo === 0 && totalSavedReceiptsCount >= totalReceipts) { + Logger.mainLogger.debug('Sync receipts data by cycle completed!') + return true + } + } +} + +export async function syncCyclesAndTxsData(lastStoredCycle = 0, cycleToSyncTo = 0): Promise { + let response: ArchiverTotalDataResponse = await getTotalDataFromArchivers() + if (!response || response.totalCycles < 0) { + return + } + + let { totalCycles, totalReceipts } = response + if (totalCycles < 1) return + + let completeForCycle = false + let startCycle = lastStoredCycle + let endCycle = startCycle + MAX_CYCLES_PER_REQUEST + const MAX_RETRIES = 3 + let retryCount = 0 + + while (!completeForCycle || startCycle < endCycle) { + if (endCycle >= totalCycles) { + endCycle = totalCycles + completeForCycle = true + response = await getTotalDataFromArchivers() + if (response && response.totalCycles > 0) { + if (response.totalCycles > totalCycles) totalCycles = response.totalCycles + if (response.totalReceipts > totalReceipts) totalReceipts = response.totalReceipts + Logger.mainLogger.debug('totalCyclesToSync', totalCycles, 'totalReceiptsToSync', totalReceipts) + } + } + + if (!completeForCycle) { + Logger.mainLogger.debug(`Downloading cycles from ${startCycle} to ${endCycle}`) + let success = false + retryCount = 0 + + while (!success && retryCount < MAX_RETRIES) { + const res = (await queryFromArchivers( + RequestDataType.CYCLE, + { + start: startCycle, + end: endCycle, + }, + QUERY_TIMEOUT_MAX + )) as ArchiverCycleResponse + if (res && 
res.cycleInfo) { + const cycles = res.cycleInfo + Logger.mainLogger.debug(`Downloaded cycles`, cycles.length) + for (const cycle of cycles) { + if (!validateCycleData(cycle)) { + Logger.mainLogger.debug('Found invalid cycle data') + continue + } + await processCycles([cycle]) + } + success = true + + const highestCycle = cycles.reduce((max, cycle) => Math.max(max, cycle.counter), 0) + if (highestCycle > 0) { + updateLastUpdatedCycle(highestCycle) + Logger.mainLogger.debug(`[syncCyclesAndTxsData] Updated cycle tracker to cycle ${highestCycle}`) + } + + if (cycles.length < MAX_CYCLES_PER_REQUEST) { + startCycle += cycles.length + 1 + endCycle += cycles.length + MAX_CYCLES_PER_REQUEST + } + } else { + Logger.mainLogger.debug(`Invalid cycle download response, attempt ${retryCount + 1} of ${MAX_RETRIES}`) + retryCount++ + if (retryCount >= MAX_RETRIES) { + Logger.mainLogger.error('Max retries reached for cycle download') + } + } + } + if (success) { + startCycle = endCycle + 1 + endCycle += MAX_CYCLES_PER_REQUEST + } + } + } + Logger.mainLogger.debug('Sync Cycle, Receipt & Original-Tx data completed!') +} + +export const syncCyclesAndTxsDataBetweenCycles = async (lastStoredCycle = 0, cycleToSyncTo = 0): Promise => { + Logger.mainLogger.debug(`Syncing cycles and txs data between cycles ${lastStoredCycle} and ${cycleToSyncTo}`) + await syncCyclesBetweenCycles(lastStoredCycle, cycleToSyncTo) + await syncReceiptsByCycle(lastStoredCycle, cycleToSyncTo) +} + +async function downloadOldCycles( + cycleToSyncTo: P2PTypes.CycleCreatorTypes.CycleData, + lastStoredCycleCount: number +): Promise { + let endCycle = cycleToSyncTo.counter - 1 + Logger.mainLogger.debug('endCycle counter', endCycle, 'lastStoredCycleCount', lastStoredCycleCount) + if (endCycle > lastStoredCycleCount) { + Logger.mainLogger.debug(`Downloading old cycles from cycles ${lastStoredCycleCount} to cycle ${endCycle}!`) + } + + let savedCycleRecord = cycleToSyncTo + const MAX_RETRY_COUNT = 3 + let retryCount = 0 + while (endCycle > lastStoredCycleCount) { + let startCycle: number = endCycle - MAX_CYCLES_PER_REQUEST + if (startCycle < 0) startCycle = 0 + if (startCycle < lastStoredCycleCount) startCycle = lastStoredCycleCount + Logger.mainLogger.debug(`Getting cycles ${startCycle} - ${endCycle} ...`) + const res = (await queryFromArchivers( + RequestDataType.CYCLE, + { + start: startCycle, + end: endCycle, + }, + QUERY_TIMEOUT_MAX + )) as ArchiverCycleResponse + if (!res || !res.cycleInfo || !Array.isArray(res.cycleInfo) || res.cycleInfo.length === 0) { + Logger.mainLogger.error(`Can't fetch data from cycle ${startCycle} to cycle ${endCycle} from archivers`) + if (retryCount < MAX_RETRY_COUNT) { + retryCount++ + continue + } else { + endCycle = startCycle - 1 + retryCount = 0 + } + } + + const prevCycles = res.cycleInfo as P2PTypes.CycleCreatorTypes.CycleData[] + if (prevCycles) prevCycles.sort((a, b) => (a.counter > b.counter ? 
-1 : 1))
+
+    const combineCycles: P2PTypes.CycleCreatorTypes.CycleData[] = []
+    for (const prevCycle of prevCycles) {
+      if (validateCycle(prevCycle, savedCycleRecord) === false) {
+        Logger.mainLogger.error(`Record ${prevCycle.counter} failed validation`)
+        Logger.mainLogger.debug('fail', prevCycle, savedCycleRecord)
+        // Match the other sync paths: stop at the first record that fails validation
+        // instead of storing it anyway
+        break
+      }
+      savedCycleRecord = prevCycle
+      combineCycles.push(prevCycle)
+    }
+    await storeCycleData(combineCycles)
+    endCycle = startCycle - 1
+  }
+}
\ No newline at end of file
diff --git a/src/Data/missingFunctions.ts b/src/Data/missingFunctions.ts
new file mode 100644
index 00000000..02a87906
--- /dev/null
+++ b/src/Data/missingFunctions.ts
@@ -0,0 +1,356 @@
+import { P2P as P2PTypes } from '@shardeum-foundation/lib-types'
+import * as Crypto from '../Crypto'
+import * as NodeList from '../NodeList'
+import * as State from '../State'
+import * as P2P from '../P2P'
+import * as Utils from '../Utils'
+import * as Logger from '../Logger'
+import { config } from '../Config'
+import { nestedCountersInstance } from '../profiler/nestedCounters'
+import { queryFromArchivers } from '../API'
+import {
+  ArchiverTotalDataResponse,
+  RequestDataType,
+  ArchiverAccountResponse,
+  DataRequest,
+  DataRequestTypes,
+  ArchiverReceiptCountResponse
+} from './types'
+import { Utils as StringUtils } from '@shardeum-foundation/lib-types'
+import * as AccountDB from '../dbstore/accounts'
+import { storeAccountData } from './dataSync'
+
+const QUERY_TIMEOUT_MAX = 30 // 30 seconds
+const MAX_ACCOUNTS_PER_REQUEST = config.REQUEST_LIMIT.MAX_ACCOUNTS_PER_REQUEST
+const GENESIS_ACCOUNTS_CYCLE_RANGE = {
+  startCycle: 0,
+  endCycle: 5,
+}
+
+interface IncomingTimes {
+  quarterDuration: number
+  startQ1: number
+  startQ2: number
+  startQ3: number
+  startQ4: number
+  end: number
+}
+
+interface JoinStatus {
+  isJoined: boolean
+}
+
+export async function getTotalDataFromArchivers(): Promise<ArchiverTotalDataResponse | null> {
+  const res = (await queryFromArchivers(
+    RequestDataType.TOTALDATA,
+    {},
+    QUERY_TIMEOUT_MAX
+  )) as ArchiverTotalDataResponse | null
+  // @ts-ignore
+  if (!res || (res.success !== undefined && res.success === false)) {
+    return null
+  }
+  return res
+}
+
+export function createDataRequest<T extends P2PTypes.SnapshotTypes.ValidTypes>(
+  type: P2PTypes.SnapshotTypes.TypeName<T>,
+  lastData: P2PTypes.SnapshotTypes.TypeIndex<T>,
+  recipientPk: string
+): DataRequest<T> & Crypto.TaggedMessage {
+  return Crypto.tag<DataRequest<T>>(
+    {
+      type,
+      lastData,
+    },
+    recipientPk
+  )
+}
+
+export async function syncGenesisAccountsFromArchiver(): Promise<void> {
+  let complete = false
+  let startAccount = 0
+  let endAccount = startAccount + MAX_ACCOUNTS_PER_REQUEST
+  let totalGenesisAccounts = 0
+
+  const res = (await queryFromArchivers(
+    RequestDataType.ACCOUNT,
+    { startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle, endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle },
+    QUERY_TIMEOUT_MAX
+  )) as ArchiverAccountResponse
+  if (config.VERBOSE) Logger.mainLogger.error('Genesis Total Accounts Response', StringUtils.safeStringify(res))
+
+  // queryFromArchivers can return null; bail out before dereferencing it
+  if (!res) return
+  totalGenesisAccounts = res.totalAccounts
+  if (totalGenesisAccounts === 0) return
+
+  while (!complete) {
+    if (endAccount >= totalGenesisAccounts) {
+      endAccount = totalGenesisAccounts
+      complete = true
+    }
+    Logger.mainLogger.debug(`Downloading accounts from ${startAccount} to ${endAccount}`)
+    const response = (await queryFromArchivers(
+      RequestDataType.ACCOUNT,
+      {
+        startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle,
+        endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle,
+        start: startAccount,
+        end: endAccount,
+      },
+      QUERY_TIMEOUT_MAX
+    )) as ArchiverAccountResponse
+
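// Annotation (not part of the patch): each pass requests the slice
// [startAccount, endAccount] of genesis accounts for the fixed genesis cycle range
// (cycles 0-5 per GENESIS_ACCOUNTS_CYCLE_RANGE above); the handling below persists
// the slice and slides the window until the reported totalAccounts is covered.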
if (response && response.accounts) { + Logger.mainLogger.debug(`Downloaded accounts`, response.accounts.length) + await storeAccountData({ accounts: response.accounts }) + if (response.accounts.length < MAX_ACCOUNTS_PER_REQUEST) { + complete = true + Logger.mainLogger.debug('Download completed for accounts') + } + } else { + Logger.mainLogger.debug('Genesis Accounts Query', 'Invalid download response') + } + startAccount = endAccount + 1 + endAccount += MAX_ACCOUNTS_PER_REQUEST + } + Logger.mainLogger.debug('Sync genesis accounts completed!') +} + +export async function syncGenesisTransactionsFromArchiver(): Promise { + let complete = false + let startTransaction = 0 + let endTransaction = startTransaction + MAX_ACCOUNTS_PER_REQUEST + let totalGenesisTransactions = 0 + + const res = (await queryFromArchivers( + RequestDataType.RECEIPT, + { + startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle, + endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle, + type: 'count', + }, + QUERY_TIMEOUT_MAX + )) as ArchiverReceiptCountResponse + if (config.VERBOSE) Logger.mainLogger.error('Genesis Total Transactions Response', StringUtils.safeStringify(res)) + + totalGenesisTransactions = res.countByCycles ? res.countByCycles.reduce((sum, item) => sum + item.count, 0) : 0 + if (totalGenesisTransactions === 0) return + + while (!complete) { + if (endTransaction >= totalGenesisTransactions) { + endTransaction = totalGenesisTransactions + complete = true + } + Logger.mainLogger.debug(`Downloading transactions from ${startTransaction} to ${endTransaction}`) + const response = (await queryFromArchivers( + RequestDataType.RECEIPT, + { + startCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.startCycle, + endCycle: GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle, + start: startTransaction, + end: endTransaction, + }, + QUERY_TIMEOUT_MAX + )) as ArchiverAccountResponse + + if (response && response.transactions) { + Logger.mainLogger.debug(`Downloaded transactions`, response.transactions.length) + await storeAccountData({ receipts: response.transactions }) + if (response.transactions.length < MAX_ACCOUNTS_PER_REQUEST) { + complete = true + Logger.mainLogger.debug('Download completed for transactions') + } + } else { + Logger.mainLogger.debug('Genesis Transactions Query', 'Invalid download response') + } + startTransaction = endTransaction + 1 + endTransaction += MAX_ACCOUNTS_PER_REQUEST + } + Logger.mainLogger.debug('Sync genesis transactions completed!') +} + +export function calcIncomingTimes(record: P2PTypes.CycleCreatorTypes.CycleRecord): IncomingTimes { + const SECOND = 1000 + const cycleDuration = record.duration * SECOND + const quarterDuration = cycleDuration / 4 + const start = record.start * SECOND + cycleDuration + const startQ1 = start + const startQ2 = start + quarterDuration + const startQ3 = start + quarterDuration * 2 + const startQ4 = start + quarterDuration * 3 + const end = start + cycleDuration + return { + quarterDuration, + startQ1, + startQ2, + startQ3, + startQ4, + end, + } +} + +export const clearDataSenders = async ( + dataSenders: Map, + socketClients: Map, + subsetNodesMapByConsensusRadius: Map, + unsubscribeDataSender: (publicKey: string) => Promise +): Promise => { + for (const [publicKey] of dataSenders) { + await unsubscribeDataSender(publicKey) + } + await Utils.sleep(2000) // Wait for 2s to make sure all dataSenders are unsubscribed + dataSenders.clear() + socketClients.clear() + subsetNodesMapByConsensusRadius.clear() +} + +export async function sendLeaveRequest(nodes: NodeList.ConsensusNodeInfo[]): Promise { 
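// Annotation (not part of the patch): sent while shutting down gracefully so each
// consensor can drop this archiver from its recipient list. Hedged usage sketch,
// assuming a process-level signal handler (handler and node selection are
// illustrative, not from this changeset):
//
//   process.on('SIGINT', async () => {
//     await sendLeaveRequest(NodeList.getActiveList())
//     process.exit(0)
//   })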
+ const REQUEST_DATA_TIMEOUT_SECOND = 2 // 2s timeout + for (const node of nodes) { + const leaveRequest = { + publicKey: State.getNodeInfo().publicKey, + nodeInfo: State.getNodeInfo(), + } + const taggedLeaveRequest = Crypto.tag(leaveRequest, node.publicKey) + Logger.mainLogger.info(`Sending leave request to node ${node.ip}:${node.port}`) + const response = await P2P.postJson( + `http://${node.ip}:${node.port}/archiverleave`, + taggedLeaveRequest, + REQUEST_DATA_TIMEOUT_SECOND + ) + Logger.mainLogger.debug('/archiverleave response', response, node.ip + ':' + node.port) + } +} + +export async function joinNetwork(nodeList: NodeList.ConsensusNodeInfo[], isFirstTime: boolean): Promise { + Logger.mainLogger.debug('Joining network...') + const submitResponse = await submitJoin(nodeList, isFirstTime) + if (!submitResponse) return false + const isJoined = await checkJoinStatus(nodeList) + return isJoined +} + +export async function submitJoin( + nodeList: NodeList.ConsensusNodeInfo[], + isFirstTime: boolean +): Promise { + const REQUEST_JOIN_TIMEOUT_SECOND = 10 // 10s timeout + const joinRequest = { + publicKey: State.getNodeInfo().publicKey, + nodeInfo: State.getNodeInfo(), + isFirstTime, + } + + for (const node of nodeList) { + const taggedJoinRequest = Crypto.tag(joinRequest, node.publicKey) + Logger.mainLogger.info(`Sending join request to node ${node.ip}:${node.port}`) + try { + const response = await P2P.postJson( + `http://${node.ip}:${node.port}/archiverjoin`, + taggedJoinRequest, + REQUEST_JOIN_TIMEOUT_SECOND + ) + Logger.mainLogger.debug('/archiverjoin response', response, node.ip + ':' + node.port) + if (response && response.success) return true + } catch (error) { + Logger.mainLogger.error(`Failed to send join request to ${node.ip}:${node.port}:`, error) + } + } + return false +} + +export async function checkJoinStatus(activeNodes: NodeList.ConsensusNodeInfo[]): Promise { + const CHECK_JOIN_TIMEOUT_SECOND = 10 // 10s timeout + let isJoined = false + + for (const node of activeNodes) { + try { + const response = await P2P.getJson( + `http://${node.ip}:${node.port}/joinedarchiver?publicKey=${State.getNodeInfo().publicKey}`, + CHECK_JOIN_TIMEOUT_SECOND + ) as JoinStatus + + if (response && response.isJoined) { + isJoined = true + break + } + } catch (error) { + Logger.mainLogger.error(`Failed to check join status with ${node.ip}:${node.port}:`, error) + } + } + + return isJoined +} + +export async function sendActiveRequest(): Promise { + const REQUEST_ACTIVE_TIMEOUT_SECOND = 2 // 2s timeout + const activeRequest = { + publicKey: State.getNodeInfo().publicKey, + nodeInfo: State.getNodeInfo(), + } + + const activeNodes = NodeList.getActiveList() + for (const node of activeNodes) { + const taggedActiveRequest = Crypto.tag(activeRequest, node.publicKey) + Logger.mainLogger.info(`Sending active request to node ${node.ip}:${node.port}`) + try { + const response = await P2P.postJson( + `http://${node.ip}:${node.port}/archiveractive`, + taggedActiveRequest, + REQUEST_ACTIVE_TIMEOUT_SECOND + ) + Logger.mainLogger.debug('/archiveractive response', response, node.ip + ':' + node.port) + } catch (error) { + Logger.mainLogger.error(`Failed to send active request to ${node.ip}:${node.port}:`, error) + } + } +} + +export async function checkActiveStatus(): Promise { + const CHECK_ACTIVE_TIMEOUT_SECOND = 10 // 10s timeout + const activeNodes = NodeList.getActiveList() + + for (const node of activeNodes) { + try { + const response = await P2P.getJson( + 
`http://${node.ip}:${node.port}/activearchiver?publicKey=${State.getNodeInfo().publicKey}`, + CHECK_ACTIVE_TIMEOUT_SECOND + ) as { isActive: boolean } + + if (response && response.isActive) { + return true + } + } catch (error) { + Logger.mainLogger.error(`Failed to check active status with ${node.ip}:${node.port}:`, error) + } + } + + return false +} + +export async function getCycleDuration(): Promise { + const GET_CYCLE_TIMEOUT_SECOND = 5 // 5s timeout + const activeNodes = NodeList.getActiveList() + + for (const node of activeNodes) { + try { + const response = await P2P.getJson( + `http://${node.ip}:${node.port}/cycleduration`, + GET_CYCLE_TIMEOUT_SECOND + ) as { cycleDuration: number } + + if (response && response.cycleDuration) { + return response.cycleDuration + } + } catch (error) { + Logger.mainLogger.error(`Failed to get cycle duration from ${node.ip}:${node.port}:`, error) + } + } + + // Default cycle duration + return 60 +} + +// Export nodesPerConsensusGroup and nodesPerEdge +export let nodesPerConsensusGroup = 0 +export let nodesPerEdge = 0 \ No newline at end of file diff --git a/src/Data/networkConfig.ts b/src/Data/networkConfig.ts new file mode 100644 index 00000000..df459219 --- /dev/null +++ b/src/Data/networkConfig.ts @@ -0,0 +1,122 @@ +import * as P2P from '../P2P' +import * as Logger from '../Logger' +import * as NodeList from '../NodeList' +import * as Utils from '../Utils' +import { config, updateConfig } from '../Config' +import { robustQuery } from '../Utils' + +let currentConsensusRadius = 0 +let nodesPerConsensusGroup = 0 +let nodesPerEdge = 0 + +export function getCurrentConsensusRadius(): number { + return currentConsensusRadius +} + +export function setCurrentConsensusRadius(value: number): void { + currentConsensusRadius = value +} + +export async function syncFromNetworkConfig(): Promise { + try { + const queryFn = async (node): Promise => { + const REQUEST_NETCONFIG_TIMEOUT_SECOND = 3 + try { + const response = await P2P.getJson(`http://${node.ip}:${node.port}/netconfig`, REQUEST_NETCONFIG_TIMEOUT_SECOND) + return response + } catch (error) { + Logger.mainLogger.error(`Error querying node ${node.ip}:${node.port}: ${error}`) + return null + } + } + const equalityFn = (responseA, responseB): boolean => { + return responseA?.config?.sharding?.nodesPerConsensusGroup === responseB?.config?.sharding?.nodesPerConsensusGroup + } + const nodes = NodeList.getActiveNodeCount() > 0 ? 
NodeList.getRandomActiveNodes(10) : [NodeList.getFirstNode()] + const tallyItem = await robustQuery( + nodes, + queryFn, + equalityFn, + 3 + ) + if (tallyItem?.value?.config?.stateManager) { + const { + useNewPOQ: newPOQReceipt, + configChangeMaxChangesToKeep, + configChangeMaxCyclesToKeep, + maxCyclesShardDataToKeep, + } = tallyItem.value.config.stateManager + + if ( + !Utils.isUndefined(newPOQReceipt) && + typeof newPOQReceipt === typeof config.newPOQReceipt && + newPOQReceipt !== config.newPOQReceipt + ) + updateConfig({ newPOQReceipt }) + if ( + !Utils.isUndefined(configChangeMaxChangesToKeep) && + typeof configChangeMaxChangesToKeep === typeof config.configChangeMaxChangesToKeep && + configChangeMaxChangesToKeep !== config.configChangeMaxChangesToKeep + ) + updateConfig({ configChangeMaxChangesToKeep }) + if ( + !Utils.isUndefined(configChangeMaxCyclesToKeep) && + typeof configChangeMaxCyclesToKeep === typeof config.configChangeMaxCyclesToKeep && + configChangeMaxCyclesToKeep !== config.configChangeMaxCyclesToKeep + ) + updateConfig({ configChangeMaxCyclesToKeep }) + if ( + !Utils.isUndefined(maxCyclesShardDataToKeep) && + typeof maxCyclesShardDataToKeep === typeof config.maxCyclesShardDataToKeep && + maxCyclesShardDataToKeep !== config.maxCyclesShardDataToKeep + ) + updateConfig({ maxCyclesShardDataToKeep }) + return tallyItem + } + return null + } catch (error) { + Logger.mainLogger.error('❌ Error in syncFromNetworkConfig: ', error) + return null + } +} + +export async function getConsensusRadius(): Promise { + if (NodeList.isEmpty()) return currentConsensusRadius + + const tallyItem = await syncFromNetworkConfig() + if (tallyItem?.value?.config) { + const nodesPerEdgeFromConfig = tallyItem.value.config.sharding?.nodesPerEdge + const nodesPerConsensusGroupFromConfig = tallyItem.value.config.sharding?.nodesPerConsensusGroup + + if (!Number.isInteger(nodesPerConsensusGroupFromConfig) || nodesPerConsensusGroupFromConfig <= 0) { + Logger.mainLogger.error('nodesPerConsensusGroup is not a valid number:', nodesPerConsensusGroupFromConfig) + return currentConsensusRadius + } + + if (!Number.isInteger(nodesPerEdgeFromConfig) || nodesPerEdgeFromConfig <= 0) { + Logger.mainLogger.error('nodesPerEdge is not a valid number:', nodesPerEdgeFromConfig) + return currentConsensusRadius + } + if (nodesPerConsensusGroup === nodesPerConsensusGroupFromConfig && nodesPerEdge === nodesPerEdgeFromConfig) + return currentConsensusRadius + nodesPerConsensusGroup = nodesPerConsensusGroupFromConfig + nodesPerEdge = nodesPerEdgeFromConfig + if (nodesPerConsensusGroup % 2 === 0) nodesPerConsensusGroup++ + const consensusRadius = Math.floor((nodesPerConsensusGroup - 1) / 2) + if (typeof consensusRadius !== 'number' || isNaN(consensusRadius) || consensusRadius <= 0) { + Logger.mainLogger.error('Invalid consensusRadius:', consensusRadius) + return currentConsensusRadius + } + Logger.mainLogger.debug( + 'consensusRadius', + consensusRadius, + 'nodesPerConsensusGroup', + nodesPerConsensusGroup, + 'nodesPerEdge', + nodesPerEdge + ) + return consensusRadius + } + Logger.mainLogger.error('Failed to get consensusRadius from the network') + return currentConsensusRadius +} \ No newline at end of file diff --git a/src/Data/nodeSubscription.ts b/src/Data/nodeSubscription.ts new file mode 100644 index 00000000..15923ec2 --- /dev/null +++ b/src/Data/nodeSubscription.ts @@ -0,0 +1,208 @@ +import * as NodeList from '../NodeList' +import * as Logger from '../Logger' +import { config } from '../Config' +import { 
diff --git a/src/Data/nodeSubscription.ts b/src/Data/nodeSubscription.ts
new file mode 100644
index 00000000..15923ec2
--- /dev/null
+++ b/src/Data/nodeSubscription.ts
@@ -0,0 +1,208 @@
+import * as NodeList from '../NodeList'
+import * as Logger from '../Logger'
+import { config } from '../Config'
+import { nestedCountersInstance } from '../profiler/nestedCounters'
+import { DataSender, DataRequestTypes } from './types'
+import { sendDataRequest } from './dataRequests'
+import { initSocketClient, unsubscribeDataSender, socketClients } from './socketClient'
+import { getConsensusRadius, getCurrentConsensusRadius, setCurrentConsensusRadius } from './networkConfig'
+import { P2P as P2PTypes } from '@shardeum-foundation/lib-types'
+import * as StateMetaData from '../archivedCycle/StateMetaData'
+
+let subsetNodesMapByConsensusRadius: Map<number, NodeList.ConsensusNodeInfo[]> = new Map()
+
+export function createContactTimeout(publicKey: NodeList.ConsensusNodeInfo['publicKey'], msg = ''): NodeJS.Timeout {
+  const CONTACT_TIMEOUT_MS = 10 * 1000
+  if (config.VERBOSE) Logger.mainLogger.debug('Created contact timeout: ' + CONTACT_TIMEOUT_MS, `for ${publicKey}`)
+  nestedCountersInstance.countEvent('archiver', 'contact_timeout_created')
+  return setTimeout(async () => {
+    if (nestedCountersInstance) nestedCountersInstance.countEvent('archiver', 'contact_timeout')
+    Logger.mainLogger.debug('REPLACING sender due to CONTACT timeout', msg, publicKey)
+    // Import is circular, so we need to import dynamically
+    const { replaceDataSenderWithDataSenders } = await import('./Data')
+    await replaceDataSenderWithDataSenders(publicKey)
+  }, CONTACT_TIMEOUT_MS)
+}
+
+export function addDataSender(sender: DataSender, dataSenders: Map<string, DataSender>): void {
+  dataSenders.set(sender.nodeInfo.publicKey, sender)
+}
+
+export async function replaceDataSender(
+  publicKey: NodeList.ConsensusNodeInfo['publicKey'],
+  dataSenders: Map<string, DataSender>
+): Promise<void> {
+  nestedCountersInstance.countEvent('archiver', 'replace_data_sender')
+  if (NodeList.getActiveNodeCount() < 2) {
+    Logger.mainLogger.debug('There is only one active node in the network. Unable to replace data sender')
+    return
+  }
+  Logger.mainLogger.debug(`replaceDataSender: replacing ${publicKey}`)
+
+  if (!socketClients.has(publicKey) || !dataSenders.has(publicKey)) {
+    Logger.mainLogger.debug(
+      'This data sender is not in the subscribed list; unsubscribing it',
+      publicKey,
+      socketClients.has(publicKey),
+      dataSenders.has(publicKey)
+    )
+    unsubscribeDataSender(publicKey, dataSenders)
+    return
+  }
+  unsubscribeDataSender(publicKey, dataSenders)
+  const node = NodeList.byPublicKey.get(publicKey)
+  if (node) {
+    const nodeIndex = NodeList.activeListByIdSorted.findIndex((node) => node.publicKey === publicKey)
+    if (nodeIndex > -1) {
+      const subsetIndex = Math.floor(nodeIndex / getCurrentConsensusRadius())
+      const subsetNodesList = subsetNodesMapByConsensusRadius.get(subsetIndex)
+      if (!subsetNodesList) {
+        Logger.mainLogger.error(`There are no nodes at index ${subsetIndex} of subsetNodesMapByConsensusRadius!`)
+        return
+      }
+      subscribeNodeFromThisSubset(subsetNodesList, subsetIndex, dataSenders)
+    }
+  }
+}
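+
+// Reviewer note (illustrative, not runtime code): replaceDataSender maps a dead
+// sender back to its subset by integer division over the consensus radius, e.g.
+// with a radius of 10 the node at sorted index 23 belongs to subset
+// Math.floor(23 / 10) === 2, and a replacement is drawn from that same subset.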
+
+export async function subscribeNodeForDataTransfer(dataSenders: Map<string, DataSender>): Promise<void> {
+  if (config.passiveMode) {
+    Logger.mainLogger.debug('Archiver is in passive mode. Skipping data transfer subscription.')
+    return
+  }
+
+  if (config.experimentalSnapshot) {
+    await subscribeConsensorsByConsensusRadius(dataSenders)
+  } else {
+    await StateMetaData.subscribeRandomNodeForDataTransfer()
+  }
+}
+
+export async function createDataTransferConnection(
+  newSenderInfo: NodeList.ConsensusNodeInfo,
+  dataSenders: Map<string, DataSender>
+): Promise<boolean> {
+  const response = await sendDataRequest(newSenderInfo, DataRequestTypes.SUBSCRIBE)
+  if (response) {
+    initSocketClient(newSenderInfo, dataSenders, createContactTimeout)
+    const newSender: DataSender = {
+      nodeInfo: newSenderInfo,
+      types: [P2PTypes.SnapshotTypes.TypeNames.CYCLE, P2PTypes.SnapshotTypes.TypeNames.STATE_METADATA],
+      contactTimeout: createContactTimeout(
+        newSenderInfo.publicKey,
+        'This timeout is created during newSender selection'
+      ),
+    }
+    addDataSender(newSender, dataSenders)
+    Logger.mainLogger.debug(`added new sender ${newSenderInfo.publicKey} to dataSenders`)
+  }
+  return response
+}
+
+function shouldSubscribeToMoreConsensors(): boolean {
+  return config.subscribeToMoreConsensors && getCurrentConsensusRadius() > 5
+}
+
+export async function createNodesGroupByConsensusRadius(): Promise<void> {
+  const consensusRadius = await getConsensusRadius()
+  if (consensusRadius === 0) {
+    Logger.mainLogger.error('Consensus radius is 0, unable to create nodes group.')
+    return
+  }
+  setCurrentConsensusRadius(consensusRadius)
+  const activeList = [...NodeList.activeListByIdSorted]
+  if (config.VERBOSE) Logger.mainLogger.debug('activeList', activeList.length, activeList)
+  let totalNumberOfNodesToSubscribe = Math.ceil(activeList.length / consensusRadius)
+  if (shouldSubscribeToMoreConsensors()) {
+    totalNumberOfNodesToSubscribe += totalNumberOfNodesToSubscribe * config.extraConsensorsToSubscribe
+  }
+  Logger.mainLogger.debug('totalNumberOfNodesToSubscribe', totalNumberOfNodesToSubscribe)
+  subsetNodesMapByConsensusRadius = new Map()
+  let round = 0
+  for (let i = 0; i < activeList.length; i += consensusRadius) {
+    const subsetList: NodeList.ConsensusNodeInfo[] = activeList.slice(i, i + consensusRadius)
+    subsetNodesMapByConsensusRadius.set(round, subsetList)
+    round++
+  }
+  if (config.VERBOSE) Logger.mainLogger.debug('subsetNodesMapByConsensusRadius', subsetNodesMapByConsensusRadius)
+}
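+
+// Worked example (illustrative only): 25 active nodes with consensusRadius = 10 are
+// chunked as slice(0, 10), slice(10, 20), slice(20, 25), so Math.ceil(25 / 10) === 3
+// subsets are tracked and at least one sender is subscribed per subset.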
+
+export async function subscribeConsensorsByConsensusRadius(dataSenders: Map<string, DataSender>): Promise<void> {
+  await createNodesGroupByConsensusRadius()
+  for (const [i, subsetList] of subsetNodesMapByConsensusRadius) {
+    if (config.VERBOSE) Logger.mainLogger.debug('Round', i, 'subsetList', subsetList, dataSenders.keys())
+    subscribeNodeFromThisSubset(subsetList, i, dataSenders)
+  }
+}
+
+export async function subscribeNodeFromThisSubset(
+  nodeList: NodeList.ConsensusNodeInfo[],
+  roundIndex: number,
+  dataSenders: Map<string, DataSender>
+): Promise<void> {
+  const subscribedNodesFromThisSubset: string[] = []
+  for (const node of nodeList) {
+    if (dataSenders.has(node.publicKey)) {
+      if (config.VERBOSE)
+        Logger.mainLogger.debug('This node from the subset is in the subscribed list!', node.publicKey)
+      subscribedNodesFromThisSubset.push(node.publicKey)
+    }
+  }
+  let numberOfNodesToSubscribe = 1
+  if (shouldSubscribeToMoreConsensors()) {
+    numberOfNodesToSubscribe += config.extraConsensorsToSubscribe
+    nestedCountersInstance.countEvent(
+      'nodeSubscription',
+      'add extra consensor(s): ' + config.extraConsensorsToSubscribe
+    )
+  } else {
+    nestedCountersInstance.countEvent('nodeSubscription', 'add consensor: ')
+  }
+  if (subscribedNodesFromThisSubset.length > numberOfNodesToSubscribe) {
+    for (const publicKey of subscribedNodesFromThisSubset.splice(numberOfNodesToSubscribe)) {
+      Logger.mainLogger.debug('Unsubscribing extra node from this subset', publicKey)
+      unsubscribeDataSender(publicKey, dataSenders)
+    }
+  }
+  if (config.VERBOSE) Logger.mainLogger.debug('Subscribed nodes from this subset', subscribedNodesFromThisSubset)
+  if (subscribedNodesFromThisSubset.length === numberOfNodesToSubscribe) return
+  Logger.mainLogger.debug(
+    `Subscribing node(s) from this subset! numberOfNodesToSubscribe: ${numberOfNodesToSubscribe} roundIndex: ${roundIndex}`
+  )
+  let subsetList = [...nodeList]
+  let newSenderInfo = nodeList[Math.floor(Math.random() * nodeList.length)]
+  let connectionStatus = false
+  let retry = 0
+  const MAX_RETRY_SUBSCRIPTION = 3 * numberOfNodesToSubscribe
+  while (retry < MAX_RETRY_SUBSCRIPTION && subscribedNodesFromThisSubset.length < numberOfNodesToSubscribe) {
+    if (!dataSenders.has(newSenderInfo.publicKey)) {
+      connectionStatus = await createDataTransferConnection(newSenderInfo, dataSenders)
+      if (connectionStatus) {
+        if (!subscribedNodesFromThisSubset.includes(newSenderInfo.publicKey)) {
+          subscribedNodesFromThisSubset.push(newSenderInfo.publicKey)
+          Logger.mainLogger.debug(
+            `Added new sender to the subscribed nodes of this subset. publicKey:${newSenderInfo.publicKey}, numberOfNodesToSubscribe:${numberOfNodesToSubscribe}, roundIndex:${roundIndex}`
+          )
+        }
+      }
+    }
+    retry++
+    subsetList = subsetList.filter((node) => node.publicKey !== newSenderInfo.publicKey)
+    if (subsetList.length === 0) {
+      Logger.mainLogger.debug(`Unable to find a new sender from this subset! round: ${roundIndex}`)
+      break
+    }
+    newSenderInfo = subsetList[Math.floor(Math.random() * subsetList.length)]
+  }
+}
+
+// Export replaceDataSender under the alias that Data.ts expects
+export { replaceDataSender as replaceDataSenderMain }
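+
+// Illustrative call sequence (hypothetical driver code, not executed here):
+//
+//   import { dataSenders } from './Data' // the shared Map<string, DataSender>
+//   await subscribeConsensorsByConsensusRadius(dataSenders)
+//
+// subscribeConsensorsByConsensusRadius calls createNodesGroupByConsensusRadius
+// internally, so callers only need the single entry point shown above.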
diff --git a/src/Data/socketClient.ts b/src/Data/socketClient.ts
new file mode 100644
index 00000000..a7188e9e
--- /dev/null
+++ b/src/Data/socketClient.ts
@@ -0,0 +1,225 @@
+import * as ioclient from 'socket.io-client'
+import { Socket as SocketIOSocket } from 'socket.io-client'
+import * as NodeList from '../NodeList'
+import * as Logger from '../Logger'
+import * as Crypto from '../Crypto'
+import * as State from '../State'
+import * as Utils from '../Utils'
+import { Utils as UtilsTypes } from '@shardeum-foundation/lib-types'
+import { Utils as StringUtils } from '@shardeum-foundation/lib-types'
+import { nestedCountersInstance } from '../profiler/nestedCounters'
+import { config } from '../Config'
+import { P2P as P2PTypes } from '@shardeum-foundation/lib-types'
+import { DataResponse, DataSender, DataRequestTypes, subscriptionCycleData } from './types'
+import { collectCycleData } from './cycleData'
+import { storeReceiptData, storeOriginalTxData, storeAccountData } from './dataSync'
+import { getCurrentCycleCounter } from './Cycles'
+import * as Cycles from './Cycles'
+import { sendDataRequest } from './dataRequests'
+import {
+  clearCombinedAccountsData,
+  addToCombinedAccountsData,
+  getCombinedAccountsData,
+  syncGenesisAccountsFromConsensor
+} from './accountData'
+
+const GENESIS_ACCOUNTS_CYCLE_RANGE = {
+  startCycle: 0,
+  endCycle: 5,
+}
+
+export const socketClients: Map<string, SocketIOSocket> = new Map()
+
+export let forwardGenesisAccounts = false
+
+export function setForwardGenesisAccounts(value: boolean): void {
+  forwardGenesisAccounts = value
+}
+
+export class ValidationTracker {
+  public discrepancyList: P2PTypes.CycleCreatorTypes.CycleData[]
+  constructor() {
+    this.discrepancyList = []
+  }
+  add(data: P2PTypes.CycleCreatorTypes.CycleData): void {
+    this.discrepancyList.push(data)
+  }
+}
+
+export const validationTracker = new ValidationTracker()
+
+export function unsubscribeDataSender(
+  publicKey: NodeList.ConsensusNodeInfo['publicKey'],
+  dataSenders: Map<string, DataSender>
+): void {
+  const sender = dataSenders.get(publicKey)
+  if (sender) {
+    if (sender.contactTimeout) {
+      clearTimeout(sender.contactTimeout)
+      sender.contactTimeout = null
+    }
+    sendDataRequest(sender.nodeInfo, DataRequestTypes.UNSUBSCRIBE)
+    dataSenders.delete(publicKey)
+  }
+  const socketClient = socketClients.get(publicKey)
+  if (socketClient) {
+    socketClient.emit('UNSUBSCRIBE', config.ARCHIVER_PUBLIC_KEY)
+    socketClient.close()
+    socketClients.delete(publicKey)
+  }
+  nestedCountersInstance.countEvent('archiver', 'remove_data_sender')
+  Logger.mainLogger.debug('Subscribed dataSenders', dataSenders.size, 'Connected socketClients', socketClients.size)
+  if (config.VERBOSE)
+    Logger.mainLogger.debug(
+      'Subscribed dataSenders',
+      dataSenders.keys(),
+      'Connected socketClients',
+      socketClients.keys()
+    )
+}
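+
+// Reviewer note (illustrative): teardown above runs in a deliberate order — clear the
+// contact timeout (so no replacement fires mid-teardown), tell the consensor to
+// UNSUBSCRIBE, drop the sender from the map, then close the socket. A caller
+// typically guards the call like:
+//
+//   if (dataSenders.has(key)) unsubscribeDataSender(key, dataSenders)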
+
+export function initSocketClient(
+  node: NodeList.ConsensusNodeInfo,
+  dataSenders: Map<string, DataSender>,
+  createContactTimeout: (publicKey: string, msg: string) => NodeJS.Timeout
+): void {
+  if (config.VERBOSE) Logger.mainLogger.debug('Node Info to socket connect', node)
+
+  try {
+    const socketClient = ioclient.connect(`http://${node.ip}:${node.port}`, {
+      query: {
+        data: StringUtils.safeStringify(
+          Crypto.sign({
+            publicKey: State.getNodeInfo().publicKey,
+            timestamp: Date.now(),
+            intendedConsensor: node.publicKey,
+          })
+        ),
+      },
+    })
+    socketClients.set(node.publicKey, socketClient)
+
+    socketClient.on('connect', () => {
+      Logger.mainLogger.debug(`✅ New Socket Connection to consensus node ${node.ip}:${node.port} is made`)
+      if (config.VERBOSE) Logger.mainLogger.debug('Connected node', node)
+      if (config.VERBOSE) Logger.mainLogger.debug('Init socketClients', socketClients.size, dataSenders.size)
+    })
+
+    socketClient.once('disconnect', async () => {
+      Logger.mainLogger.debug(`Connection request is refused by the consensor node ${node.ip}:${node.port}`)
+    })
+
+    socketClient.on('DATA', (data: string) => {
+      const newData: DataResponse<unknown> & Crypto.TaggedMessage = StringUtils.safeJsonParse(data)
+      if (!newData || !newData.responses) return
+      if (newData.recipient !== State.getNodeInfo().publicKey) {
+        Logger.mainLogger.debug('This data is not meant for this archiver')
+        return
+      }
+
+      if (Crypto.authenticate(newData) === false) {
+        Logger.mainLogger.debug('This data cannot be authenticated')
+        unsubscribeDataSender(node.publicKey, dataSenders)
+        return
+      }
+
+      if (config.experimentalSnapshot) {
+        let sender = dataSenders.get(newData.publicKey)
+        if (!sender) {
+          Logger.mainLogger.error('This sender is not in the subscribed nodes list', newData.publicKey)
+          return
+        }
+        if (sender.contactTimeout) {
+          if (config.VERBOSE) Logger.mainLogger.debug('Clearing contact timeout.')
+          clearTimeout(sender.contactTimeout)
+          sender.contactTimeout = null
+          nestedCountersInstance.countEvent('archiver', 'clear_contact_timeout')
+        }
+
+        if (config.VERBOSE) console.log('DATA', sender.nodeInfo.publicKey, sender.nodeInfo.ip, sender.nodeInfo.port)
+
+        if (newData.responses && newData.responses.ORIGINAL_TX_DATA) {
+          const originalTxData = newData.responses.ORIGINAL_TX_DATA as unknown as any[]
+          if (config.VERBOSE)
+            Logger.mainLogger.debug(
+              'ORIGINAL_TX_DATA',
+              sender.nodeInfo.publicKey,
+              sender.nodeInfo.ip,
+              sender.nodeInfo.port,
+              originalTxData.length
+            )
+        }
+        if (newData.responses && newData.responses.RECEIPT) {
+          const receipts = newData.responses.RECEIPT as unknown as any[]
+          if (config.VERBOSE)
+            Logger.mainLogger.debug(
+              'RECEIPT',
+              sender.nodeInfo.publicKey,
+              sender.nodeInfo.ip,
+              sender.nodeInfo.port,
+              receipts.length
+            )
+          storeReceiptData(
+            receipts,
+            sender.nodeInfo.ip + ':' + sender.nodeInfo.port,
+            true,
+            config.saveOnlyGossipData,
+            true
+          )
+        }
+        if (newData.responses && newData.responses.CYCLE) {
+          const cycles = newData.responses.CYCLE as unknown as (
+            | P2PTypes.CycleCreatorTypes.CycleData[]
+            | subscriptionCycleData[]
+          )
+          collectCycleData(cycles, sender.nodeInfo.ip + ':' + sender.nodeInfo.port, 'data-sender', dataSenders)
+        }
+        if (newData.responses && newData.responses.ACCOUNT) {
+          if (getCurrentCycleCounter() > GENESIS_ACCOUNTS_CYCLE_RANGE.endCycle) {
+            Logger.mainLogger.error(
+              'Account data is not meant to be received after the genesis accounts cycle range',
+              getCurrentCycleCounter()
+            )
+            unsubscribeDataSender(sender.nodeInfo.publicKey, dataSenders)
+            return
+          }
+          if (
+            Cycles.currentNetworkMode !== 'forming' ||
+            NodeList.byPublicKey.size > 1 ||
+            !NodeList.byPublicKey.has(sender.nodeInfo.publicKey)
+          ) {
+            Logger.mainLogger.error(
+              'Account data is only accepted from the first validator while the network is forming',
+              `Number of nodes in the network ${NodeList.byPublicKey.size}`
+            )
+            unsubscribeDataSender(sender.nodeInfo.publicKey, dataSenders)
+            return
+          }
+          Logger.mainLogger.debug(`RECEIVED ACCOUNTS DATA FROM ${sender.nodeInfo.ip}:${sender.nodeInfo.port}`)
+          nestedCountersInstance.countEvent('genesis', 'accounts', 1)
+          if (!forwardGenesisAccounts) {
+            Logger.mainLogger.debug('Genesis Accounts To Sync', newData.responses.ACCOUNT)
+            syncGenesisAccountsFromConsensor(newData.responses.ACCOUNT as any, sender.nodeInfo)
+          } else {
+            // storingAccountData flag is handled in Collector.ts
+            Logger.mainLogger.debug('Storing Account Data')
+            const accountData = newData.responses.ACCOUNT as any
+            if (accountData.accounts || accountData.receipts) {
+              addToCombinedAccountsData(accountData)
+            } else {
+              storeAccountData(accountData)
+            }
+          }
+        }
+
+        nestedCountersInstance.countEvent('archiver', 'postpone_contact_timeout')
+        sender = dataSenders.get(newData.publicKey)
+        if (sender)
+          sender.contactTimeout = createContactTimeout(
+            sender.nodeInfo.publicKey,
+            'This timeout is created after processing data'
+          )
+      }
+    })
+  } catch (error) {
+    console.error('Error occurred during socket connection:', error)
+  }
+}
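+
+// Shape of the DATA payload handled above (illustrative, reconstructed from the
+// reads performed in the handler; exact field types are assumptions):
+//
+//   {
+//     publicKey: '<sender consensor pubkey>',
+//     recipient: '<this archiver pubkey>',
+//     responses: {
+//       CYCLE?: CycleData[] | subscriptionCycleData[],
+//       RECEIPT?: Receipt[],
+//       ORIGINAL_TX_DATA?: OriginalTxData[],
+//       ACCOUNT?: unknown,
+//     },
+//     // plus the Crypto.TaggedMessage fields consumed by Crypto.authenticate()
+//   }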
diff --git a/src/Data/types.ts b/src/Data/types.ts
new file mode 100644
index 00000000..bb00c284
--- /dev/null
+++ b/src/Data/types.ts
@@ -0,0 +1,132 @@
+import { P2P as P2PTypes } from '@shardeum-foundation/lib-types'
+import { Socket as SocketIOSocket } from 'socket.io-client'
+import * as NodeList from '../NodeList'
+import * as ReceiptDB from '../dbstore/receipts'
+import * as OriginalTxDB from '../dbstore/originalTxsData'
+import * as State from '../State'
+
+export interface CombinedAccountsData {
+  accounts: any[]
+  receipts: any[]
+}
+
+export interface ValidatorColletor {
+  nodeId: string
+  signedMessage: {
+    cycleRecord: {
+      counter: number
+      mode: string
+      archiversAtShutdown?: string[]
+    }
+  }
+}
+
+export interface ValidatorCycle {
+  node: string
+  cycle: P2PTypes.CycleCreatorTypes.CycleData
+}
+
+export interface Signer {
+  owner: string
+  sig: string
+}
+
+export interface DataRequestTypes {
+  SUBSCRIBE: 'SUBSCRIBE'
+  UNSUBSCRIBE: 'UNSUBSCRIBE'
+}
+
+export const DataRequestTypes: DataRequestTypes = {
+  SUBSCRIBE: 'SUBSCRIBE',
+  UNSUBSCRIBE: 'UNSUBSCRIBE',
+}
+
+export interface subscriptionCycleData
+  extends Omit<P2PTypes.CycleCreatorTypes.CycleData, 'certificate' | 'certificates'> {
+  certificate?: P2PTypes.CycleCreatorTypes.CycleCert
+  certificates?: P2PTypes.CycleCreatorTypes.CycleCert[]
+}
+
+export interface DataRequest {
+  type: P2PTypes.SnapshotTypes.TypeName
+  lastData: P2PTypes.SnapshotTypes.TypeIndex
+}
+
+export interface DataResponse<T = unknown> {
+  publicKey?: NodeList.ConsensusNodeInfo['publicKey']
+  recipient: NodeList.ConsensusNodeInfo['publicKey']
+  responses: { [name: string]: T }
+}
+
+export interface StoredReceiptObject {
+  receipts: ReceiptDB.Receipt[]
+  success: boolean
+}
+
+export interface CountResponse {
+  receipts?: ArchiverReceiptCountResponse
+  originalTxs?: ArchiverOriginalTxsCountResponse
+  cycles?: number
+  accounts?: number
+}
+
+export interface ArchiverAccountResponse {
+  accounts: any[]
+  transactions?: any[]
+  receipts?: any[]
+  totalAccounts?: number
+}
+
+export interface ArchiverCycleResponse {
+  cycleInfo: P2PTypes.CycleCreatorTypes.CycleData[]
+}
+
+export interface ArchiverReceiptResponse {
+  receipts: ReceiptDB.Receipt[] | ReceiptDB.ReceiptCount[]
+}
+
+export interface ArchiverOriginalTxResponse {
+  originalTxs: OriginalTxDB.OriginalTxData[] | OriginalTxDB.OriginalTxDataCount[]
+}
+
+export interface ArchiverReceiptCountResponse {
+  countByCycles: { cycle: number; count: number }[]
+  startCycle: number
+  endCycle: number
+}
+
+export interface ArchiverOriginalTxsCountResponse {
+  countByCycles: { cycle: number; count: number }[]
+  startCycle: number
+  endCycle: number
+}
+
+export interface ArchiverTotalDataResponse {
+  totalReceipts: number
+  totalCycles: number
+  totalAccounts: number
+  totalOriginalTxs: number
+}
+
+// Re-export RequestDataType from API.ts to avoid duplication
+export { RequestDataType } from '../API'
+
+export interface RequestDataCountType {
+  type: 'tally'
+}
+
+export interface DataSender {
+  nodeInfo: NodeList.ConsensusNodeInfo
+  types: (keyof typeof P2PTypes.SnapshotTypes.TypeNames)[]
+  contactTimeout?: NodeJS.Timeout | null
+  replaceTimeout?: NodeJS.Timeout | null
+}
+
+export interface CompareResponse {
+  success: boolean
+  matchedCycle: number
+}
+
+export interface ArchiverWithRetries {
+  archiver: State.ArchiverNodeInfo
+  retriesLeft: number
+}
\ No newline at end of file
diff --git a/src/server.ts b/src/server.ts
index d870efcd..fcf7f45b 100644
--- a/src/server.ts
+++ b/src/server.ts
@@ -220,7 +220,7 @@ async function start(): Promise<void> {
     Logger.mainLogger.debug('We have successfully joined the network')
     await startServer()
-    await Data.subscribeNodeForDataTransfer()
+    await Data.subscribeNodeForDataTransferWithDataSenders()
   } else {
     await startServer()
   }
@@ -394,7 +394,7 @@ async function syncAndStartServer(): Promise<void> {
     await sendActiveMessage()
   } else {
     State.setSyncing(false)
-    await Data.subscribeNodeForDataTransfer()
+    await Data.subscribeNodeForDataTransferWithDataSenders()
   }
 
   // Schedule multi-signature keys sync
@@ -595,7 +595,7 @@ async function sendActiveMessage(): Promise<void> {
     isActive = true
   }
 
-  await Data.subscribeNodeForDataTransfer()
+  await Data.subscribeNodeForDataTransferWithDataSenders()
   State.setSyncing(false)
 }