diff --git a/README.md.template b/README.md.template index ccb9f3f0d..f72e10f07 100644 --- a/README.md.template +++ b/README.md.template @@ -18,11 +18,16 @@ An opinionated CLI tool to deploy and manage standalone test networks. * [Requirements](#requirements) * [Setup](#setup) * [Install Solo](#install-solo) -* [Setup Kubernetes cluster](#setup-kubernetes-cluster) -* [Generate Node Keys](#generate-node-keys) - * [Standard keys (.pem file)](#standard-keys-pem-file) -* [Examples](#examples) - * [Example - 1: Deploy a standalone test network (version `0.54.0-alpha.4`)](#example---1-deploy-a-standalone-test-network-version-0540-alpha4) +* [Use the Task tool to launch Solo](#use-the-task-tool-to-launch-solo) +* [Advanced User Guide](#advanced-user-guide) + * [Setup Kubernetes cluster](#setup-kubernetes-cluster) + * [Step by Step Instructions](#step-by-step-instructions) +* [For Hashgraph Developers](#for-hashgraph-developers) + * [For Developers Working on Hedera Service Repo](#for-developers-working-on-hedera-service-repo) + * [For Developers Working on Platform core](#for-developers-working-on-platform-core) + * [Using IntelliJ remote debug with Solo](#using-intellij-remote-debug-with-solo) + * [Retrieving Logs](#retrieving-logs) + * [Save and reuse network state files](#save-and-reuse-network-state-files) * [Support](#support) * [Contributing](#contributing) * [Code of Conduct](#code-of-conduct) @@ -53,9 +58,40 @@ nvm use lts/hydrogen * Run `npm install -g @hashgraph/solo` -## Setup Kubernetes cluster +## Use the Task tool to launch Solo -### Remote cluster +First, install the cluster tool `kind` with this [link](https://kind.sigs.k8s.io/docs/user/quick-start#installation) + +Then, install the task tool `task` with this [link](https://taskfile.dev/#/installation) + +Then, use the following steps to install dependencies and build solo project. 
+ +```bash +npm ci +npm run build +``` +Then, user can use one of the following three commands to quickly deploy a standalone solo network. + +```bash +# Option 1) deploy solo network with two nodes +task default + +# Option 2) deploy solo network with two nodes, and mirror node +task default-with-mirror + +# Option 3) deploy solo network with two nodes, mirror node, and JSON RPC relay +task default-with-relay +``` +To tear down the solo network +```bash +task clean +``` + +## Advanced User Guide +For those who would like to have more control or need some customized setups, here are some step by step instructions of how to setup and deploy a solo network. +### Setup Kubernetes cluster + +#### Remote cluster * You may use remote kubernetes cluster. In this case, ensure kubernetes context is set up correctly. @@ -63,7 +99,7 @@ nvm use lts/hydrogen kubectl config use-context ``` -### Local cluster +#### Local cluster * You may use [kind](https://kind.sigs.k8s.io/) or [microk8s](https://microk8s.io/) to create a cluster. In this case, ensure your Docker engine has enough resources (e.g. Memory >=8Gb, CPU: >=4). Below we show how you can use `kind` to create a cluster @@ -116,9 +152,8 @@ You may now view pods in your cluster using `k9s -A` as below: └─────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘ ``` -## Examples -### Example - 1: Deploy a standalone test network (version `0.54.0-alpha.4`) +### Step by Step Instructions * Initialize `solo` directories: @@ -318,8 +353,8 @@ Example output ``` $SOLO_RELAY_DEPLOY_OUTPUT ``` - -## For Developers Working on Hedera Service Repo +## For Hashgraph Developers +### For Developers Working on Hedera Service Repo First, please clone hedera service repo `https://github.com/hashgraph/hedera-services/` and build the code with `./gradlew assemble`. 
If need to running nodes with different versions or releases, please duplicate the repo or build directories in @@ -335,7 +370,7 @@ solo node setup -i node1,node2,node3 -n "${SOLO_NAMESPACE}" --local-build-path < # example: solo node setup -i node1,node2,node3 -n "${SOLO_NAMESPACE}" --local-build-path node1=../hedera-services/hedera-node/data/,../hedera-services/hedera-node/data,node3=../hedera-services/hedera-node/data ``` -## For Developers Working on Platform core +### For Developers Working on Platform core To deploy node with local build PTT jar files, run the following command: ``` @@ -343,12 +378,12 @@ solo node setup -i node1,node2,node3 -n "${SOLO_NAMESPACE}" --local-build-path < # example: solo node setup -i node1,node2,node3 -n "${SOLO_NAMESPACE}" --local-build-path ../hedera-services/platform-sdk/sdk/data,node1=../hedera-services/platform-sdk/sdk/data,node2=../hedera-services/platform-sdk/sdk/data --app PlatformTestingTool.jar --app-config ../hedera-services/platform-sdk/platform-apps/tests/PlatformTestingTool/src/main/resources/FCMFCQ-Basic-2.5k-5m.json ``` -## Logs +### Retrieving Logs You can find log for running solo command under the directory `~/.solo/logs/` The file `solo.log` contains the logs for the solo command. The file `hashgraph-sdk.log` contains the logs from Solo client when sending transactions to network nodes. -## Using IntelliJ remote debug with Solo +### Using IntelliJ remote debug with Solo NOTE: the hedera-services path referenced '../hedera-services/hedera-node/data' may need to be updated based on what directory you are currently in. This also assumes that you have done an assemble/build and the directory contents are up-to-date. 
@@ -402,6 +437,31 @@ solo node setup -i node1,node2,node3,node4 --local-build-path ../hedera-services solo node start -i node1,node2,node3,node4 -n "${SOLO_NAMESPACE}" solo node delete --node-alias node2 --debug-node-alias node3 -n "${SOLO_NAMESPACE}" ``` +### Save and reuse network state files + +With the following command you can save the network state to a file. +```bash +# must stop hedera node operation first +npm run solo-test -- node stop -i node1,node2 -n solo-e2e + +# download state file to default location at ~/.solo/logs/ +npm run solo-test -- node states -i node1,node2 -n solo-e2e +``` + +By default, the state files are saved under the `~/.solo/logs` directory + +```bash +└── logs + ├── solo-e2e + │   ├── network-node1-0-state.zip + │   └── network-node2-0-state.zip + └── solo.log +``` + +Later, you can use the following command to upload the state files to the network and restart hedera nodes. +```bash +npm run solo-test -- node start -i node1,node2 -n solo-e2e --state-file network-node1-0-state.zip +``` ## Support diff --git a/resources/templates/settings.txt b/resources/templates/settings.txt index 3b34a834b..10cb13438 100644 --- a/resources/templates/settings.txt +++ b/resources/templates/settings.txt @@ -12,3 +12,4 @@ crypto.enableNewKeyStoreModel, true # TODO: remove this?
only defaults to true when going from 0.52 to 0.53 event.migrateEventHashing, false +state.saveStatePeriod, 60 diff --git a/src/commands/flags.ts b/src/commands/flags.ts index 4ea72ed88..ac84e0d09 100644 --- a/src/commands/flags.ts +++ b/src/commands/flags.ts @@ -182,6 +182,16 @@ export const deployJsonRpcRelay: CommandFlag = { } } +export const stateFile: CommandFlag = { + constName: 'stateFile', + name: 'state-file', + definition: { + describe: 'A zipped state file to be used for the network', + defaultValue: '', + type: 'string' + } +} + export const releaseTag: CommandFlag = { constName: 'releaseTag', name: 'release-tag', @@ -882,6 +892,7 @@ export const allFlags: CommandFlag[] = [ relayReleaseTag, releaseTag, replicaCount, + stateFile, setAlias, settingTxt, stakeAmounts, diff --git a/src/commands/mirror_node.ts b/src/commands/mirror_node.ts index 5fb64d354..7de1fe1be 100644 --- a/src/commands/mirror_node.ts +++ b/src/commands/mirror_node.ts @@ -338,9 +338,9 @@ export class MirrorNodeCommand extends BaseCommand { try { await tasks.run() - self.logger.debug('node start has completed') + self.logger.debug('mirror node deployment has completed') } catch (e: Error | any) { - throw new SoloError(`Error starting node: ${e.message}`, e) + throw new SoloError(`Error deploying mirror node: ${e.message}`, e) } finally { await lease.release() await self.accountManager.close() } @@ -429,9 +429,9 @@ export class MirrorNodeCommand extends BaseCommand { try { await tasks.run() - self.logger.debug('node start has completed') + self.logger.debug('mirror node destruction has completed') } catch (e: Error | any) { - throw new SoloError(`Error starting node: ${e.message}`, e) + throw new SoloError(`Error destroying mirror node: ${e.message}`, e) } finally { await lease.release() await self.accountManager.close() } diff --git a/src/commands/node/configs.ts b/src/commands/node/configs.ts index dca534340..2daf1e30f 100644 --- a/src/commands/node/configs.ts +++ b/src/commands/node/configs.ts @@ 
-246,6 +246,17 @@ export const logsConfigBuilder = function (argv, ctx, task) { return config } +export const statesConfigBuilder = function (argv, ctx, task) { + /** @type {{namespace: string, nodeAliases: NodeAliases, nodeAliasesUnparsed:string}} */ + const config = { + namespace: this.configManager.getFlag(flags.namespace), + nodeAliases: helpers.parseNodeAliases(this.configManager.getFlag(flags.nodeAliasesUnparsed)), + nodeAliasesUnparsed: this.configManager.getFlag(flags.nodeAliasesUnparsed) + } + ctx.config = config + return config +} + export const refreshConfigBuilder = async function (argv, ctx, task) { ctx.config = this.getConfig(REFRESH_CONFIGS_NAME, argv.flags, [ diff --git a/src/commands/node/flags.ts b/src/commands/node/flags.ts index 4a8aa7122..d863237e3 100644 --- a/src/commands/node/flags.ts +++ b/src/commands/node/flags.ts @@ -184,6 +184,12 @@ export const LOGS_FLAGS = { optionalFlags: [] } +export const STATES_FLAGS = { + requiredFlags: [flags.namespace, flags.nodeAliasesUnparsed], + requiredFlagsWithDisabledPrompt: [], + optionalFlags: [] +} + export const REFRESH_FLAGS = { requiredFlags: [ flags.cacheDir, @@ -239,6 +245,7 @@ export const START_FLAGS = { flags.quiet, flags.nodeAliasesUnparsed, flags.debugNodeAlias, + flags.stateFile, flags.stakeAmounts, ] } diff --git a/src/commands/node/handlers.ts b/src/commands/node/handlers.ts index 22a2d24f7..576b0179f 100644 --- a/src/commands/node/handlers.ts +++ b/src/commands/node/handlers.ts @@ -18,8 +18,17 @@ import * as helpers from '../../core/helpers.js' import * as NodeFlags from './flags.js' import { - addConfigBuilder, deleteConfigBuilder, downloadGeneratedFilesConfigBuilder, keysConfigBuilder, logsConfigBuilder, - prepareUpgradeConfigBuilder, refreshConfigBuilder, setupConfigBuilder, startConfigBuilder, stopConfigBuilder, + addConfigBuilder, + deleteConfigBuilder, + downloadGeneratedFilesConfigBuilder, + keysConfigBuilder, + logsConfigBuilder, + prepareUpgradeConfigBuilder, + 
refreshConfigBuilder, + setupConfigBuilder, + startConfigBuilder, + statesConfigBuilder, + stopConfigBuilder, updateConfigBuilder } from './configs.js' import { @@ -500,6 +509,21 @@ export class NodeCommandHandlers { return true } + async states (argv: any) { + argv = helpers.addFlagsToArgv(argv, NodeFlags.STATES_FLAGS) + + const action = helpers.commandActionBuilder([ + this.tasks.initialize(argv, statesConfigBuilder.bind(this), null), + this.tasks.getNodeStateFiles() + ], { + concurrent: false, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION + }, 'Error in downloading states from nodes', null) + + await action(argv, this) + return true + } + async refresh (argv: any) { argv = helpers.addFlagsToArgv(argv, NodeFlags.REFRESH_FLAGS) @@ -566,6 +590,9 @@ export class NodeCommandHandlers { const action = helpers.commandActionBuilder([ this.tasks.initialize(argv, startConfigBuilder.bind(this), lease), this.tasks.identifyExistingNodes(), + this.tasks.uploadStateFiles( + (ctx: any) => ctx.config.stateFile.length === 0 + ), this.tasks.startNodes('nodeAliases'), this.tasks.enablePortForwarding(), this.tasks.checkAllNodesAreActive('nodeAliases'), diff --git a/src/commands/node/index.ts b/src/commands/node/index.ts index 6f0afbfa6..d5cc19d73 100644 --- a/src/commands/node/index.ts +++ b/src/commands/node/index.ts @@ -137,6 +137,13 @@ export class NodeCommand extends BaseCommand { handler: 'logs' }, NodeFlags.LOGS_FLAGS)) + .command(new YargsCommand({ + command: 'states', + description: 'Download hedera states from the network nodes and stores them in /// directory', + commandDef: nodeCmd, + handler: 'states' + }, NodeFlags.STATES_FLAGS)) + .command(new YargsCommand({ command: 'add', description: 'Adds a node with a specific version of Hedera platform', diff --git a/src/commands/node/tasks.ts b/src/commands/node/tasks.ts index f716c9fcd..cd5e1bc49 100644 --- a/src/commands/node/tasks.ts +++ b/src/commands/node/tasks.ts @@ -55,7 +55,7 @@ import crypto from 'crypto' 
import { addDebugOptions, getNodeAccountMap, - getNodeLogs, + getNodeLogs, getNodeStatesFromPod, prepareEndpoints, renameAndCopyFile, sleep, @@ -691,6 +691,26 @@ export class NodeCommandTasks { }) } + uploadStateFiles (skip: Function | boolean) { + const self = this + return new Task('Upload state files to network nodes', async (ctx: any, task: ListrTaskWrapper) => { + const config = ctx.config + + const zipFile = config.stateFile + self.logger.debug(`zip file: ${zipFile}`) + for (const nodeAlias of ctx.config.nodeAliases) { + const podName = ctx.config.podNames[nodeAlias] + self.logger.debug(`Uploading state files to pod ${podName}`) + await self.k8.copyTo(podName, constants.ROOT_CONTAINER, zipFile, `${constants.HEDERA_HAPI_PATH}/data`) + + self.logger.info(`Deleting the previous state files in pod ${podName} directory ${constants.HEDERA_HAPI_PATH}/data/saved`) + await self.k8.execContainer(podName, constants.ROOT_CONTAINER, ['rm', '-rf', `${constants.HEDERA_HAPI_PATH}/data/saved/*`]) + await self.k8.execContainer(podName, constants.ROOT_CONTAINER, + ['tar', '-xvf', `${constants.HEDERA_HAPI_PATH}/data/${path.basename(zipFile)}`, '-C', `${constants.HEDERA_HAPI_PATH}/data/saved`]) + } + }, skip) + } + identifyNetworkPods () { const self = this return new Task('Identify network pods', (ctx: any, task: ListrTaskWrapper) => { @@ -965,6 +985,15 @@ export class NodeCommandTasks { }) } + getNodeStateFiles () { + const self = this + return new Task('Get node states', async (ctx: any, task: ListrTaskWrapper) => { + for (const nodeAlias of ctx.config.nodeAliases) { + await getNodeStatesFromPod(self.k8, ctx.config.namespace, nodeAlias) + } + }) + } + checkPVCsEnabled () { return new Task('Check that PVCs are enabled', (ctx: any, task: ListrTaskWrapper) => { if (!this.configManager.getFlag(flags.persistentVolumeClaims)) { @@ -55,7 +55,7 @@ import crypto from 'crypto' 
@@ async function getNodeLog (pod: V1Pod, namespace: string, timeString: string, k8 k8.logger.debug(`getNodeLogs(${pod.metadata.name}): ...end`) } + +/** + * Download state files from a pod + * @param k8 - an instance of core/K8 + * @param namespace - the namespace of the network + * @param nodeAlias - the pod name + * @returns a promise that resolves when the state files are downloaded + */ +export async function getNodeStatesFromPod (k8: K8, namespace: string, nodeAlias: string) { + const pods = await k8.getPodsByLabel([`solo.hedera.com/node-name=${nodeAlias}`]) + // get length of pods + const promises = [] + for (const pod of pods) { + promises.push(getNodeState(pod, namespace, k8)) + } + return await Promise.all(promises) +} + + +async function getNodeState (pod: V1Pod, namespace: string, k8: K8){ + const podName = pod.metadata!.name as PodName + k8.logger.debug(`getNodeState(${pod.metadata.name}): begin...`) + const targetDir = path.join(SOLO_LOGS_DIR, namespace) + try { + if (!fs.existsSync(targetDir)) { + fs.mkdirSync(targetDir, { recursive: true }) + } + const zipCommand = `tar -czf ${HEDERA_HAPI_PATH}/${podName}-state.zip -C ${HEDERA_HAPI_PATH}/data/saved .` + await k8.execContainer(podName, ROOT_CONTAINER, zipCommand) + await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/${podName}-state.zip`, targetDir) + } catch (e: Error | any) { + k8.logger.error(`failed to download state from pod ${podName}`, e) + k8.logger.showUser(`Failed to download state from pod ${podName}` + e) + } + k8.logger.debug(`getNodeState(${pod.metadata.name}): ...end`) +} + + /** * Create a map of node aliases to account IDs * @param nodeAliases diff --git a/test/e2e/commands/node_local_hedera.test.ts b/test/e2e/commands/node_local_hedera.test.ts index a5782491a..ee2a6c3c1 100644 --- a/test/e2e/commands/node_local_hedera.test.ts +++ b/test/e2e/commands/node_local_hedera.test.ts @@ -22,9 +22,12 @@ import { getDefaultArgv, TEST_CLUSTER } from '../../test_util.js' -import { 
getNodeLogs } from '../../../src/core/helpers.js' -import { MINUTES } from '../../../src/core/constants.js' +import { getNodeLogs, sleep } from '../../../src/core/helpers.js' +import { MINUTES, SOLO_LOGS_DIR } from '../../../src/core/constants.js' import type { K8 } from '../../../src/core/index.js' +import path from 'path' +import { expect } from 'chai' +import { AccountBalanceQuery, AccountCreateTransaction, Hbar, HbarUnit, PrivateKey } from '@hashgraph/sdk' const LOCAL_HEDERA = 'local-hedera-app' const argv = getDefaultArgv() @@ -42,9 +45,53 @@ argv[flags.localBuildPath.name] = 'node1=../hedera-services/hedera-node/data/,.. argv[flags.namespace.name] = LOCAL_HEDERA e2eTestSuite(LOCAL_HEDERA, argv, undefined, undefined, undefined, undefined, undefined, undefined, true, (bootstrapResp) => { + const nodeCmd = bootstrapResp.cmd.nodeCmd + const accountCmd = bootstrapResp.cmd.accountCmd + const accountManager = bootstrapResp.manager.accountManager describe('Node for hedera app should have started successfully', () => { hederaK8 = bootstrapResp.opts.k8 + it('save the state and restart the node with saved state', async function () { + // create an account so later we can verify its balance after restart + await accountManager.loadNodeClient(LOCAL_HEDERA) + const privateKey = PrivateKey.generate() + // get random integer between 100 and 1000 + const amount = Math.floor(Math.random() * (1000 - 100) + 100) + + const newAccount = await new AccountCreateTransaction() + .setKey(privateKey) + .setInitialBalance(Hbar.from(amount, HbarUnit.Hbar)) + .execute(accountManager._nodeClient) + + // Get the new account ID + const getReceipt = await newAccount.getReceipt(accountManager._nodeClient) + const accountInfo = { + accountId: getReceipt.accountId.toString(), + balance: amount + } + + // create more transactions to save more round of states + await accountCmd.create(argv) + await sleep(3) + await accountCmd.create(argv) + await sleep(3) + + // stop network and save the state + 
await nodeCmd.handlers.stop(argv) + await nodeCmd.handlers.states(argv) + + argv[flags.stateFile.name] = path.join(SOLO_LOGS_DIR, LOCAL_HEDERA, 'network-node1-0-state.zip') + await nodeCmd.handlers.start(argv) + + // check balance of accountInfo.accountId + await accountManager.loadNodeClient(LOCAL_HEDERA) + const balance = await new AccountBalanceQuery() + .setAccountId(accountInfo.accountId) + .execute(accountManager._nodeClient) + + expect(balance.hbars).to.be.eql(Hbar.from(accountInfo.balance, HbarUnit.Hbar)) + }).timeout(10 * MINUTES) + it('get the logs and delete the namespace', async function () { await getNodeLogs(hederaK8, LOCAL_HEDERA) await hederaK8.deleteNamespace(LOCAL_HEDERA) diff --git a/test/test_util.ts b/test/test_util.ts index 4d024a5a8..a1c57ab29 100644 --- a/test/test_util.ts +++ b/test/test_util.ts @@ -116,6 +116,9 @@ interface TestOpts { interface BootstrapResponse { namespace: string, opts: TestOpts, + manager: { + accountManager: AccountManager + }, cmd: { initCmd: InitCommand, clusterCmd: ClusterCommand, @@ -182,6 +185,9 @@ export function bootstrapTestVariables ( return { namespace, opts, + manager: { + accountManager + }, cmd: { initCmd, clusterCmd,