Skip to content

Commit

Permalink
fix: use latest FST chart and a single consensus node during mirror-node e2e test (#264)
Browse files Browse the repository at this point in the history

Signed-off-by: Lenin Mehedy <lenin.mehedy@swirldslabs.com>
Signed-off-by: Jeffrey Tang <jeffrey@swirldslabs.com>
Co-authored-by: Jeffrey Tang <jeffrey@swirldslabs.com>
  • Loading branch information
leninmehedy and JeffreyDallas authored May 8, 2024
1 parent 16cb4ba commit 5a5a02f
Show file tree
Hide file tree
Showing 9 changed files with 103 additions and 26 deletions.
8 changes: 7 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ Then run the following command to set the kubectl context to the new cluster:
```bash
kind create cluster -n "${SOLO_CLUSTER_NAME}"
```

Example output

```
Expand Down Expand Up @@ -184,6 +185,7 @@ Kubernetes Namespace : solo
✔ Generate gRPC TLS keys
✔ Finalize
```

Key files are generated in `~/.solo/keys` directory.

```
Expand All @@ -192,6 +194,7 @@ $ ls ~/.solo/cache/keys
hedera-node0.crt hedera-node1.crt hedera-node2.crt private-node0.pfx private-node2.pfx
hedera-node0.key hedera-node1.key hedera-node2.key private-node1.pfx public.pfx
```

* Setup cluster with shared components
* In a separate terminal, you may run `k9s` to view the pod status.

Expand All @@ -214,7 +217,6 @@ Kubernetes Namespace : solo
✔ Install 'fullstack-cluster-setup' chart
```


* Deploy helm chart with Hedera network components
* It may take a while (5~15 minutes depending on your internet speed) to download various docker images and get the pods started.
* If it fails, ensure you have enough resources allocated for Docker engine and retry the command.
Expand Down Expand Up @@ -334,6 +336,7 @@ Kubernetes Namespace : solo
✔ Check proxy for node: node1
✔ Check node proxies are ACTIVE
```

* Deploy mirror node

```
Expand Down Expand Up @@ -518,14 +521,17 @@ Kubernetes Namespace : solo
✔ Generate gRPC TLS keys
✔ Finalize
```

PEM key files are generated in `~/.solo/keys` directory.

```
$ ls ~/.solo/cache/keys
a-private-node0.pem a-public-node1.pem hedera-node1.crt s-private-node0.pem s-public-node1.pem
a-private-node1.pem a-public-node2.pem hedera-node1.key s-private-node1.pem s-public-node2.pem
a-private-node2.pem hedera-node0.crt hedera-node2.crt s-private-node2.pem
a-public-node0.pem hedera-node0.key hedera-node2.key s-public-node0.pem
```

* Setup cluster with shared components

```
Expand Down
12 changes: 6 additions & 6 deletions src/commands/mirror_node.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -127,43 +127,43 @@ export class MirrorNodeCommand extends BaseCommand {
task: async (ctx, _) => self.k8.waitForPodReady([
'app.kubernetes.io/component=postgresql',
'app.kubernetes.io/name=postgres'
], 1, 900, 2000)
], 1, 300, 2000)
},
{
title: 'Check REST API',
task: async (ctx, _) => self.k8.waitForPodReady([
'app.kubernetes.io/component=rest',
'app.kubernetes.io/name=rest'
], 1, 900, 200)
], 1, 300, 2000)
},
{
title: 'Check GRPC',
task: async (ctx, _) => self.k8.waitForPodReady([
'app.kubernetes.io/component=grpc',
'app.kubernetes.io/name=grpc'
], 1, 900, 2000)
], 1, 300, 2000)
},
{
title: 'Check Monitor',
task: async (ctx, _) => self.k8.waitForPodReady([
'app.kubernetes.io/component=monitor',
'app.kubernetes.io/name=monitor'
], 1, 900, 2000)
], 1, 300, 2000)
},
{
title: 'Check Importer',
task: async (ctx, _) => self.k8.waitForPodReady([
'app.kubernetes.io/component=importer',
'app.kubernetes.io/name=importer'
], 1, 900, 2000)
], 1, 300, 2000)
},
{
title: 'Check Hedera Explorer',
skip: (ctx, _) => !ctx.config.deployHederaExplorer,
task: async (ctx, _) => self.k8.waitForPodReady([
'app.kubernetes.io/component=hedera-explorer',
'app.kubernetes.io/name=hedera-explorer'
], 1, 900, 2000)
], 1, 300, 2000)
}
]

Expand Down
18 changes: 18 additions & 0 deletions src/core/k8.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -837,6 +837,14 @@ export class K8 {
})
}

/**
* Check if pod is ready
* @param labels pod labels
* @param podCount number of pod expected
* @param maxAttempts maximum attempts to check
* @param delay delay between checks in milliseconds
* @return {Promise<unknown>}
*/
async waitForPodReady (labels = [], podCount = 1, maxAttempts = 10, delay = 500) {
try {
return await this.waitForPodCondition(K8.PodReadyCondition, labels, podCount, maxAttempts, delay)
Expand All @@ -845,6 +853,16 @@ export class K8 {
}
}

/**
* Check pods for conditions
* @param conditionsMap a map of conditions and values
* @param labels pod labels
* @param podCount number of pod expected
* @param maxAttempts maximum attempts to check
* @param delay delay between checks in milliseconds
* @return {Promise<unknown>}
*/

async waitForPodCondition (
conditionsMap,
labels = [],
Expand Down
43 changes: 43 additions & 0 deletions test/data/warmup-cluster.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
#!/bin/bash
# Warm up the local kind cluster for solo e2e tests: pre-pull the container
# images the test network needs and side-load them into kind, so individual
# tests do not spend their timeout budget downloading images.

# Name of the kind cluster the images are loaded into.
# NOTE(review): must match SOLO_CLUSTER_NAME used by test/e2e/setup-e2e.sh — verify when renaming.
SOLO_CLUSTER=solo-e2e

# Images pulled by the deployed charts during e2e runs (mirror node, explorer,
# consensus node init, minio, prometheus stack, proxies, …).
# NOTE(review): tags are pinned; presumably they must track the chart versions
# in version.mjs — confirm when bumping FST_CHART_VERSION or mirror node tags.
SOLO_IMAGE_LIST=( \
  docker.io/bitnami/postgresql-repmgr:14.11.0-debian-12-r8 \
  docker.io/envoyproxy/envoy:v1.21.1 \
  docker.io/grafana/grafana:10.1.5 \
  docker.io/haproxytech/haproxy-alpine:2.4.25 \
  quay.io/prometheus-operator/prometheus-config-reloader:v0.68.0 \
  docker.io/otel/opentelemetry-collector-contrib:0.72.0 \
  gcr.io/hedera-registry/hedera-mirror-node-explorer:24.4.0 \
  gcr.io/hedera-registry/uploader-mirror:1.3.0 \
  gcr.io/mirrornode/hedera-mirror-grpc:0.103.0 \
  quay.io/prometheus-operator/prometheus-operator:v0.68.0 \
  gcr.io/mirrornode/hedera-mirror-importer:0.103.0 \
  gcr.io/mirrornode/hedera-mirror-monitor:0.103.0 \
  gcr.io/mirrornode/hedera-mirror-rest:0.103.0 \
  quay.io/prometheus/alertmanager:v0.26.0 \
  gcr.io/mirrornode/hedera-mirror-web3:0.103.0 \
  ghcr.io/hashgraph/full-stack-testing/ubi8-init-java21:0.24.5 \
  quay.io/prometheus/node-exporter:v1.6.1 \
  ghcr.io/hashgraph/hedera-json-rpc-relay:0.46.0 \
  quay.io/kiwigrid/k8s-sidecar:1.25.1 \
  quay.io/minio/minio:RELEASE.2024-02-09T21-25-16Z \
  quay.io/minio/operator:v5.0.7 \
  quay.io/prometheus/prometheus:v2.47.1 \
  registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.10.0 \
)

# Pull every image in SOLO_IMAGE_LIST to the local docker daemon.
# Fails fast on the first pull error instead of silently continuing (a missed
# pull would otherwise surface later as a confusing `kind load` failure).
# Returns non-zero on failure; uses `return` (not `exit`) because this script
# is sourced by the caller.
function download_images() {
  for im in "${SOLO_IMAGE_LIST[@]}"; do
    echo "Pulling image: ${im}"
    docker pull --quiet "${im}" || { echo "ERROR: failed to pull image: ${im}" >&2; return 1; }
    sleep 1 # brief pause to avoid hammering the registries
  done
}

# Side-load every image in SOLO_IMAGE_LIST into the kind cluster named by
# SOLO_CLUSTER. Quotes the cluster name (ShellCheck SC2086) and fails fast on
# the first load error instead of silently continuing. Returns non-zero on
# failure; uses `return` (not `exit`) because this script is sourced.
function load_images() {
  for im in "${SOLO_IMAGE_LIST[@]}"; do
    echo "Loading image: ${im}"
    kind load docker-image "${im}" -n "${SOLO_CLUSTER}" || { echo "ERROR: failed to load image: ${im}" >&2; return 1; }
  done
}
12 changes: 9 additions & 3 deletions test/e2e/commands/mirror_node.test.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
*/

import {
afterAll, afterEach, describe,
afterAll, afterEach, beforeAll, describe,
expect,
it
} from '@jest/globals'
Expand Down Expand Up @@ -44,7 +44,7 @@ describe('MirrorNodeCommand', () => {
argv[flags.releaseTag.name] = 'v0.47.0-alpha.0'
argv[flags.keyFormat.name] = constants.KEY_FORMAT_PEM

argv[flags.nodeIDs.name] = 'node0,node1,node2'
argv[flags.nodeIDs.name] = 'node0' // use a single node to reduce resource during e2e tests
argv[flags.generateGossipKeys.name] = true
argv[flags.generateTlsKeys.name] = true
argv[flags.clusterName.name] = TEST_CLUSTER
Expand All @@ -58,9 +58,15 @@ describe('MirrorNodeCommand', () => {
const downloader = new core.PackageDownloader(mirrorNodeCmd.logger)
const accountManager = bootstrapResp.opts.accountManager

beforeAll(() => {
bootstrapResp.opts.logger.showUser(`------------------------- START: ${testName} ----------------------------`)
})

afterAll(async () => {
await k8.deleteNamespace(namespace)
await accountManager.close()

bootstrapResp.opts.logger.showUser(`------------------------- END: ${testName} ----------------------------`)
})

afterEach(async () => {
Expand All @@ -75,7 +81,7 @@ describe('MirrorNodeCommand', () => {
mirrorNodeCmd.logger.showUserError(e)
expect(e).toBeNull()
}
}, 480000)
}, 600000)

it('mirror node api and hedera explorer should success', async () => {
await accountManager.loadNodeClient(namespace)
Expand Down
18 changes: 7 additions & 11 deletions test/e2e/setup-e2e.sh
Original file line number Diff line number Diff line change
Expand Up @@ -8,18 +8,14 @@ kind delete cluster -n "${SOLO_CLUSTER_NAME}" || true
kind create cluster -n "${SOLO_CLUSTER_NAME}" --image "${KIND_IMAGE}" || exit 1

# **********************************************************************************************************************
# Warm up the cluster by deploying the network
# This also helps to have the cluster loaded with the images.
# Most of the e2e test should bootstrap its own network in its own namespace. However, some tests can use this as a
# shared resource if required.
# Warm up the cluster
# **********************************************************************************************************************
solo init --namespace "${SOLO_NAMESPACE}" -i node0,node1,node2 -t v0.47.0 -s "${SOLO_CLUSTER_SETUP_NAMESPACE}" --dev || exit 1 # cache args for subsequent commands
solo cluster setup || exit 1
helm list --all-namespaces
solo network deploy || exit 1
source test/data/warmup-cluster.sh; download_images; load_images

# **********************************************************************************************************************
# Don't delete the namespaces as some e2e tests (i.e. test/e2e/core/*.test.mjs) still uses it as shared resources.
# Init and deploy a network for e2e tests in (test/e2e/core)
# **********************************************************************************************************************
# kubectl delete ns "${SOLO_NAMESPACE}"
# kubectl delete ns "${SOLO_CLUSTER_SETUP_NAMESPACE}"
solo init --namespace "${SOLO_NAMESPACE}" -i node0,node1,node2 -s "${SOLO_CLUSTER_SETUP_NAMESPACE}" --dev || exit 1 # cache args for subsequent commands
solo cluster setup || exit 1
helm list --all-namespaces
solo network deploy || exit 1
12 changes: 10 additions & 2 deletions test/test_util.js
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
* limitations under the License.
*
*/
import { describe, expect, it } from '@jest/globals'
import { afterAll, beforeAll, describe, expect, it } from '@jest/globals'
import fs from 'fs'
import os from 'os'
import path from 'path'
Expand Down Expand Up @@ -176,6 +176,14 @@ export function bootstrapNetwork (testName, argv,
const chartManager = bootstrapResp.opts.chartManager

describe(`Bootstrap network for test [release ${argv[flags.releaseTag.name]}, keyFormat: ${argv[flags.keyFormat.name]}]`, () => {
beforeAll(() => {
bootstrapResp.opts.logger.showUser(`------------------------- START: bootstrap (${testName}) ----------------------------`)
})

afterAll(() => {
bootstrapResp.opts.logger.showUser(`------------------------- END: bootstrap (${testName}) ----------------------------`)
})

it('should cleanup previous deployment', async () => {
await initCmd.init(argv)

Expand All @@ -191,7 +199,7 @@ export function bootstrapNetwork (testName, argv,
if (!await chartManager.isChartInstalled(constants.FULLSTACK_SETUP_NAMESPACE, constants.FULLSTACK_CLUSTER_SETUP_CHART)) {
await clusterCmd.setup(argv)
}
}, 60000)
}, 120000)

it('should succeed with network deploy', async () => {
await networkCmd.deploy(argv)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,6 @@ describe('DependencyManager', () => {

it('should succeed during helm dependency check', async () => {
await expect(depManager.checkDependency(constants.HELM)).resolves.toBe(true)
})
}, 10000)
}, 60000)
})
})
2 changes: 1 addition & 1 deletion version.mjs
Original file line number Diff line number Diff line change
Expand Up @@ -21,5 +21,5 @@

export const JAVA_VERSION = '21.0.1+12'
export const HELM_VERSION = 'v3.14.2'
export const FST_CHART_VERSION = 'v0.24.3'
export const FST_CHART_VERSION = 'v0.24.5'
export const HEDERA_PLATFORM_VERSION = 'v0.47.0'

0 comments on commit 5a5a02f

Please sign in to comment.