From e91f465943e935f96f56283b294bc67997f06160 Mon Sep 17 00:00:00 2001 From: Lenin Mehedy Date: Fri, 22 Sep 2023 07:27:22 +1000 Subject: [PATCH] feat: implement fullstack-cluster-setup chart for shared resources (#363) Signed-off-by: Lenin Mehedy Signed-off-by: Nathan Klick Co-authored-by: Nathan Klick --- .../project-plugins/src/main/kotlin/Utils.kt | 24 ++++++ ...era.fullstack.aggregate-reports.gradle.kts | 10 +-- charts/fullstack-cluster-setup/.helmignore | 23 ++++++ charts/fullstack-cluster-setup/Chart.yaml | 24 ++++++ .../templates/NOTES.txt | 0 .../templates/_helpers.tpl | 51 +++++++++++++ .../templates/gateway-api/fst-gateway.yaml | 9 +++ .../templates/rbac/pod-monitor-role.yaml | 31 ++++++++ charts/fullstack-cluster-setup/values.yaml | 8 ++ charts/hedera-network/Chart.lock | 2 +- .../templates/gateway-api/gateway.yaml | 12 --- .../templates/rbac/pod-monitor-role.yaml | 42 ----------- .../templates/rbac/pod-monitor.yaml | 19 +++++ .../templates/rbac/service-accounts.yaml | 5 -- .../templates/tests/test-deployment.yaml | 8 ++ charts/hedera-network/tests/env.sh | 19 +++-- charts/hedera-network/tests/env.template | 4 + charts/hedera-network/tests/helper.sh | 38 +++++----- charts/hedera-network/tests/run.sh | 34 ++++++++- .../tests/test_basic_deployment.bats | 4 +- .../tests/test_sidecar_deployment.bats | 38 +++++----- charts/hedera-network/values.yaml | 10 +-- dev/Makefile | 24 +++--- dev/gateway-api/Makefile | 3 + dev/scripts/env.sh | 14 ++++ dev/scripts/gateway.sh | 22 +++--- dev/scripts/main.sh | 75 ++++++++++++++++--- dev/scripts/template.env | 9 ++- .../grafana/example-tracing-app.yaml | 2 - 29 files changed, 406 insertions(+), 158 deletions(-) create mode 100644 charts/fullstack-cluster-setup/.helmignore create mode 100644 charts/fullstack-cluster-setup/Chart.yaml create mode 100644 charts/fullstack-cluster-setup/templates/NOTES.txt create mode 100644 charts/fullstack-cluster-setup/templates/_helpers.tpl create mode 100644 
charts/fullstack-cluster-setup/templates/gateway-api/fst-gateway.yaml create mode 100644 charts/fullstack-cluster-setup/templates/rbac/pod-monitor-role.yaml create mode 100644 charts/fullstack-cluster-setup/values.yaml delete mode 100644 charts/hedera-network/templates/rbac/pod-monitor-role.yaml create mode 100644 charts/hedera-network/templates/rbac/pod-monitor.yaml delete mode 100644 charts/hedera-network/templates/rbac/service-accounts.yaml diff --git a/build-logic/project-plugins/src/main/kotlin/Utils.kt b/build-logic/project-plugins/src/main/kotlin/Utils.kt index 1fcc3c6b1..4d367c0f1 100644 --- a/build-logic/project-plugins/src/main/kotlin/Utils.kt +++ b/build-logic/project-plugins/src/main/kotlin/Utils.kt @@ -39,6 +39,30 @@ class Utils { updateStringInFile(manifestFile, "appVersion:", "appVersion: \"${newVersion}\"") } + @JvmStatic + fun updateHelmChartVersion(project: Project, newVersion: SemVer) { + updateHelmCharts(project) {chart -> + updateHelmChartVersion(project, chart.name, newVersion) + } + } + + @JvmStatic + fun updateHelmChartAppVersion(project: Project, newVersion: SemVer) { + updateHelmCharts(project) {chart -> + updateHelmChartAppVersion(project, chart.name, newVersion) + } + } + + @JvmStatic + fun updateHelmCharts(project: Project, fn: (File) -> Unit) { + val chartDir = File(project.rootProject.projectDir, "charts") + chartDir.listFiles()?.forEach { chart -> + if (chart.isDirectory && File(chart, "Chart.yaml").exists()) { + fn(chart) + } + } + } + private fun updateStringInFile(file: File, startsWith: String, newString: String, ignoreLeadingSpace: Boolean = true) { var lines: List = mutableListOf() diff --git a/build-logic/project-plugins/src/main/kotlin/com.hedera.fullstack.aggregate-reports.gradle.kts b/build-logic/project-plugins/src/main/kotlin/com.hedera.fullstack.aggregate-reports.gradle.kts index d87ef35a2..4ceffc4c0 100644 --- a/build-logic/project-plugins/src/main/kotlin/com.hedera.fullstack.aggregate-reports.gradle.kts +++ 
b/build-logic/project-plugins/src/main/kotlin/com.hedera.fullstack.aggregate-reports.gradle.kts @@ -50,8 +50,6 @@ tasks.register("githubVersionSummary") { } } -val HEDERA_NETWORK_CHART = "hedera-network" - tasks.register("versionAsSpecified") { group = "versioning" doLast { @@ -59,8 +57,8 @@ tasks.register("versionAsSpecified") { ?: throw IllegalArgumentException("No newVersion property provided! Please add the parameter -PnewVersion= when running this task.") val newVer = SemVer.parse(verStr) - Utils.updateHelmChartVersion(project, HEDERA_NETWORK_CHART, newVer) - Utils.updateHelmChartAppVersion(project, HEDERA_NETWORK_CHART, newVer) + Utils.updateHelmChartVersion(project, newVer) + Utils.updateHelmChartAppVersion(project, newVer) Utils.updateVersion(project, newVer) } } @@ -71,8 +69,8 @@ tasks.register("versionAsSnapshot") { val currVer = SemVer.parse(project.version.toString()) val newVer = SemVer(currVer.major, currVer.minor, currVer.patch, "SNAPSHOT") - Utils.updateHelmChartVersion(project, HEDERA_NETWORK_CHART, newVer) - Utils.updateHelmChartAppVersion(project, HEDERA_NETWORK_CHART, newVer) + Utils.updateHelmChartVersion(project, newVer) + Utils.updateHelmChartAppVersion(project, newVer) Utils.updateVersion(project, newVer) } } diff --git a/charts/fullstack-cluster-setup/.helmignore b/charts/fullstack-cluster-setup/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/charts/fullstack-cluster-setup/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/charts/fullstack-cluster-setup/Chart.yaml b/charts/fullstack-cluster-setup/Chart.yaml new file mode 100644 index 000000000..7a8f796e9 --- /dev/null +++ b/charts/fullstack-cluster-setup/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: fullstack-cluster-setup +description: A Helm chart to setup shared resources for fullstack-testing + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.8.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "0.8.0" diff --git a/charts/fullstack-cluster-setup/templates/NOTES.txt b/charts/fullstack-cluster-setup/templates/NOTES.txt new file mode 100644 index 000000000..e69de29bb diff --git a/charts/fullstack-cluster-setup/templates/_helpers.tpl b/charts/fullstack-cluster-setup/templates/_helpers.tpl new file mode 100644 index 000000000..fb8bd8fe3 --- /dev/null +++ b/charts/fullstack-cluster-setup/templates/_helpers.tpl @@ -0,0 +1,51 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "fullstack-cluster-setup.name" -}} +{{- default .Chart.Name .Values.global.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "fullstack-cluster-setup.fullname" -}} +{{- if .Values.global.fullnameOverride }} +{{- .Values.global.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.global.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "fullstack-cluster-setup.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "fullstack-cluster-setup.labels" -}} +helm.sh/chart: {{ include "fullstack-cluster-setup.chart" . }} +{{ include "fullstack-cluster-setup.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "fullstack-cluster-setup.selectorLabels" -}} +app.kubernetes.io/name: {{ include "fullstack-cluster-setup.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} diff --git a/charts/fullstack-cluster-setup/templates/gateway-api/fst-gateway.yaml b/charts/fullstack-cluster-setup/templates/gateway-api/fst-gateway.yaml new file mode 100644 index 000000000..f869dd944 --- /dev/null +++ b/charts/fullstack-cluster-setup/templates/gateway-api/fst-gateway.yaml @@ -0,0 +1,9 @@ +apiVersion: gateway.networking.k8s.io/v1beta1 +kind: GatewayClass +metadata: + name: fst-gateway-class + labels: + fullstack.hedera.com/type: gateway-class +spec: + controllerName: "gateway.envoyproxy.io/gatewayclass-controller" + #controllerName: "haproxy-ingress.github.io/controller" diff --git a/charts/fullstack-cluster-setup/templates/rbac/pod-monitor-role.yaml b/charts/fullstack-cluster-setup/templates/rbac/pod-monitor-role.yaml new file mode 100644 index 000000000..a2328539e --- /dev/null +++ b/charts/fullstack-cluster-setup/templates/rbac/pod-monitor-role.yaml @@ -0,0 +1,31 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: pod-monitor-role + labels: + fullstack.hedera.com/type: cluster-role +rules: + - apiGroups: [ "" ] + resources: + - pods + - services + - clusterroles + - pods/log + - secrets + verbs: + - get + - list + - apiGroups: [ "" ] + resources: + - pods/exec + verbs: + - create + - apiGroups: [ "gateway.networking.k8s.io" ] + resources: + - gatewayclasses + - gateways + - httproutes + - tcproutes + verbs: + - get + - list \ No newline at end of file diff --git a/charts/fullstack-cluster-setup/values.yaml b/charts/fullstack-cluster-setup/values.yaml new file mode 100644 index 000000000..d22bdbb64 --- /dev/null +++ 
b/charts/fullstack-cluster-setup/values.yaml @@ -0,0 +1,8 @@ +# Default values for fullstack-cluster-setup. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +global: + namespaceOverride: "" + nameOverride: "" + fullnameOverride: "" \ No newline at end of file diff --git a/charts/hedera-network/Chart.lock b/charts/hedera-network/Chart.lock index 21ec42f86..6d168fe77 100644 --- a/charts/hedera-network/Chart.lock +++ b/charts/hedera-network/Chart.lock @@ -9,4 +9,4 @@ dependencies: repository: https://operator.min.io/ version: 5.0.7 digest: sha256:cf355b295abceb5814ef57d3e146ec9d4e8db7365a700079d683bd5f766ad374 -generated: "2023-09-16T13:47:19.087992+10:00" +generated: "2023-09-20T13:51:41.203996+10:00" diff --git a/charts/hedera-network/templates/gateway-api/gateway.yaml b/charts/hedera-network/templates/gateway-api/gateway.yaml index a041c1cac..4bea8d1c5 100644 --- a/charts/hedera-network/templates/gateway-api/gateway.yaml +++ b/charts/hedera-network/templates/gateway-api/gateway.yaml @@ -1,16 +1,4 @@ -{{- if $.Values.gatewayApi.gatewayClass.enable | eq "true" }} -apiVersion: gateway.networking.k8s.io/v1beta1 -kind: GatewayClass -metadata: - name: {{ $.Values.gatewayApi.gatewayClass.name }} - namespace: {{ default $.Release.Namespace $.Values.global.namespaceOverride }} - labels: - fullstack.hedera.com/type: gateway-class -spec: - controllerName: {{ $.Values.gatewayApi.gatewayClass.controllerName }} -{{- end }} {{- if $.Values.gatewayApi.gateway.enable | eq "true" }} ---- apiVersion: gateway.networking.k8s.io/v1beta1 kind: Gateway metadata: diff --git a/charts/hedera-network/templates/rbac/pod-monitor-role.yaml b/charts/hedera-network/templates/rbac/pod-monitor-role.yaml deleted file mode 100644 index dd8ccbd8e..000000000 --- a/charts/hedera-network/templates/rbac/pod-monitor-role.yaml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: pod-monitoring-role - 
namespace: {{ default $.Release.Namespace $.Values.global.namespaceOverride }} -rules: - - apiGroups: [ "" ] - resources: - - pods - - pods/log - - secrets - verbs: - - get - - list - - apiGroups: [ "" ] - resources: - - pods/exec - verbs: - - create - - apiGroups: [ "gateway.networking.k8s.io" ] - resources: - - gatewayclasses - - gateways - - httproutes - - tcproutes - verbs: - - get - - list ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: pod-monitoring-role-binding - namespace: {{ default $.Release.Namespace $.Values.global.namespaceOverride }} -subjects: - - kind: ServiceAccount - name: pod-monitor - namespace: {{ default $.Release.Namespace $.Values.global.namespaceOverride }} -roleRef: - kind: ClusterRole - name: pod-monitoring-role - apiGroup: rbac.authorization.k8s.io diff --git a/charts/hedera-network/templates/rbac/pod-monitor.yaml b/charts/hedera-network/templates/rbac/pod-monitor.yaml new file mode 100644 index 000000000..d6912a9b8 --- /dev/null +++ b/charts/hedera-network/templates/rbac/pod-monitor.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: pod-monitor + namespace: {{ default $.Release.Namespace $.Values.global.namespaceOverride }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: pod-monitor-role-binding + namespace: {{ default $.Release.Namespace $.Values.global.namespaceOverride }} +subjects: + - kind: ServiceAccount + name: pod-monitor + namespace: {{ default $.Release.Namespace $.Values.global.namespaceOverride }} +roleRef: + kind: ClusterRole + name: {{ $.Values.tester.clusterRoleName }} + apiGroup: rbac.authorization.k8s.io diff --git a/charts/hedera-network/templates/rbac/service-accounts.yaml b/charts/hedera-network/templates/rbac/service-accounts.yaml deleted file mode 100644 index 0e4b26311..000000000 --- a/charts/hedera-network/templates/rbac/service-accounts.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -kind: 
ServiceAccount -metadata: - name: pod-monitor - namespace: {{ default $.Release.Namespace $.Values.global.namespaceOverride }} diff --git a/charts/hedera-network/templates/tests/test-deployment.yaml b/charts/hedera-network/templates/tests/test-deployment.yaml index b69a5a628..18a4226c5 100644 --- a/charts/hedera-network/templates/tests/test-deployment.yaml +++ b/charts/hedera-network/templates/tests/test-deployment.yaml @@ -28,8 +28,16 @@ spec: env: - name: TESTS_DIR value: "/tests" # should be same as mountPath + - name: LOG_DIR + value: "/tmp/fullstack-testing-logs" + - name: LOG_FILE + value: "helm-test.log" - name: OUTPUT_LOG # outputs the logs from the tests value: "true" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace command: - "/bin/bash" - "-c" diff --git a/charts/hedera-network/tests/env.sh b/charts/hedera-network/tests/env.sh index 76644eda3..e86a91f40 100755 --- a/charts/hedera-network/tests/env.sh +++ b/charts/hedera-network/tests/env.sh @@ -6,24 +6,23 @@ # load .env file if it exists in order to load variables with custom values ENV_FILE="$(dirname "${BASH_SOURCE[0]}")/.env" if [[ -f "${ENV_FILE}" ]]; then - export $(cat "${ENV_FILE}" | xargs) + set -a + # shellcheck source=./../temp/.env + source "${ENV_FILE}" + set +a fi - # set global env variables if not set BATS_HOME="${BATS_HOME:-../../../dev/bats}" TESTS_DIR="${TESTS_DIR:-.}" - -OUTPUT_LOG="${OUTPUT_LOG}" -LOG_DIR="${LOG_DIR:-/tmp/bats-test-logs}" -LOG_FILE="test.log" [ ! 
-d "${LOG_DIR}" ] && mkdir "${LOG_DIR}" -echo "" -echo "Env variables" -echo "==============================================" +echo "--------------------------Env Setup: fullstack-testing Helm Test------------------------------------------------" +echo "NAMESPACE: ${NAMESPACE}" echo "ENV_FILE: ${ENV_FILE}" echo "BATS_HOME: ${BATS_HOME}" echo "TESTS_DIR: ${TESTS_DIR}" +echo "LOG: ${LOG_DIR}/${LOG_FILE}" echo "OUTPUT_LOG: ${OUTPUT_LOG}" - +echo "-----------------------------------------------------------------------------------------------------" +echo "" diff --git a/charts/hedera-network/tests/env.template b/charts/hedera-network/tests/env.template index 3349c2874..1df446a1c 100644 --- a/charts/hedera-network/tests/env.template +++ b/charts/hedera-network/tests/env.template @@ -1 +1,5 @@ TOTAL_NODES=3 + +LOG_DIR="${LOG_DIR:-/tmp/fullstack-testing-logs}" +LOG_FILE="${LOG_FILE:-helm-test.log}" +OUTPUT_LOG="${OUTPUT_LOG:-false}" diff --git a/charts/hedera-network/tests/helper.sh b/charts/hedera-network/tests/helper.sh index 386195f3d..b86255dfa 100644 --- a/charts/hedera-network/tests/helper.sh +++ b/charts/hedera-network/tests/helper.sh @@ -32,7 +32,7 @@ function import { function get_pod_list() { local pattern=$1 - local resp=$(kubectl get pods -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}' | grep "${pattern}") + local resp=$(kubectl get pods -o=jsonpath='{range .items[*]}{.metadata.name}{"\n"}' -n "${NAMESPACE}" | grep "${pattern}") echo "${resp}" } @@ -82,8 +82,8 @@ function check_test_status() { function get_config_val() { local config_path=$1 - log_debug "Get config command: helm get values fst -a | yq '${config_path}'" - ret=$(helm get values fst -a | yq "${config_path}" ) + log_debug "Get config command: helm get values fst -a -n ${NAMESPACE} | yq '${config_path}'" + ret=$(helm get values fst -a -n "${NAMESPACE}" | yq "${config_path}" ) echo "${ret}" log_debug "${config_path} => ${ret}" } @@ -101,7 +101,7 @@ function is_enabled_for_node() { local 
config_path=$2 [[ -z "${config_path}" ]] && echo "ERROR: Config path is needed" && return "${EX_ERR}" - log_debug "Checking config '${config_path}' for node '${node_name}" + log_debug "Checking config '${config_path}' for node '${node_name} in namespace ${NAMESPACE} " local default_config_path=".defaults${config_path}" local node_config_path=".hedera.nodes[] | select(.name==\"${node_name}\") | ${config_path}" @@ -134,7 +134,7 @@ function get_sidecar_status() { [[ -z "${pod}" ]] && echo "ERROR: Pod name is needed (is_sidecar_ready)" && return "${EX_ERR}" [[ -z "${sidecar_name}" ]] && echo "ERROR: Sidecar name is needed (is_sidecar_ready)" && return "${EX_ERR}" - local sidecar_status=$(kubectl get pod "${pod}" -o jsonpath="{.status.containerStatuses[?(@.name=='${sidecar_name}')].ready}" | xargs) + local sidecar_status=$(kubectl get pod "${pod}" -o jsonpath="{.status.containerStatuses[?(@.name=='${sidecar_name}')].ready}" -n "${NAMESPACE}" | xargs) echo "${sidecar_status}" } @@ -144,9 +144,9 @@ function is_sidecar_ready() { [[ -z "${pod}" ]] && echo "ERROR: Pod name is needed (is_sidecar_ready)" && return "${EX_ERR}" [[ -z "${sidecar_name}" ]] && echo "ERROR: Sidecar name is needed (is_sidecar_ready)" && return "${EX_ERR}" - local sidecar_status=$(kubectl get pod "${pod}" -o jsonpath="{.status.containerStatuses[?(@.name=='${sidecar_name}')].ready}" | tr '[:lower:]' '[:upper:]') + local sidecar_status=$(kubectl get pod "${pod}" -o jsonpath="{.status.containerStatuses[?(@.name=='${sidecar_name}')].ready}" -n "${NAMESPACE}" | tr '[:lower:]' '[:upper:]') [ -z "${sidecar_status}" ] && sidecar_status="FALSE" - log_debug "${sidecar_name} in pod ${pod} is ready: ${sidecar_status}" + log_debug "${sidecar_name} in pod ${pod} is ready in namespace ${NAMESPACE} : ${sidecar_status}" [[ "${sidecar_status}" = "TRUE" ]] && return "${EX_OK}" return "${EX_ERR}" @@ -158,8 +158,8 @@ function has_sidecar() { [[ -z "${pod}" ]] && echo "ERROR: Pod name is needed (is_sidecar_ready)" && 
return "${EX_ERR}" [[ -z "${sidecar_name}" ]] && echo "ERROR: Sidecar name is needed (is_sidecar_ready)" && return "${EX_ERR}" - local sidecars=$(kubectl get pods "${pod}" -o jsonpath='{.spec.containers[*].name}') - log_debug "Sidecar list in pod ${pod}: ${sidecars}" + local sidecars=$(kubectl get pods "${pod}" -o jsonpath='{.spec.containers[*].name}' -n "${NAMESPACE}") + log_debug "Sidecar list in pod ${pod} in namespace ${NAMESPACE} : ${sidecars}" local found="FALSE" if [[ "${sidecars}" =~ ${sidecar_name} ]]; then @@ -173,10 +173,10 @@ function is_pod_ready() { local pod=$1 [[ -z "${pod}" ]] && echo "ERROR: Pod name is needed (is_pod_ready)" && return "${EX_ERR}" - local pod_status=$(kubectl get pod "${pod}" -o jsonpath="{.status.conditions[?(@.type=='Ready')].status}" | tr '[:lower:]' '[:upper:]') + local pod_status=$(kubectl get pod "${pod}" -o jsonpath="{.status.conditions[?(@.type=='Ready')].status}" -n "${NAMESPACE}" | tr '[:lower:]' '[:upper:]') [ -z "${pod_status}" ] && pod_status="FALSE" - log_debug "Pod '${pod}' is ready: ${pod_status}" + log_debug "Pod '${pod}' is ready in namespace ${NAMESPACE} : ${pod_status}" [[ "${pod_status}" = "TRUE" ]] && return "${EX_OK}" return "${EX_ERR}" @@ -190,15 +190,15 @@ function get_pod_label() { [[ -z "${pod}" ]] && echo "ERROR: Label name is needed" && return "${EX_ERR}" - log_debug "Checking for pod '${pod}'(timeout 300s)..." - $(kubectl wait --for=condition=Initialized pods "${pod}" --timeout 300s) > /dev/null 2>&1 + log_debug "Checking for pod '${pod}' in namespace ${NAMESPACE} (timeout 300s)..." + $(kubectl wait --for=condition=Initialized pods "${pod}" --timeout 300s -n "${NAMESPACE}") > /dev/null 2>&1 if [ $? 
= 1 ]; then log_debug "ERROR: Pod ${pod} is not available" && return "${EX_ERR}" fi - log_debug "Checking label '${label}' for pod '${pod}'" + log_debug "Checking label '${label}' for pod '${pod}' in namespace ${NAMESPACE} " local escaped_label="${label//./\\.}" - local label_val=$(kubectl get pod "${pod}" -o jsonpath="{.metadata.labels.${escaped_label}}" | xargs) + local label_val=$(kubectl get pod "${pod}" -o jsonpath="{.metadata.labels.${escaped_label}}" -n "${NAMESPACE}" | xargs) log_debug "Pod '${pod}' label '${label}': ${label_val}" echo "${label_val}" @@ -208,9 +208,9 @@ function get_pod_by_label() { local label=$1 [[ -z "${pod}" ]] && echo "ERROR: Label name is needed" && return "${EX_ERR}" - log_debug "Getting pod by label '${label}'" + log_debug "Getting pod by label '${label}' in namespace ${NAMESPACE} " local escaped_label="${label//./\\.}" - local pod_name=$(kubectl get pods -l "${label}" -o jsonpath="{.items[0].metadata.name}") + local pod_name=$(kubectl get pods -l "${label}" -o jsonpath="{.items[0].metadata.name}" -n "${NAMESPACE}") echo "${pod_name}" } @@ -221,10 +221,10 @@ function is_route_accepted() { local route_name=$2 [[ -z "${route_name}" ]] && echo "ERROR: Route name is needed" && return "${EX_ERR}" - local route_status=$(kubectl get "${route_type}" "${route_name}" -o jsonpath="{.status.parents[*].conditions[?(@.type=='Accepted')].status}" | tr '[:lower:]' '[:upper:]') + local route_status=$(kubectl get "${route_type}" "${route_name}" -o jsonpath="{.status.parents[*].conditions[?(@.type=='Accepted')].status}" -n "${NAMESPACE}" | tr '[:lower:]' '[:upper:]') [ -z "${route_status}" ] && route_status="FALSE" - log_debug "${route_type} '${route_name}' is accepted: ${route_status}" + log_debug "${route_type} '${route_name}' in namespace ${NAMESPACE} is accepted: ${route_status}" [[ "${route_status}" = "TRUE" ]] && return "${EX_OK}" return "${EX_ERR}" diff --git a/charts/hedera-network/tests/run.sh b/charts/hedera-network/tests/run.sh index 
382c00699..b872867bf 100755 --- a/charts/hedera-network/tests/run.sh +++ b/charts/hedera-network/tests/run.sh @@ -1,16 +1,33 @@ #!/usr/bin/env bash -source "$(dirname "${BASH_SOURCE[0]}")/env.sh" -source "$(dirname "${BASH_SOURCE[0]}")/logging.sh" +CUR_DIR=$(dirname "${BASH_SOURCE[0]}") +source "${CUR_DIR}/env.sh" +source "${CUR_DIR}/logging.sh" clear_log +echo "Cluster Resources" +echo "NAMESPACE: ${NAMESPACE}" +echo "=============================================================" +echo "------------------------------------------- Namespaces ----------------------------------------------------------" +kubectl get ns +kubectl config get-contexts +echo "------------------------------------------- GatewayClass ---------------------------------------------------------" +kubectl get GatewayClass +echo "------------------------------------------- ClusterRole ----------------------------------------------------------" +kubectl get ClusterRole +echo "------------------------------------------- Pods -----------------------------------------------------------------" +kubectl get pods +echo "------------------------------------------- Services -------------------------------------------------------------" +kubectl get svc +echo "------------------------------------------------------------------------------------------------------------------" + echo "" -echo "BATS directory: $BATS_HOME" +echo "File list in 'BATS_HOME': $BATS_HOME" echo "=============================================================" ls -la "${BATS_HOME}" echo "" -echo "Tests directory: $TESTS_DIR" +echo "File list in 'TEST_DIR': $TESTS_DIR" echo "=============================================================" ls -la "${TESTS_DIR}" @@ -24,8 +41,17 @@ if [[ -z "${test_file}" ]]; then else "${BATS_HOME}/bats-core/bin/bats" "${TESTS_DIR}/${test_file}" fi + readonly bats_exec_status=$? 
+# print test status in the log file +log_debug "Exit code: ${bats_exec_status}" +if [[ $bats_exec_status -eq 0 ]];then + log_debug "Test status: PASS" +else + log_debug "Test status: FAIL" +fi + # uncomment in order to inspect tmpdir #"${BATS_HOME}/bats-core/bin/bats" --no-tempdir-cleanup . diff --git a/charts/hedera-network/tests/test_basic_deployment.bats b/charts/hedera-network/tests/test_basic_deployment.bats index 1459a9e5c..69f0b31b0 100644 --- a/charts/hedera-network/tests/test_basic_deployment.bats +++ b/charts/hedera-network/tests/test_basic_deployment.bats @@ -10,7 +10,7 @@ setup() { log_debug "Expected total nodes: ${TOTAL_NODES}" log_debug "----------------------------------------------------------------------------" - kubectl wait --for=jsonpath='{.status.phase}'=Running pod -l fullstack.hedera.com/type=network-node --timeout=300s || return "${EX_ERR}" + kubectl wait --for=jsonpath='{.status.phase}'=Running pod -l fullstack.hedera.com/type=network-node --timeout=300s -n "${NAMESPACE}" || return "${EX_ERR}" local resp="$(get_pod_list network-node)" local nodes=(${resp}) # convert into an array @@ -53,7 +53,7 @@ setup() { # make few attempts to check systemctl status while [[ "${attempts}" -lt "${MAX_ATTEMPTS}" && "${systemctl_status}" -ne "${EX_OK}" ]]; do attempts=$((attempts + 1)) - kubectl exec "${node}" -c root-container -- systemctl status --no-pager + kubectl exec "${node}" -c root-container -n "${NAMESPACE}" -- systemctl status --no-pager systemctl_status="${?}" log_debug "Checked systemctl status in ${node} (Attempt #${attempts}/${MAX_ATTEMPTS})... 
>>>>> status: ${systemctl_status} <<<<<" if [[ "${systemctl_status}" -ne "${EX_OK}" ]]; then diff --git a/charts/hedera-network/tests/test_sidecar_deployment.bats b/charts/hedera-network/tests/test_sidecar_deployment.bats index 399da3d44..e45558b41 100644 --- a/charts/hedera-network/tests/test_sidecar_deployment.bats +++ b/charts/hedera-network/tests/test_sidecar_deployment.bats @@ -14,30 +14,34 @@ function run_default_sidecar_check() { local resp="$(get_pod_list network-node)" local pods=(${resp}) # convert into an array - local test_status="${PASS}" + log_debug "Network node: ${pods[*]}" + + local test_status="${FAIL}" local status_val="${EX_ERR}" - for pod in "${pods[@]}"; do - log_debug "" - log_debug "Checking pod ${pod} for sidecar ${sidecar_name}" + if [[ "${#pods[@]}" -gt 0 ]]; then + test_status="${PASS}" + for pod in "${pods[@]}"; do + log_debug "" + log_debug "Checking pod ${pod} for sidecar ${sidecar_name}" - local should_enable=$(get_config_val_upper "${enable_config_path}") - log_debug "${sidecar_name} is enabled in pod ${pod}: ${should_enable}" + local should_enable=$(get_config_val_upper "${enable_config_path}") + log_debug "${sidecar_name} is enabled in pod ${pod}: ${should_enable}" - local sidecar_exists=$(has_sidecar "${pod}" "${sidecar_name}" ) - log_debug "${sidecar_name} exists in pod ${pod}: ${sidecar_exists} " + local sidecar_exists=$(has_sidecar "${pod}" "${sidecar_name}" ) + log_debug "${sidecar_name} exists in pod ${pod}: ${sidecar_exists} " - log_debug "${should_enable} ${sidecar_exists}" - if [ "${should_enable}" = "TRUE" ] && [ "${sidecar_exists}" = "TRUE" ]; then - is_sidecar_ready "${pod}" "${sidecar_name}" || test_status="${FAIL}" - elif [[ "${should_enable}" != "${sidecar_exists}" ]]; then - test_status="${FAIL}" - fi + if [ "${should_enable}" = "TRUE" ] && [ "${sidecar_exists}" = "TRUE" ]; then + is_sidecar_ready "${pod}" "${sidecar_name}" || test_status="${FAIL}" + elif [[ "${should_enable}" != "${sidecar_exists}" ]]; then + 
test_status="${FAIL}" + fi - [ "${test_status}" = "FAIL" ] && break - done + [ "${test_status}" = "FAIL" ] && break + done + fi log_debug "" - log_debug "[${test_status}] ${sidecar_name} sidecar is running in all network-node pods" + log_debug "[${test_status}] ${sidecar_name} sidecar is running in all network-node pods in namespace ${NAMESPACE}" log_debug "" # assert success diff --git a/charts/hedera-network/values.yaml b/charts/hedera-network/values.yaml index a54169b2a..5d2f1b58a 100644 --- a/charts/hedera-network/values.yaml +++ b/charts/hedera-network/values.yaml @@ -26,6 +26,7 @@ terminationGracePeriodSeconds: 10 # helm test container tester: + clusterRoleName: "pod-monitor-role" # this is a shared cluster role for all namespaces image: registry: "ghcr.io" repository: "hashgraph/full-stack-testing/kubectl-bats" @@ -36,10 +37,7 @@ tester: # gateway-api configuration gatewayApi: gatewayClass: - name: "fst" - enable: "true" - controllerName: "gateway.envoyproxy.io/gatewayclass-controller" -# controllerName: "haproxy-ingress.github.io/controller" + name: "fst-gateway-class" # this is a shared gateway class for all namespaces gateway: name: "fst" enable: "true" @@ -71,8 +69,8 @@ gatewayApi: defaults: resources: requests: - cpu: 1 - memory: 2G + cpu: 100m + memory: 100Mi limits: cpu: 1 memory: 2G diff --git a/dev/Makefile b/dev/Makefile index e6f5a839a..e62797fe6 100644 --- a/dev/Makefile +++ b/dev/Makefile @@ -59,20 +59,26 @@ uninstall-chart: update-helm-dependencies: helm dependency update ../charts/hedera-network +.PHONY: deploy-shared +deploy-shared: update-helm-dependencies deploy-gateway-api deploy-prometheus-operator deploy-minio-operator-if-required + source "${SCRIPTS_DIR}/main.sh" && deploy_shared # run only after gateway-api CRDs are available + +.PHONY: destroy-shared +destroy-shared: + -$(MAKE) source "${SCRIPTS_DIR}/main.sh" && destroy_shared + -$(MAKE) undeploy-minio-operator + -$(MAKE) destroy-prometheus-operator + -$(MAKE) destroy-gateway-api # 
should be destroyed at the end when no more gateway-api CRDs are required + .PHONY: deploy-chart deploy-chart: - $(MAKE) update-helm-dependencies - $(MAKE) deploy-minio-operator-if-required - $(MAKE) deploy-prometheus-operator - $(MAKE) deploy-gateway-api + $(MAKE) deploy-shared $(MAKE) install-chart .PHONY: destroy-chart destroy-chart: -$(MAKE) uninstall-chart - -$(MAKE) destroy-gateway-api - -$(MAKE) destroy-prometheus-operator - -$(MAKE) undeploy-minio-operator + -$(MAKE) destroy-shared .PHONY: deploy-network deploy-network: deploy-chart @@ -94,7 +100,7 @@ deploy-network: deploy-chart kubectl wait --for=jsonpath='{.status.phase}'=Running pod -l fullstack.hedera.com/type=network-node --timeout=600s .PHONY: destroy-network -destroy-network: destroy-test-container destroy-chart +destroy-network: destroy-test-container uninstall-chart .PHONY: setup-nodes setup-nodes: setup @@ -220,7 +226,7 @@ helm-test: destroy-test-container: echo "" && \ echo ">> Deleting test container..." && \ - kubectl delete pod network-test || true + kubectl delete pod network-test -n "${NAMESPACE}" || true ######################################### CI ################################# .PHONY: local-kubectl-bats diff --git a/dev/gateway-api/Makefile b/dev/gateway-api/Makefile index fe8a9cd6e..b2d82c5dd 100644 --- a/dev/gateway-api/Makefile +++ b/dev/gateway-api/Makefile @@ -1,3 +1,6 @@ +# Force the use of bash as the shell for more features +SHELL=/bin/bash + SHELLOPTS:=$(if $(SHELLOPTS),$(SHELLOPTS):)pipefail:errexit .ONESHELL: diff --git a/dev/scripts/env.sh b/dev/scripts/env.sh index 81838d7d4..b12884910 100644 --- a/dev/scripts/env.sh +++ b/dev/scripts/env.sh @@ -4,10 +4,14 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" readonly SCRIPT_DIR readonly TMP_DIR="${SCRIPT_DIR}/../temp" +readonly SETUP_CHART_DIR="${SCRIPT_DIR}/../../charts/fullstack-cluster-setup" readonly CHART_DIR="${SCRIPT_DIR}/../../charts/hedera-network" 
+POD_MONITOR_ROLE="${POD_MONITOR_ROLE:-pod-monitor-role}" +GATEWAY_CLASS_NAME="${GATEWAY_CLASS_NAME:-fst-gateway-class}" # telemetry related env variables +readonly COMMON_RESOURCES="${SCRIPT_DIR}/../common-resources" readonly GATEWAY_API_DIR="${SCRIPT_DIR}/../gateway-api" readonly TELEMETRY_DIR="${SCRIPT_DIR}/../telemetry" readonly PROMETHEUS_DIR="${TELEMETRY_DIR}/prometheus" @@ -65,3 +69,13 @@ function setup() { } setup + +echo "--------------------------Env Setup: fullstack-testing ------------------------------------------------" +echo "CLUSTER_NAME: ${CLUSTER_NAME}" +echo "RELEASE_NAME: ${HELM_RELEASE_NAME}" +echo "USER: ${USER}" +echo "NAMESPACE: ${NAMESPACE}" +echo "SCRIPT_DIR: ${SCRIPT_DIR}" +echo "TMP_DIR: ${TMP_DIR}" +echo "-----------------------------------------------------------------------------------------------------" +echo "" diff --git a/dev/scripts/gateway.sh b/dev/scripts/gateway.sh index 7004dbfb6..5ac81f032 100644 --- a/dev/scripts/gateway.sh +++ b/dev/scripts/gateway.sh @@ -79,22 +79,25 @@ function deploy_envoy_gateway_api() { echo "Envoy Gateway API is already installed" echo "" fi + + get_gateway_status } function get_gateway_status() { echo "" helm list --all-namespaces | grep envoy-gateway - echo "-----------------------------------------------------------------------------------------------------" + echo "-----------------------Gateway CRDs------------------------------------------------------------------------------" kubectl get crd - echo "-----------------------------------------------------------------------------------------------------" + echo "-----------------------Gateway Class------------------------------------------------------------------------------" kubectl get gatewayclass - echo "-----------------------------------------------------------------------------------------------------" + echo "-----------------------Gateway------------------------------------------------------------------------------" kubectl get gateway - 
echo "-----------------------------------------------------------------------------------------------------" + echo "-----------------------HTTPRoute------------------------------------------------------------------------------" kubectl get httproute - echo "-----------------------------------------------------------------------------------------------------" + echo "-----------------------GRPCRoute------------------------------------------------------------------------------" kubectl get grpcroute - echo "-----------------------------------------------------------------------------------------------------" + echo "-----------------------TCPRoute------------------------------------------------------------------------------" + kubectl get tcproute } function destroy_envoy_gateway_api() { @@ -111,9 +114,6 @@ function destroy_envoy_gateway_api() { kubectl delete ns gateway-system fi - uninstall_crd "gateway.networking.k8s.io" - uninstall_crd "gateway.envoyproxy.io" - echo "Envoy Gateway API is uninstalled" echo "" } @@ -140,7 +140,7 @@ function expose_envoy_gateway_svc() { unexpose_envoy_gateway_svc || true - ENVOY_SERVICE=$(kubectl get svc -n envoy-gateway-system --selector=gateway.envoyproxy.io/owning-gateway-namespace=default,gateway.envoyproxy.io/owning-gateway-name=fst -o jsonpath="{.items[0].metadata.name}" ) + ENVOY_SERVICE=$(kubectl get svc -n envoy-gateway-system --selector=gateway.envoyproxy.io/owning-gateway-namespace="${NAMESPACE}",gateway.envoyproxy.io/owning-gateway-name=fst -o jsonpath="{.items[0].metadata.name}" ) echo "" echo "Exposing Envoy Gateway Service: ${ENVOY_SERVICE} on ${local_port}:${gateway_port}" echo "-----------------------------------------------------------------------------------------------------" @@ -148,7 +148,7 @@ function expose_envoy_gateway_svc() { } function unexpose_envoy_gateway_svc() { - export GATEWAY_SVC_PID=$(ps aux | grep "kubectl port-forward svc/${ENVOY_SERVICE}" | sed -n 2p | awk '{ print $2 }') + export 
GATEWAY_SVC_PID=$(ps aux | grep "kubectl port-forward svc/${ENVOY_SERVICE}" | grep -v "grep" | sed -n 1p | awk '{ print $2 }') [[ -z "${GATEWAY_SVC_PID}" ]] && echo "No Envoy Gateway Service PID is found" && return 0 if [[ "${GATEWAY_SVC_PID}" ]]; then diff --git a/dev/scripts/main.sh b/dev/scripts/main.sh index 3baaa6a8d..6191b4a64 100644 --- a/dev/scripts/main.sh +++ b/dev/scripts/main.sh @@ -28,6 +28,52 @@ function destroy_cluster() { kubectl delete ns "${NAMESPACE}" || true } +function deploy_shared() { + deploy_fullstack_cluster_setup_chart +} + +function destroy_shared() { + destroy_fullstack_cluster_setup_chart +} + +function deploy_fullstack_cluster_setup_chart() { + setup_kubectl_context + + echo "Installing fullstack-cluster-setup chart" + echo "-----------------------------------------------------------------------------------------------------" + local count=$(helm list --all-namespaces -q | grep -c "fullstack-cluster-setup") + if [[ $count -eq 0 ]]; then + helm install -n "${NAMESPACE}" "fullstack-cluster-setup" "${SETUP_CHART_DIR}" + else + echo "fullstack-cluster-setup chart is already installed" + echo "" + fi + + echo "-----------------------Shared Resources------------------------------------------------------------------------------" + kubectl get clusterrole "${POD_MONITOR_ROLE}" -o wide + kubectl get gatewayclass + echo "" +} + +function destroy_fullstack_cluster_setup_chart() { + setup_kubectl_context + + echo "Uninstalling fullstack-cluster-setup chart" + echo "-----------------------------------------------------------------------------------------------------" + local count=$(helm list --all-namespaces -q | grep -c "fullstack-cluster-setup") + if [[ $count -ne 0 ]]; then + helm uninstall -n "${NAMESPACE}" "fullstack-cluster-setup" + else + echo "fullstack-cluster-setup chart is not installed" + echo "" + fi + + echo "-----------------------Shared Resources------------------------------------------------------------------------------" + 
kubectl get clusterrole "${POD_MONITOR_ROLE}" -o wide + kubectl get gatewayclass + echo "" +} + function install_chart() { local node_setup_script=$1 [[ -z "${node_setup_script}" ]] && echo "ERROR: [install_chart] Node setup script name is required" && return 1 @@ -39,10 +85,15 @@ function install_chart() { echo "SCRIPT_NAME: ${node_setup_script}" echo "Values: -f ${CHART_DIR}/values.yaml --values ${CHART_VALUES_FILES}" echo "-----------------------------------------------------------------------------------------------------" - if [ "${node_setup_script}" = "nmt-install.sh" ]; then - nmt_install + local count=$(helm list -q -n "${NAMESPACE}" | grep -c "${HELM_RELEASE_NAME}") + if [[ $count -eq 0 ]]; then + if [ "${node_setup_script}" = "nmt-install.sh" ]; then + nmt_install + else + direct_install + fi else - direct_install + echo "${HELM_RELEASE_NAME} is already installed" fi } @@ -50,10 +101,16 @@ function uninstall_chart() { [[ -z "${HELM_RELEASE_NAME}" ]] && echo "ERROR: [uninstall_chart] Helm release name is required" && return 1 echo "" - echo "Uninstalling helm chart... " - echo "-----------------------------------------------------------------------------------------------------" - helm uninstall "${HELM_RELEASE_NAME}" - sleep 10 + local count=$(helm list -q -n "${NAMESPACE}" | grep -c "${HELM_RELEASE_NAME}") + if [[ $count -ne 0 ]]; then + echo "Uninstalling helm chart ${HELM_RELEASE_NAME} in namespace ${NAMESPACE}... " + echo "-----------------------------------------------------------------------------------------------------" + helm uninstall -n "${NAMESPACE}" "${HELM_RELEASE_NAME}" + sleep 10 + echo "Uninstalled helm chart ${HELM_RELEASE_NAME} in namespace ${NAMESPACE}" + else + echo "Helm chart '${HELM_RELEASE_NAME}' not found in namespace ${NAMESPACE}. Nothing to uninstall. 
" + fi } function nmt_install() { @@ -87,10 +144,10 @@ function run_helm_chart_tests() { [[ -z "${test_name}" ]] && echo "ERROR: test name is required" && return 1 echo "" - echo "Running helm chart tests (first run takes ~2m)... " + echo "Running helm chart tests (takes ~5m, timeout 15m)... " echo "-----------------------------------------------------------------------------------------------------" - helm test "${HELM_RELEASE_NAME}" --filter name="${test_name}" + helm test "${HELM_RELEASE_NAME}" --filter name="${test_name}" --timeout 15m local test_status=$(kubectl get pod "${test_name}" -o jsonpath='{.status.phase}' | xargs) echo "Helm test status: ${test_status}" diff --git a/dev/scripts/template.env b/dev/scripts/template.env index b25b0b7f0..bbeb1109e 100644 --- a/dev/scripts/template.env +++ b/dev/scripts/template.env @@ -1,9 +1,12 @@ USER="${USER:-changeme}" -CLUSTER_NAME="fst" -NAMESPACE="fst-${USER}" -HELM_RELEASE_NAME="fst" +CLUSTER_NAME="${CLUSTER_NAME:-fst}" +NAMESPACE="${NAMESPACE:-fst-${USER}}" +HELM_RELEASE_NAME="${RELEASE_NAME:-fst}" NMT_VERSION=v2.0.0-alpha.0 PLATFORM_VERSION=v0.39.1 +POD_MONITOR_ROLE="${POD_MONITOR_ROLE:-pod-monitor-role}" +GATEWAY_CLASS_NAME="${GATEWAY_CLASS_NAME:-fst-gateway-class}" + #NODE_NAMES=(node0 node1 node2 node3) NODE_NAMES=(node0) diff --git a/dev/telemetry/grafana/example-tracing-app.yaml b/dev/telemetry/grafana/example-tracing-app.yaml index 7231b25e2..00a2b59ce 100644 --- a/dev/telemetry/grafana/example-tracing-app.yaml +++ b/dev/telemetry/grafana/example-tracing-app.yaml @@ -4,7 +4,6 @@ metadata: annotations: ingress.kubernetes.io/ssl-redirect: "false" name: ingress - namespace: default spec: rules: - http: @@ -21,7 +20,6 @@ apiVersion: apps/v1 kind: Deployment metadata: name: xk6-tracing - namespace: default spec: minReadySeconds: 10 replicas: 1