diff --git a/Dockerfile.loadtester b/Dockerfile.loadtester index 7c6c5e1d5..279e8a44c 100644 --- a/Dockerfile.loadtester +++ b/Dockerfile.loadtester @@ -21,6 +21,10 @@ WORKDIR /home/app RUN curl -sSLo hey "https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2" && \ chmod +x hey && mv hey /usr/local/bin/hey +RUN curl -sSL "https://get.helm.sh/helm-v2.12.3-linux-amd64.tar.gz" | tar xvz && \ +chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \ +rm -rf linux-amd64 + COPY --from=builder /go/src/github.com/weaveworks/flagger/loadtester . RUN chown -R app:app ./ diff --git a/Makefile b/Makefile index 2374df726..51f4e097d 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,7 @@ LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' TS=$(shell date +%Y-%m-%d_%H-%M-%S) run: - go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \ + go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=istio -namespace=test \ -metrics-server=https://prometheus.istio.weavedx.com \ -slack-url=https://hooks.slack.com/services/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 \ -slack-channel="devops-alerts" @@ -106,6 +106,11 @@ reset-test: kubectl apply -f ./artifacts/namespaces kubectl apply -f ./artifacts/canaries +loadtester-run: + docker build -t weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester + docker rm -f tester || true + docker run -dp 8888:9090 --name tester weaveworks/flagger-loadtester:$(LT_VERSION) + loadtester-push: docker build -t weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester - docker push weaveworks/flagger-loadtester:$(LT_VERSION) \ No newline at end of file + docker push weaveworks/flagger-loadtester:$(LT_VERSION) diff --git a/artifacts/canaries/canary.yaml b/artifacts/canaries/canary.yaml index ca023e05d..046b23699 100644 --- a/artifacts/canaries/canary.yaml +++ b/artifacts/canaries/canary.yaml @@ -73,5 +73,5 @@ spec: timeout: 5s metadata: type: cmd - cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/" + cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/" logCmdOutput: "true" diff --git a/artifacts/helmtester/deployment.yaml b/artifacts/helmtester/deployment.yaml new file mode 100644 index 000000000..a8906e177 --- /dev/null +++ b/artifacts/helmtester/deployment.yaml @@ -0,0 +1,58 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: flagger-helmtester + namespace: kube-system + labels: + app: flagger-helmtester +spec: + selector: + matchLabels: + app: flagger-helmtester + template: + metadata: + labels: + app: flagger-helmtester + annotations: + prometheus.io/scrape: "true" + spec: + serviceAccountName: tiller + containers: + - name: helmtester + image: weaveworks/flagger-loadtester:0.4.0 + imagePullPolicy: IfNotPresent + ports: + - name: http + containerPort: 8080 + command: + - ./loadtester + - -port=8080 + - -log-level=info + - -timeout=1h + livenessProbe: + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=4 + - --spider + - http://localhost:8080/healthz + timeoutSeconds: 5 + readinessProbe: + exec: + command: + - wget + - --quiet + - --tries=1 + - --timeout=4 + - --spider + - http://localhost:8080/healthz + timeoutSeconds: 5 + resources: + limits: + memory: "512Mi" + cpu: "1000m" + requests: + memory: "32Mi" + cpu: "10m" diff --git a/artifacts/helmtester/service.yaml b/artifacts/helmtester/service.yaml new file mode 100644 index 000000000..61d8c2286 --- /dev/null +++ b/artifacts/helmtester/service.yaml @@ -0,0 +1,16 @@ 
+apiVersion: v1 +kind: Service +metadata: + name: flagger-helmtester + namespace: kube-system + labels: + app: flagger-helmtester +spec: + type: ClusterIP + selector: + app: flagger-helmtester + ports: + - name: http + port: 80 + protocol: TCP + targetPort: http \ No newline at end of file diff --git a/charts/loadtester/Chart.yaml b/charts/loadtester/Chart.yaml index 36ea2f428..3772b6618 100644 --- a/charts/loadtester/Chart.yaml +++ b/charts/loadtester/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 name: loadtester -version: 0.4.0 -appVersion: 0.3.0 +version: 0.4.1 +appVersion: 0.4.0 kubeVersion: ">=1.11.0-0" engine: gotpl description: Flagger's load testing services based on rakyll/hey that generates traffic during canary analysis when configured as a webhook. diff --git a/charts/loadtester/README.md b/charts/loadtester/README.md index cb891782f..b1172617e 100644 --- a/charts/loadtester/README.md +++ b/charts/loadtester/README.md @@ -7,7 +7,6 @@ and can be used to generates traffic during canary analysis when configured as a ## Prerequisites * Kubernetes >= 1.11 -* Istio >= 1.0 ## Installing the Chart @@ -25,7 +24,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester The command deploys Grafana on the Kubernetes cluster in the default namespace. -> **Tip**: Note that the namespace where you deploy the load tester should have the Istio sidecar injection enabled +> **Tip**: Note that the namespace where you deploy the load tester should have the Istio or App Mesh sidecar injection enabled The [configuration](#configuration) section lists the parameters that can be configured during installation. @@ -48,13 +47,14 @@ Parameter | Description | Default `image.repository` | Image repository | `quay.io/stefanprodan/flagger-loadtester` `image.pullPolicy` | Image pull policy | `IfNotPresent` `image.tag` | Image tag | `` -`replicaCount` | desired number of pods | `1` +`replicaCount` | Desired number of pods | `1` +`serviceAccountName` | Kubernetes service account name | `none` `resources.requests.cpu` | CPU requests | `10m` -`resources.requests.memory` | memory requests | `64Mi` +`resources.requests.memory` | Memory requests | `64Mi` `tolerations` | List of node taints to tolerate | `[]` `affinity` | node/pod affinities | `node` -`nodeSelector` | node labels for pod assignment | `{}` -`service.type` | type of service | `ClusterIP` +`nodeSelector` | Node labels for pod assignment | `{}` +`service.type` | Type of service | `ClusterIP` `service.port` | ClusterIP port | `80` `cmd.timeout` | Command execution timeout | `1h` `logLevel` | Log level can be debug, info, warning, error or panic | `info` diff --git a/charts/loadtester/templates/deployment.yaml b/charts/loadtester/templates/deployment.yaml index 75a91abe1..ec7fef883 100644 --- a/charts/loadtester/templates/deployment.yaml +++ b/charts/loadtester/templates/deployment.yaml @@ -19,6 +19,9 @@ spec: annotations: appmesh.k8s.aws/ports: "444" spec: + {{- if .Values.serviceAccountName }} + serviceAccountName: {{ .Values.serviceAccountName }} + {{- end }} containers: - name: {{ .Chart.Name }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" diff --git a/charts/loadtester/values.yaml b/charts/loadtester/values.yaml index 9399a8ade..486ab92bc 100644 --- a/charts/loadtester/values.yaml +++ b/charts/loadtester/values.yaml @@ -2,7 +2,7 @@ replicaCount: 1 image: repository: weaveworks/flagger-loadtester - tag: 0.3.0 + tag: 0.4.0 pullPolicy: IfNotPresent logLevel: info @@ -27,6 +27,8 @@ tolerations: [] affinity: {} +serviceAccountName: "" + # 
App Mesh virtual node settings meshName: "" #backends: diff --git a/charts/podinfo/Chart.yaml b/charts/podinfo/Chart.yaml index f2d33f2ea..eda7dc665 100644 --- a/charts/podinfo/Chart.yaml +++ b/charts/podinfo/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -version: 2.0.1 +version: 2.1.0 appVersion: 1.4.0 name: podinfo engine: gotpl diff --git a/charts/podinfo/templates/canary.yaml b/charts/podinfo/templates/canary.yaml index 9fb007ec9..08107f7f5 100644 --- a/charts/podinfo/templates/canary.yaml +++ b/charts/podinfo/templates/canary.yaml @@ -38,8 +38,17 @@ spec: - name: request-duration threshold: {{ .Values.canary.thresholds.latency }} interval: 1m - {{- if .Values.canary.loadtest.enabled }} webhooks: + {{- if .Values.canary.helmtest.enabled }} + - name: "helm test" + type: pre-rollout + url: {{ .Values.canary.helmtest.url }} + timeout: 3m + metadata: + type: "helm" + cmd: "test {{ .Release.Name }} --cleanup" + {{- end }} + {{- if .Values.canary.loadtest.enabled }} - name: load-test-get url: {{ .Values.canary.loadtest.url }} timeout: 5s @@ -50,5 +59,5 @@ spec: timeout: 5s metadata: cmd: "hey -z 1m -q 5 -c 2 -m POST -d '{\"test\": true}' http://{{ template "podinfo.fullname" . }}.{{ .Release.Namespace }}:{{ .Values.service.port }}/echo" - {{- end }} + {{- end }} {{- end }} \ No newline at end of file diff --git a/charts/podinfo/values.yaml b/charts/podinfo/values.yaml index 2aaa0f3b3..b1af8b6fa 100644 --- a/charts/podinfo/values.yaml +++ b/charts/podinfo/values.yaml @@ -45,6 +45,10 @@ canary: enabled: false # load tester address url: http://flagger-loadtester.test/ + helmtest: + enabled: false + # helm tester address + url: http://flagger-helmtester.kube-system/ resources: limits: diff --git a/cmd/loadtester/main.go b/cmd/loadtester/main.go index 86b0ae872..962397b19 100644 --- a/cmd/loadtester/main.go +++ b/cmd/loadtester/main.go @@ -10,7 +10,7 @@ import ( "time" ) -var VERSION = "0.3.0" +var VERSION = "0.4.0" var ( logLevel string port string diff --git a/docs/gitbook/how-it-works.md b/docs/gitbook/how-it-works.md index 3a08f8ebe..d8b6d66d4 100644 --- a/docs/gitbook/how-it-works.md +++ b/docs/gitbook/how-it-works.md @@ -593,11 +593,11 @@ Spec: webhooks: - name: "smoke test" type: pre-rollout - url: http://migration-check.db/query - timeout: 30s + url: http://flagger-helmtester.kube-system/ + timeout: 3m metadata: - key1: "val1" - key2: "val2" + type: "helm" + cmd: "test podinfo --cleanup" - name: "load test" type: rollout url: http://flagger-loadtester.test/ @@ -640,7 +640,7 @@ On a non-2xx response Flagger will include the response body (if any) in the fai For workloads that are not receiving constant traffic Flagger can be configured with a webhook, that when called, will start a load test for the target workload. If the target workload doesn't receive any traffic during the canary analysis, -Flagger metric checks will fail with "no values found for metric istio_requests_total". +Flagger metric checks will fail with "no values found for metric request-success-rate". Flagger comes with a load testing service based on [rakyll/hey](https://github.com/rakyll/hey) that generates traffic during analysis when configured as a webhook. 
@@ -677,18 +677,18 @@ webhooks: timeout: 5s metadata: type: cmd - cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/" + cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/" - name: load-test-post url: http://flagger-loadtester.test/ timeout: 5s metadata: type: cmd - cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo.test:9898/echo" + cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo-canary.test:9898/echo" ``` When the canary analysis starts, Flagger will call the webhooks and the load tester will run the `hey` commands in the background, if they are not already running. This will ensure that during the -analysis, the `podinfo.test` virtual service will receive a steady stream of GET and POST requests. +analysis, the `podinfo-canary.test` service will receive a steady stream of GET and POST requests. If your workload is exposed outside the mesh with the Istio Gateway and TLS you can point `hey` to the public URL and use HTTP2. @@ -707,7 +707,7 @@ The load tester can run arbitrary commands as long as the binary is present in t For example if you you want to replace `hey` with another CLI, you can create your own Docker image: ```dockerfile -FROM quay.io/stefanprodan/flagger-loadtester: +FROM weaveworks/flagger-loadtester: RUN curl -Lo /usr/local/bin/my-cli https://github.com/user/repo/releases/download/ver/my-cli \ && chmod +x /usr/local/bin/my-cli @@ -741,3 +741,52 @@ webhooks: When the canary analysis starts, the load tester will initiate a [clone_and_start request](https://github.com/naver/ngrinder/wiki/REST-API-PerfTest) to the nGrinder server and start a new performance test. the load tester will periodically poll the nGrinder server for the status of the test, and prevent duplicate requests from being sent in subsequent analysis loops. + +### Integration Testing + +Flagger comes with a testing service that can run Helm tests or Bats tests when configured as a webhook. + +Deploy the Helm test runner in the `kube-system` namespace using the `tiller` service account: + +```bash +helm repo add flagger https://flagger.app + +helm upgrade -i flagger-helmtester flagger/loadtester \ +--namespace=kube-system \ +--set serviceAccountName=tiller +``` + +When deployed the Helm tester API will be available at `http://flagger-helmtester.kube-system/`. + +Now you can add pre-rollout webhooks to the canary analysis spec: + +```yaml + canaryAnalysis: + webhooks: + - name: "smoke test" + type: pre-rollout + url: http://flagger-helmtester.kube-system/ + timeout: 3m + metadata: + type: "helm" + cmd: "test {{ .Release.Name }} --cleanup" +``` + +When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary. +If the helm test fails, Flagger will retry until the analysis threshold is reached and the canary is rolled back. + +As an alternative to Helm you can use the [Bash Automated Testing System](https://github.com/bats-core/bats-core) to run your tests. + +```yaml + canaryAnalysis: + webhooks: + - name: "acceptance tests" + type: pre-rollout + url: http://flagger-batstester.default/ + timeout: 5m + metadata: + type: "bash" + cmd: "bats /tests/acceptance.bats" +``` + +Note that you should create a ConfigMap with your Bats tests and mount it inside the tester container. 
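The pre-rollout and rollout webhooks are plain JSON POSTs to the tester service, so the helm tester can also be exercised by hand. Below is a minimal Go sketch of such a call; the JSON field names mirror what the loadtester handler reads (name, namespace, metadata) but are assumptions made for this sketch, as is the port-forwarded address.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// webhookPayload mirrors the fields the loadtester handler reads
// (payload.Name, payload.Namespace, payload.Metadata); the JSON key
// names below are assumptions made for this sketch.
type webhookPayload struct {
	Name      string            `json:"name"`
	Namespace string            `json:"namespace"`
	Metadata  map[string]string `json:"metadata"`
}

func main() {
	body, _ := json.Marshal(webhookPayload{
		Name:      "podinfo",
		Namespace: "test",
		Metadata: map[string]string{
			"type": "helm",
			"cmd":  "test podinfo --cleanup",
		},
	})

	// Assumes the helm tester is reachable locally, e.g. via
	// `kubectl -n kube-system port-forward svc/flagger-helmtester 8080:80`.
	resp, err := http.Post("http://localhost:8080/", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// A non-2xx status carries the failed command output in the response body.
	fmt.Println("status:", resp.Status)
}
```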
diff --git a/docs/gitbook/tutorials/canary-helm-gitops.md b/docs/gitbook/tutorials/canary-helm-gitops.md
index 9117f1ffc..ca7f7f938 100644
--- a/docs/gitbook/tutorials/canary-helm-gitops.md
+++ b/docs/gitbook/tutorials/canary-helm-gitops.md
@@ -18,7 +18,11 @@ and the canary configuration file.
 │   ├── canary.yaml
 │   ├── configmap.yaml
 │   ├── deployment.yaml
-│   └── hpa.yaml
+│   ├── hpa.yaml
+│   ├── service.yaml
+│   └── tests
+│       ├── test-config.yaml
+│       └── test-pod.yaml
 └── values.yaml
 ```
 
@@ -47,7 +51,7 @@ helm upgrade -i frontend flagger/podinfo \
 --namespace test \
 --set nameOverride=frontend \
 --set backend=http://backend.test:9898/echo \
---set canary.enabled=true \
+--set canary.loadtest.enabled=true \
 --set canary.istioIngress.enabled=true \
 --set canary.istioIngress.gateway=public-gateway.istio-system.svc.cluster.local \
 --set canary.istioIngress.host=frontend.istio.example.com
@@ -87,7 +91,7 @@ Now let's install the `backend` release without exposing it outside the mesh:
 helm upgrade -i backend flagger/podinfo \
 --namespace test \
 --set nameOverride=backend \
---set canary.enabled=true \
+--set canary.loadtest.enabled=true \
 --set canary.istioIngress.enabled=false
 ```
 
@@ -118,17 +122,26 @@ helm upgrade -i flagger-loadtester flagger/loadtester \
 --namespace=test
 ```
 
-Enable the load tester and deploy a new `frontend` version:
+Install Flagger's helm test runner in the `kube-system` namespace using the `tiller` service account:
+
+```bash
+helm upgrade -i flagger-helmtester flagger/loadtester \
+--namespace=kube-system \
+--set serviceAccountName=tiller
+```
+
+Enable the load tester and the helm tester, then deploy a new `frontend` version:
 
 ```bash
 helm upgrade -i frontend flagger/podinfo/ \
 --namespace test \
 --reuse-values \
 --set canary.loadtest.enabled=true \
+--set canary.helmtest.enabled=true \
 --set image.tag=1.4.1
 ```
 
-Flagger detects that the deployment revision changed and starts the canary analysis along with the load test:
+Flagger detects that the deployment revision changed and starts the canary analysis:
 
 ```
 kubectl -n istio-system logs deployment/flagger -f | jq .msg
@@ -136,6 +149,7 @@ kubectl -n istio-system logs deployment/flagger -f | jq .msg
 New revision detected! Scaling up frontend.test
 Halt advancement frontend.test waiting for rollout to finish: 0 of 2 updated replicas are available
 Starting canary analysis for frontend.test
+Pre-rollout check helm test passed
 Advance frontend.test canary weight 5
 Advance frontend.test canary weight 10
 Advance frontend.test canary weight 15
@@ -163,7 +177,7 @@ Now trigger a canary deployment for the `backend` app, but this time you'll chan
 helm upgrade -i backend flagger/podinfo/ \
 --namespace test \
 --reuse-values \
---set canary.loadtest.enabled=true \
+--set canary.helmtest.enabled=true \
 --set httpServer.timeout=25s
 
@@ -253,7 +267,8 @@ Create a git repository with the following content:
 └── test
     ├── backend.yaml
     ├── frontend.yaml
-    └── loadtester.yaml
+    ├── loadtester.yaml
+    └── helmtester.yaml
 ```
 
 You can find the git source [here](https://github.com/stefanprodan/flagger/tree/master/artifacts/cluster).
@@ -288,6 +303,8 @@ spec:
         host: frontend.istio.example.com
       loadtest:
         enabled: true
+      helmtest:
+        enabled: true
 ```
 
 In the `chart` section I've defined the release source by specifying the Helm repository (hosted on GitHub Pages), chart name and version.
@@ -333,6 +350,7 @@ A CI/CD pipeline for the `frontend` release could look like this:
 * Flux applies the updated Helm release on the cluster
 * Flux Helm Operator picks up the change and calls Tiller to upgrade the release
 * Flagger detects a revision change and scales up the `frontend` deployment
+* Flagger runs the helm test before routing traffic to the canary service
 * Flagger starts the load test and runs the canary analysis
 * Based on the analysis result the canary deployment is promoted to production or rolled back
 * Flagger sends a Slack notification with the canary result
@@ -343,11 +361,12 @@ A canary deployment can fail due to any of the following reasons:
 * the container image can't be downloaded
 * the deployment replica set is stuck for more then ten minutes (eg. due to a container crash loop)
-* the webooks (acceptance tests, load tests, etc) are returning a non 2xx response
+* the webhooks (acceptance tests, helm tests, load tests, etc) are returning a non 2xx response
 * the HTTP success rate (non 5xx responses) metric drops under the threshold
 * the HTTP average duration metric goes over the threshold
 * the Istio telemetry service is unable to collect traffic metrics
 * the metrics server (Prometheus) can't be reached
 
-If you want to find out more about managing Helm releases with Flux here is an in-depth guide
-[github.com/stefanprodan/gitops-helm](https://github.com/stefanprodan/gitops-helm).
+If you want to find out more about managing Helm releases with Flux, here are two in-depth guides:
+[gitops-helm](https://github.com/stefanprodan/gitops-helm) and
+[gitops-istio](https://github.com/stefanprodan/gitops-istio).
diff --git a/docs/gitbook/usage/progressive-delivery.md b/docs/gitbook/usage/progressive-delivery.md
index 0c6936577..ab6cf1256 100644
--- a/docs/gitbook/usage/progressive-delivery.md
+++ b/docs/gitbook/usage/progressive-delivery.md
@@ -85,7 +85,7 @@ spec:
         url: http://flagger-loadtester.test/
         timeout: 5s
         metadata:
-          cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
+          cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
 ```
 
 Save the above resource as podinfo-canary.yaml and then apply it:
diff --git a/pkg/loadtester/bats.go b/pkg/loadtester/bash.go
similarity index 76%
rename from pkg/loadtester/bats.go
rename to pkg/loadtester/bash.go
index d27eb4b02..229bda4fc 100644
--- a/pkg/loadtester/bats.go
+++ b/pkg/loadtester/bash.go
@@ -6,19 +6,19 @@ import (
 	"os/exec"
 )
 
-const TaskTypeBats = "bats"
+const TaskTypeBash = "bash"
 
-type BatsTask struct {
+type BashTask struct {
 	TaskBase
 	command      string
 	logCmdOutput bool
 }
 
-func (task *BatsTask) Hash() string {
+func (task *BashTask) Hash() string {
 	return hash(task.canary + task.command)
 }
 
-func (task *BatsTask) Run(ctx context.Context) (bool, error) {
+func (task *BashTask) Run(ctx context.Context) (bool, error) {
 	cmd := exec.CommandContext(ctx, "bash", "-c", task.command)
 
 	out, err := cmd.CombinedOutput()
@@ -34,6 +34,6 @@ func (task *BatsTask) Run(ctx context.Context) (bool, error) {
 	return true, nil
 }
 
-func (task *BatsTask) String() string {
+func (task *BashTask) String() string {
 	return task.command
 }
diff --git a/pkg/loadtester/helm.go b/pkg/loadtester/helm.go
new file mode 100644
index 000000000..34afde01a
--- /dev/null
+++ b/pkg/loadtester/helm.go
@@ -0,0 +1,42 @@
+package loadtester
+
+import (
+	"context"
+	"fmt"
+	"os/exec"
+	"strings"
+)
+
+const TaskTypeHelm = "helm"
+
+type HelmTask struct {
+	TaskBase
+	command      string
+	logCmdOutput bool
+}
+
+func (task *HelmTask) Hash() string {
+	return hash(task.canary + task.command)
+}
+
+func (task *HelmTask) Run(ctx context.Context) (bool, error) {
+	helmCmd := fmt.Sprintf("helm %s", task.command)
+	task.logger.With("canary", task.canary).Infof("running command %v", helmCmd)
+
+	cmd := exec.CommandContext(ctx, "helm", strings.Fields(task.command)...)
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		task.logger.With("canary", task.canary).Errorf("command failed %s %v %s", task.command, err, out)
+		return false, fmt.Errorf("%v %s", err, out)
+	} else {
+		if task.logCmdOutput {
+			fmt.Printf("%s\n", out)
+		}
+		task.logger.With("canary", task.canary).Infof("command finished %v", helmCmd)
+	}
+	return true, nil
+}
+
+func (task *HelmTask) String() string {
+	return task.command
+}
diff --git a/pkg/loadtester/server.go b/pkg/loadtester/server.go
index c5a34aae8..c8ce4c1da 100644
--- a/pkg/loadtester/server.go
+++ b/pkg/loadtester/server.go
@@ -46,10 +46,16 @@ func ListenAndServe(port string, timeout time.Duration, logger *zap.SugaredLogge
 		}
 
 		// run bats command (blocking task)
-		if typ == TaskTypeBats {
-			bats := BatsTask{
+		if typ == TaskTypeBash {
+			logger.With("canary", payload.Name).Infof("bash command %s", payload.Metadata["cmd"])
+
+			bats := BashTask{
 				command:      payload.Metadata["cmd"],
-				logCmdOutput: taskRunner.logCmdOutput,
+				logCmdOutput: true,
+				TaskBase: TaskBase{
+					canary: fmt.Sprintf("%s.%s", payload.Name, payload.Namespace),
+					logger: logger,
+				},
 			}
 
 			ctx, cancel := context.WithTimeout(context.Background(), taskRunner.timeout)
@@ -59,6 +65,32 @@ func ListenAndServe(port string, timeout time.Duration, logger *zap.SugaredLogge
 			if !ok {
 				w.WriteHeader(http.StatusInternalServerError)
 				w.Write([]byte(err.Error()))
+				return
+			}
+
+			w.WriteHeader(http.StatusOK)
+			return
+		}
+
+		// run helm command (blocking task)
+		if typ == TaskTypeHelm {
+			helm := HelmTask{
+				command:      payload.Metadata["cmd"],
+				logCmdOutput: true,
+				TaskBase: TaskBase{
+					canary: fmt.Sprintf("%s.%s", payload.Name, payload.Namespace),
+					logger: logger,
+				},
+			}
+
+			ctx, cancel := context.WithTimeout(context.Background(), taskRunner.timeout)
+			defer cancel()
+
+			ok, err := helm.Run(ctx)
+			if !ok {
+				w.WriteHeader(http.StatusInternalServerError)
+				w.Write([]byte(err.Error()))
+				return
 			}
 
 			w.WriteHeader(http.StatusOK)
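The new HelmTask shells out to the helm binary, so it can be exercised with an in-package test. A minimal sketch, assuming the TaskBase fields are the canary and logger values used in server.go above and that a helm client is on PATH (as in the loadtester image):

```go
package loadtester

import (
	"context"
	"testing"
	"time"

	"go.uber.org/zap"
)

// Requires a helm binary on PATH; the command uses a flag helm is
// expected to reject, so Run should report a failure.
func TestHelmTaskRun_RejectsUnknownFlag(t *testing.T) {
	logger, _ := zap.NewDevelopment()

	task := HelmTask{
		command:      "version --no-such-flag",
		logCmdOutput: true,
		TaskBase: TaskBase{
			canary: "podinfo.test",
			logger: logger.Sugar(),
		},
	}

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	ok, err := task.Run(ctx)
	if ok || err == nil {
		t.Fatalf("expected the helm task to fail, got ok=%v err=%v", ok, err)
	}
}
```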