diff --git a/CHANGELOG/CHANGELOG-1.2.0.md b/CHANGELOG/CHANGELOG-1.2.0.md
new file mode 100644
index 0000000..216c737
--- /dev/null
+++ b/CHANGELOG/CHANGELOG-1.2.0.md
@@ -0,0 +1,5 @@
+### 1.2.0 Changelog
+
+* Stopped populating and using the `loftsman.io/previous-data` annotation when creating or updating the ship result configmap.
+* Removed the loftsman log output from the ship result configmap.
+* Added a separate configmap to store the loftsman log output from each ship operation.
diff --git a/docs/README.md b/docs/README.md
index 711b001..74b46fe 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -154,34 +154,34 @@ So, let's run `loftsman ship` with our defined manifest:

```
$ loftsman ship --manifest-path ./manifest.yaml
-2021-09-21T11:41:02-06:00 INF Initializing the connection to the Kubernetes cluster using KUBECONFIG (system default), and context (current-context) command=ship
-2021-09-21T11:41:02-06:00 INF Initializing helm client object command=ship
+2021-12-09T14:08:30-06:00 INF Initializing the connection to the Kubernetes cluster using KUBECONFIG (system default), and context (current-context) command=ship
+2021-12-09T14:08:30-06:00 INF Initializing helm client object command=ship
   |\
   | \
   |  \
   |___\   Shipping your Helm workloads with Loftsman
 \--||___/
~~~~~~\_____/~~~~~~~
-
-2021-09-21T11:41:02-06:00 INF Ensuring that the loftsman namespace exists command=ship
-2021-09-21T11:41:02-06:00 INF Running a release for the provided manifest at ./manifest.yaml command=ship
+
+2021-12-09T14:08:31-06:00 INF Ensuring that the loftsman namespace exists command=ship
+2021-12-09T14:08:31-06:00 INF Running a release for the provided manifest at manifest.yaml command=ship
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Releasing consul v0.33.0
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-2021-09-21T11:41:02-06:00 INF Running helm install/upgrade with arguments: upgrade --install consul https://helm.releases.hashicorp.com/consul-0.33.0.tgz --namespace default --create-namespace --set global.chart.name=consul --set global.chart.version=0.33.0 chart=consul command=ship namespace=default version=0.33.0
-2021-09-21T11:41:07-06:00 INF Release "consul" does not exist. Installing it now.
+2021-12-09T14:08:31-06:00 INF Running helm install/upgrade with arguments: upgrade --install consul https://helm.releases.hashicorp.com/consul-0.33.0.tgz --namespace default --create-namespace --set global.chart.name=consul --set global.chart.version=0.33.0 chart=consul command=ship namespace=default version=0.33.0
+2021-12-09T14:08:37-06:00 INF Release "consul" has been upgraded. Happy Helming!
NAME: consul
-LAST DEPLOYED: Tue Sep 21 11:41:06 2021
+LAST DEPLOYED: Thu Dec 9 14:08:37 2021
NAMESPACE: default
STATUS: deployed
-REVISION: 1
+REVISION: 3
NOTES:
Thank you for installing HashiCorp Consul!
-Now that you have deployed Consul, you should look over the docs on using 
-Consul with Kubernetes available here: 
+Now that you have deployed Consul, you should look over the docs on using
+Consul with Kubernetes available here:

https://www.consul.io/docs/platform/k8s/index.html

@@ -198,13 +198,13 @@ To learn more about the release, run:

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 Releasing victoria-metrics-cluster v0.8.24
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-2021-09-21T11:41:07-06:00 INF Running helm install/upgrade with arguments: upgrade --install victoria-metrics-cluster charts/victoria-metrics-cluster-0.8.24.tgz --namespace default --create-namespace --set global.chart.name=victoria-metrics-cluster --set global.chart.version=0.8.24 chart=victoria-metrics-cluster command=ship namespace=default version=0.8.24
-2021-09-21T11:41:09-06:00 INF Release "victoria-metrics-cluster" does not exist. Installing it now.
+2021-12-09T14:08:37-06:00 INF Running helm install/upgrade with arguments: upgrade --install victoria-metrics-cluster charts/victoria-metrics-cluster-0.8.24.tgz --namespace default --create-namespace --set global.chart.name=victoria-metrics-cluster --set global.chart.version=0.8.24 chart=victoria-metrics-cluster command=ship namespace=default version=0.8.24
+2021-12-09T14:08:39-06:00 INF Release "victoria-metrics-cluster" has been upgraded. Happy Helming!
NAME: victoria-metrics-cluster
-LAST DEPLOYED: Tue Sep 21 11:41:09 2021
+LAST DEPLOYED: Thu Dec 9 14:08:38 2021
NAMESPACE: default
STATUS: deployed
-REVISION: 1
+REVISION: 3
TEST SUITE: None
NOTES:
Write API:
@@ -215,13 +215,15 @@ victoria-metrics-cluster-vminsert.default.svc.cluster.local
[redacted] chart=victoria-metrics-cluster command=ship namespace=default version=0.8.24
-2021-09-21T11:41:09-06:00 INF Ship status: success. Recording status, manifest, and log data to configmap loftsman-my-first-manifest in namespace loftsman command=ship
+2021-12-09T14:08:39-06:00 INF Ship status: success. Recording status and manifest to configmap loftsman-my-first-manifest in namespace loftsman command=ship
+2021-12-09T14:08:39-06:00 INF Recording log data to configmap loftsman-my-first-manifest-ship-log in namespace loftsman command=ship
```

Great! We've shipped our system, which in this example includes two charts with various workloads. We have a result and a record of the ship operation, per our logs:

```
-2021-09-21T11:41:09-06:00 INF Ship status: success. Recording status, manifest, and log data to configmap loftsman-my-first-manifest in namespace loftsman command=ship
+2021-12-09T14:08:39-06:00 INF Ship status: success. Recording status and manifest to configmap loftsman-my-first-manifest in namespace loftsman command=ship
+2021-12-09T14:08:39-06:00 INF Recording log data to configmap loftsman-my-first-manifest-ship-log in namespace loftsman command=ship
```

_NOTE: if any of our Helm charts had failed to install, we'd get a clear indication of that at the end of this log. Loftsman won't treat a single chart's failure as an overall failure; rather, it will aggregate any failures and report them at the end of the log._

@@ -259,107 +261,95 @@ Now that we've shipped our first manifest, we're in good shape. But there are ot

We were able to see in the logs from our first `loftsman ship` operation:

```
-2021-09-21T11:41:09-06:00 INF Ship status: success. Recording status, manifest, and log data to configmap loftsman-my-first-manifest in namespace loftsman command=ship
+2021-12-09T14:08:39-06:00 INF Ship status: success. 
Recording status and manifest to configmap loftsman-my-first-manifest in namespace loftsman command=ship
+2021-12-09T14:08:39-06:00 INF Recording log data to configmap loftsman-my-first-manifest-ship-log in namespace loftsman command=ship
```

-So, let's take a closer look at that Kubernetes-stored `ConfigMap`:
+#### Ship result configmap
+
+So, let's take a closer look at that Kubernetes-stored `ConfigMap` that contains the ship result:

-```
+```yaml
apiVersion: v1
-items:
-- apiVersion: v1
-  data:
-    ca.crt: |
-      -----BEGIN CERTIFICATE-----
-      MIIC5zCCAc+gAwIBAgIBADANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwprdWJl
-      cm5ldGVzMB4XDTIxMDkyMTE3MzMxMFoXDTMxMDkxOTE3MzMxMFowFTETMBEGA1UE
-      AxMKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALiV
-      Pae2nObpj6CXw4GSCfEX9bVBQ+gOZyMqiJhcBHlUQwSe7sMvTk1HzRyKTWsQmiKm
-      5s5FT9wB+2yZwZmQ5ZgyArxybpTe6kILWPKTww/jAMu25gC+nPyCWmSfmpeM9pa9
-      qDD+XL8p2XsnEAQE41jHKF0uKT1pU0ybYmy9NlYfrxzjvxEG/+qubgGs0SNwf1Lu
-      C+GmHENE2NgLkDIhCvcU+8Wz+ucQyW2QRd4E3KAdMykyVj91a2tkmpQmwisUXY4Z
-      QusXFmjFOLd3bJ6f9oEERAUWd4zKvgXUcm8q5j83ued9XU7W9sQU8XdZzvQ9hQkZ
-      nsSQjyUjg+y1ha3yOI0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB
-      /wQFMAMBAf8wHQYDVR0OBBYEFAxwHq90FafTk3ZPkOxB4sukjMivMA0GCSqGSIb3
-      DQEBCwUAA4IBAQB1z7RfxCHBTJ/D734GvxYXSBSUkNe5Z6i8ygcFGGEdrUB+a02G
-      zY6FFOmyE/+qg86oOX2+k55wBZno7m4d9rQzloxs9r4HUDzE7Zjyl7MtMi+dMNLE
-      4CqD85+75UBhW/IF2MLT/Bz0iI6CMsB/9dmJUREPTFAaXkyZ+0kxRz8JxszR6iqa
-      pMYecJ/cFo3z+U4a+irrdH+DR0AR7DvQ9vvzZWLNxK4z3RMBjIhb8kQlAaDyS+yq
-      NiFW6tjcedhPhkT1eL9gs91iAQkoMH3+WfWW4n9434hEK08zoQb674C/ZfSFtSHw
-      0CpdmpRPatWJJiuD4IQESq6tamklsdZl5Xs2
-      -----END CERTIFICATE-----
-  kind: ConfigMap
-  metadata:
-    creationTimestamp: "2021-09-21T17:41:02Z"
-    name: kube-root-ca.crt
-    namespace: loftsman
-    resourceVersion: "1095"
-    uid: 17201146-b75d-41f8-8bbe-8a1b1755243c
-- apiVersion: v1
-  data:
-    loftsman.log: |
-      {"level":"info","command":"ship","time":"2021-09-21T11:41:02-06:00","message":"Initializing the connection to the Kubernetes cluster using KUBECONFIG (system default), and context (current-context)"}
-      {"level":"info","command":"ship","time":"2021-09-21T11:41:02-06:00","message":"Initializing helm client object"}
-      {"command":"ship","header":"Shipping your Helm workloads with Loftsman","time":"2021-09-21T11:41:02-06:00"}
-      {"level":"info","command":"ship","time":"2021-09-21T11:41:02-06:00","message":"Ensuring that the loftsman namespace exists"}
-      {"level":"info","command":"ship","time":"2021-09-21T11:41:02-06:00","message":"Running a release for the provided manifest at ./manifest.yaml"}
-      {"command":"ship","sub-header":"Releasing consul v0.33.0","time":"2021-09-21T11:41:02-06:00"}
-      {"level":"info","command":"ship","chart":"consul","version":"0.33.0","namespace":"default","time":"2021-09-21T11:41:02-06:00","message":"Running helm install/upgrade with arguments: upgrade --install consul https://helm.releases.hashicorp.com/consul-0.33.0.tgz --namespace default --create-namespace --set global.chart.name=consul --set global.chart.version=0.33.0"}
-      {"level":"info","command":"ship","chart":"consul","version":"0.33.0","namespace":"default","time":"2021-09-21T11:41:07-06:00","message":"Release \"consul\" does not exist. 
Installing it now.\nNAME: consul\nLAST DEPLOYED: Tue Sep 21 11:41:06 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nNOTES:\nThank you for installing HashiCorp Consul!\n\nNow that you have deployed Consul, you should look over the docs on using \nConsul with Kubernetes available here: \n\nhttps://www.consul.io/docs/platform/k8s/index.html\n\n\nYour release is named consul.\n\nTo learn more about the release, run:\n\n $ helm status consul\n $ helm get all consul\n"} - {"command":"ship","sub-header":"Releasing victoria-metrics-cluster v0.8.24","time":"2021-09-21T11:41:07-06:00"} - {"level":"info","command":"ship","chart":"victoria-metrics-cluster","version":"0.8.24","namespace":"default","time":"2021-09-21T11:41:07-06:00","message":"Running helm install/upgrade with arguments: upgrade --install victoria-metrics-cluster charts/victoria-metrics-cluster-0.8.24.tgz --namespace default --create-namespace --set global.chart.name=victoria-metrics-cluster --set global.chart.version=0.8.24"} - {"level":"info","command":"ship","chart":"victoria-metrics-cluster","version":"0.8.24","namespace":"default","time":"2021-09-21T11:41:09-06:00","message":"Release \"victoria-metrics-cluster\" does not exist. Installing it now.\nNAME: victoria-metrics-cluster\nLAST DEPLOYED: Tue Sep 21 11:41:09 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 1\nTEST SUITE: None\nNOTES:\nWrite API:\n\nThe Victoria Metrics write api can be accessed via port 8480 on the following DNS name from within your cluster:\nvictoria-metrics-cluster-vminsert.default.svc.cluster.local\n\nGet the Victoria Metrics insert service URL by running these commands in the same shell:\n export POD_NAME=$(kubectl get pods --namespace default -l \"app=vminsert\" -o jsonpath=\"{.items[0].metadata.name}\")\n kubectl --namespace default port-forward $POD_NAME 8480\n\nYou need to update your prometheus configuration file and add next lines into it:\n\nprometheus.yml\n```yaml\nremote_write:\n - url: \"http:///insert/0/prometheus/\"\n\n```\n\nfor e.g. inside the kubernetes cluster:\n```yaml\nremote_write:\n - url: \"http://victoria-metrics-cluster-vminsert.default.svc.cluster.local:8480/insert/0/prometheus/\"\n\n```\nRead API:\n\nThe Victoria Metrics read api can be accessed via port 8481 on the following DNS name from within your cluster:\nvictoria-metrics-cluster-vmselect.default.svc.cluster.local\n\nGet the Victoria Metrics select service URL by running these commands in the same shell:\n export POD_NAME=$(kubectl get pods --namespace default -l \"app=vmselect\" -o jsonpath=\"{.items[0].metadata.name}\")\n kubectl --namespace default port-forward $POD_NAME 8481\n\nYou need to update specify select service URL in your Grafana:\n NOTE: you need to use Prometheus Data Source\n\nInput for URL field in Grafana\n\n```\nhttp:///select/0/prometheus/\n```\n\nfor e.g. inside the kubernetes cluster:\n```\nhttp://victoria-metrics-cluster-vmselect.default.svc.cluster.local:8481/select/0/prometheus/\"\n```\n"} - {"level":"info","command":"ship","time":"2021-09-21T11:41:09-06:00","message":"Ship status: success. 
Recording status, manifest, and log data to configmap loftsman-my-first-manifest in namespace loftsman"}
-    manifest.yaml: |
-      apiVersion: manifests/v1beta1
-      metadata:
-        name: my-first-manifest
-      spec:
-        sources:
-          charts:
-          - type: directory
-            name: local
-            location: ./charts
-          - type: repo
-            name: hashicorp
-            location: https://helm.releases.hashicorp.com
+data:
+  manifest.yaml: |-
+    apiVersion: manifests/v1beta1
+    metadata:
+      name: my-first-manifest
+    spec:
+      sources:
        charts:
-      - name: consul
-        source: hashicorp
-        version: 0.33.0
-        namespace: default
-      - name: victoria-metrics-cluster
-        source: local
-        version: 0.8.24
-        namespace: default
-    status: success
-  kind: ConfigMap
-  metadata:
-    annotations:
-      loftsman.io/previous-data: ""
-    creationTimestamp: "2021-09-21T17:41:02Z"
-    labels:
-      app.kubernetes.io/managed-by: loftsman
-    name: loftsman-my-first-manifest
-    namespace: loftsman
-    resourceVersion: "1216"
-    uid: 071e1ffc-2a86-4d32-b46c-7b17db680256
-kind: List
+        - type: directory
+          name: local
+          location: ./charts
+        - type: repo
+          name: hashicorp
+          location: https://helm.releases.hashicorp.com
+      charts:
+      - name: consul
+        source: hashicorp
+        version: 0.33.0
+        namespace: default
+      - name: victoria-metrics-cluster
+        source: local
+        version: 0.8.24
+        namespace: default
+  status: success
+kind: ConfigMap
metadata:
-  resourceVersion: ""
-  selfLink: ""
+  annotations:
+    loftsman.io/ship-log-configmap: loftsman-my-first-manifest-ship-log
+  creationTimestamp: "2021-12-09T20:07:14Z"
+  labels:
+    app.kubernetes.io/managed-by: loftsman
+  name: loftsman-my-first-manifest
+  namespace: loftsman
+  resourceVersion: "16711"
+  uid: d3274d10-f5b2-4538-8f05-0c483fdb7157
```

Let's look at all the individual pieces:
* `metadata.name`: `loftsman-my-first-manifest`, a loftsman-generated resource name based on the name of your manifest. Again, Loftsman considers manifests unique and connected across ship operations based on the name of the manifest itself.
* `namespace`: `loftsman`. By default, loftsman will store everything it needs in the `loftsman` namespace. You can control what namespace to use via the CLI `--loftsman-namespace` argument.
-* `data."loftsman.log"`: is a record of the full log of the `loftsman ship` run, in JSON/machine-readable log format
* `data."manifest.yaml"`: a record of the actual manifest shipped for this run
* `data.status`: whether the ship was successful or encountered failures

This `ConfigMap` currently stores only the most recent ship data; think of it as the state of a shipped manifest.
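+Since these records live in a plain `ConfigMap`, you can pull them back out with `kubectl` alone. A minimal sketch, assuming the default `loftsman` namespace and our `my-first-manifest` example:
+
+```
+# Recover the exact manifest that was shipped last
+kubectl -n loftsman get configmap loftsman-my-first-manifest \
+  -o jsonpath='{.data.manifest\.yaml}'
+
+# Check the recorded ship status (e.g. active, success, failed)
+kubectl -n loftsman get configmap loftsman-my-first-manifest \
+  -o jsonpath='{.data.status}'
+```
+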
+#### Ship log configmap + +So, let's take a closer look at that Kubernetes-stored `ConfigMap` that contains the logs from the ship operation: +```yaml +apiVersion: v1 +data: + loftsman.log: | + {"level":"info","command":"ship","time":"2021-12-09T14:08:30-06:00","message":"Initializing the connection to the Kubernetes cluster using KUBECONFIG (system default), and context (current-context)"} + {"level":"info","command":"ship","time":"2021-12-09T14:08:30-06:00","message":"Initializing helm client object"} + {"command":"ship","header":"Shipping your Helm workloads with Loftsman","time":"2021-12-09T14:08:31-06:00"} + {"level":"info","command":"ship","time":"2021-12-09T14:08:31-06:00","message":"Ensuring that the loftsman namespace exists"} + {"level":"info","command":"ship","time":"2021-12-09T14:08:31-06:00","message":"Running a release for the provided manifest at manifest.yaml"} + {"command":"ship","sub-header":"Releasing consul v0.33.0","time":"2021-12-09T14:08:31-06:00"} + {"level":"info","command":"ship","chart":"consul","version":"0.33.0","namespace":"default","time":"2021-12-09T14:08:31-06:00","message":"Running helm install/upgrade with arguments: upgrade --install consul https://helm.releases.hashicorp.com/consul-0.33.0.tgz --namespace default --create-namespace --set global.chart.name=consul --set global.chart.version=0.33.0"} + {"level":"info","command":"ship","chart":"consul","version":"0.33.0","namespace":"default","time":"2021-12-09T14:08:37-06:00","message":"Release \"consul\" has been upgraded. Happy Helming!\nNAME: consul\nLAST DEPLOYED: Thu Dec 9 14:08:37 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 3\nNOTES:\nThank you for installing HashiCorp Consul!\n\nNow that you have deployed Consul, you should look over the docs on using \nConsul with Kubernetes available here: \n\nhttps://www.consul.io/docs/platform/k8s/index.html\n\n\nYour release is named consul.\n\nTo learn more about the release, run:\n\n $ helm status consul\n $ helm get all consul\n"} + {"command":"ship","sub-header":"Releasing victoria-metrics-cluster v0.8.24","time":"2021-12-09T14:08:37-06:00"} + {"level":"info","command":"ship","chart":"victoria-metrics-cluster","version":"0.8.24","namespace":"default","time":"2021-12-09T14:08:37-06:00","message":"Running helm install/upgrade with arguments: upgrade --install victoria-metrics-cluster charts/victoria-metrics-cluster-0.8.24.tgz --namespace default --create-namespace --set global.chart.name=victoria-metrics-cluster --set global.chart.version=0.8.24"} + {"level":"info","command":"ship","chart":"victoria-metrics-cluster","version":"0.8.24","namespace":"default","time":"2021-12-09T14:08:39-06:00","message":"Release \"victoria-metrics-cluster\" has been upgraded. 
Happy Helming!\nNAME: victoria-metrics-cluster\nLAST DEPLOYED: Thu Dec 9 14:08:38 2021\nNAMESPACE: default\nSTATUS: deployed\nREVISION: 3\nTEST SUITE: None\nNOTES:\nWrite API:\n\nThe Victoria Metrics write api can be accessed via port 8480 on the following DNS name from within your cluster:\nvictoria-metrics-cluster-vminsert.default.svc.cluster.local\n\nGet the Victoria Metrics insert service URL by running these commands in the same shell:\n export POD_NAME=$(kubectl get pods --namespace default -l \"app=vminsert\" -o jsonpath=\"{.items[0].metadata.name}\")\n kubectl --namespace default port-forward $POD_NAME 8480\n\nYou need to update your prometheus configuration file and add next lines into it:\n\nprometheus.yml\n```yaml\nremote_write:\n - url: \"http:///insert/0/prometheus/\"\n\n```\n\nfor e.g. inside the kubernetes cluster:\n```yaml\nremote_write:\n - url: \"http://victoria-metrics-cluster-vminsert.default.svc.cluster.local:8480/insert/0/prometheus/\"\n\n```\nRead API:\n\nThe Victoria Metrics read api can be accessed via port 8481 on the following DNS name from within your cluster:\nvictoria-metrics-cluster-vmselect.default.svc.cluster.local\n\nGet the Victoria Metrics select service URL by running these commands in the same shell:\n export POD_NAME=$(kubectl get pods --namespace default -l \"app=vmselect\" -o jsonpath=\"{.items[0].metadata.name}\")\n kubectl --namespace default port-forward $POD_NAME 8481\n\nYou need to update specify select service URL in your Grafana:\n NOTE: you need to use Prometheus Data Source\n\nInput for URL field in Grafana\n\n```\nhttp:///select/0/prometheus/\n```\n\nfor e.g. inside the kubernetes cluster:\n```\nhttp://victoria-metrics-cluster-vmselect.default.svc.cluster.local:8481/select/0/prometheus/\"\n```\n"}
+    {"level":"info","command":"ship","time":"2021-12-09T14:08:39-06:00","message":"Ship status: success. Recording status and manifest to configmap loftsman-my-first-manifest in namespace loftsman"}
+    {"level":"info","command":"ship","time":"2021-12-09T14:08:39-06:00","message":"Recording log data to configmap loftsman-my-first-manifest-ship-log in namespace loftsman"}
+kind: ConfigMap
+metadata:
+  creationTimestamp: "2021-12-09T20:07:38Z"
+  labels:
+    app.kubernetes.io/managed-by: loftsman
+  name: loftsman-my-first-manifest-ship-log
+  namespace: loftsman
+  resourceVersion: "16887"
+  uid: f64b003c-db68-43f5-9d5f-22dc854f37a8
+```
+
+Let's look at the key piece:
+* `data."loftsman.log"`: a record of the full log of the `loftsman ship` run, in JSON/machine-readable log format
+
### Identifying and Fixing Errors

This section is a work-in-progress, so bear with us as we build it out. 
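+One aid that's already available: the ship log configmap described in the previous section is often the fastest way to see what went wrong. A minimal sketch of pulling the recorded log back out, again assuming the default `loftsman` namespace and our `my-first-manifest` example:
+
+```
+kubectl -n loftsman get configmap loftsman-my-first-manifest-ship-log \
+  -o jsonpath='{.data.loftsman\.log}'
+```
+
+Each line of that output is a JSON log record, so it filters cleanly through tools like `jq`.
+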
In the meantime, these are the most helpful tips:
diff --git a/internal/interfaces/kubernetes.go b/internal/interfaces/kubernetes.go
index 2baddd5..1c3790e 100644
--- a/internal/interfaces/kubernetes.go
+++ b/internal/interfaces/kubernetes.go
@@ -10,7 +10,8 @@ type Kubernetes interface {
	IsRetryError(err error) bool
	EnsureNamespace(name string) error
	FindConfigMap(name string, namespace string, withKey string, withValue string) (*v1.ConfigMap, error)
-	InitializeConfigMap(name string, namespace string, data map[string]string) (*v1.ConfigMap, error)
+	InitializeShipConfigMap(name string, namespace string, data map[string]string) (*v1.ConfigMap, error)
+	InitializeLogConfigMap(name string, namespace string, data map[string]string) (*v1.ConfigMap, error)
	PatchConfigMap(name string, namespace string, data map[string]string) (*v1.ConfigMap, error)
	GetSecretKeyValue(secretName string, namespace string, dataKey string) (string, error)
}
diff --git a/internal/kubernetes/kubernetes.go b/internal/kubernetes/kubernetes.go
index 2f8b514..89b4102 100644
--- a/internal/kubernetes/kubernetes.go
+++ b/internal/kubernetes/kubernetes.go
@@ -133,17 +133,17 @@ func (k *Kubernetes) FindConfigMap(name string, namespace string, withKey string
	return result, err
}

-// InitializeConfigMap will ensure a configmap exists by name, in a namespace, with data. If an existing configmap
-// is found, the previous configmap's data will be persisted to a an annotation on the new version of the configmap
-func (k *Kubernetes) InitializeConfigMap(name string, namespace string, data map[string]string) (*v1.ConfigMap, error) {
+// InitializeShipConfigMap will ensure a configmap exists by name, in a namespace, with data. If an existing configmap
+// is found and it is persisting previous data, then remove that previous data from the new version of the configmap
+func (k *Kubernetes) InitializeShipConfigMap(name string, namespace string, data map[string]string) (*v1.ConfigMap, error) {
	var err error
	var result *v1.ConfigMap
-	previousDataAnnotationKey := "loftsman.io/previous-data"
+	logConfigMapName := fmt.Sprintf("%s-ship-log", name)
	err = retry.OnError(retry.DefaultBackoff, k.IsRetryError, func() error {
		result, err = k.client.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if kerrors.IsNotFound(err) {
			annotations := make(map[string]string)
-			annotations[previousDataAnnotationKey] = ""
+			annotations["loftsman.io/ship-log-configmap"] = logConfigMapName
			result, err = k.client.CoreV1().ConfigMaps(namespace).Create(context.Background(), &v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name: name,
@@ -158,17 +158,56 @@ func (k *Kubernetes) InitializeConfigMap(name string, namespace string, data map
		if err != nil {
			return err
		}
-		previousData, err := json.Marshal(result.Data)
+
+		// Remove legacy annotations and fields (in a JSON merge patch, a null value deletes that key)
+		patchData := map[string]interface{}{
+			"metadata": map[string]interface{}{
+				"annotations": map[string]interface{}{
+					"loftsman.io/previous-data":      nil,
+					"loftsman.io/ship-log-configmap": logConfigMapName,
+				},
+			},
+			"data": map[string]interface{}{
+				"loftsman.log": nil,
+			},
+		}
+		patchDataEncoded, err := json.Marshal(patchData)
		if err != nil {
			return err
		}
-		result.ObjectMeta.Annotations[previousDataAnnotationKey] = string(previousData)
-		patchData, err := json.Marshal(v1.ConfigMap{
-			ObjectMeta: result.ObjectMeta,
-			Data: data,
-		})
+
		result, err = k.client.CoreV1().ConfigMaps(namespace).Patch(context.Background(), name,
-			types.StrategicMergePatchType, []byte(patchData), 
metav1.PatchOptions{})
+			types.MergePatchType, []byte(patchDataEncoded), metav1.PatchOptions{})
+
+		return err
+	})
+	return result, err
+}
+
+// InitializeLogConfigMap will ensure a configmap exists by name, in a namespace, with data. If an existing configmap
+// is found then it will not be modified
+func (k *Kubernetes) InitializeLogConfigMap(name string, namespace string, data map[string]string) (*v1.ConfigMap, error) {
+	var err error
+	var result *v1.ConfigMap
+	err = retry.OnError(retry.DefaultBackoff, k.IsRetryError, func() error {
+		result, err = k.client.CoreV1().ConfigMaps(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		if kerrors.IsNotFound(err) {
+			annotations := make(map[string]string)
+			result, err = k.client.CoreV1().ConfigMaps(namespace).Create(context.Background(), &v1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: name,
+					Namespace: namespace,
+					Labels: k.getCommonLabels(),
+					Annotations: annotations,
+				},
+				Data: data,
+			}, metav1.CreateOptions{})
+			return err
+		}
+		// An existing log configmap is left untouched; just surface any error from the Get
+		return err
+	})
+	return result, err
+}
diff --git a/internal/kubernetes/kubernetes_test.go b/internal/kubernetes/kubernetes_test.go
index 2741390..48dfc44 100644
--- a/internal/kubernetes/kubernetes_test.go
+++ b/internal/kubernetes/kubernetes_test.go
@@ -102,7 +102,7 @@ func TestFindConfigMapNotFound(t *testing.T) {
	}
}

-func TestInitializeConfigMapNew(t *testing.T) {
+func TestInitializeShipConfigMapNew(t *testing.T) {
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder("GET", `=~http://loftsman-tests`, httpmock.NewStringResponder(404, `{}`))
@@ -112,13 +112,13 @@
	data := make(map[string]string)
	data["one"] = "1"
	data["two"] = "2"
-	_, err := k.InitializeConfigMap("loftsman-tests", "default", data)
+	_, err := k.InitializeShipConfigMap("loftsman-tests", "default", data)
	if err != nil {
		t.Errorf("Got unexpected error from kubernetes.TestInitializeShipConfigMapNew(): %s", err)
	}
}

-func TestInitializeConfigMapExists(t *testing.T) {
+func TestInitializeShipConfigMapExists(t *testing.T) {
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()
	httpmock.RegisterResponder("GET", `=~http://loftsman-tests`, httpmock.NewStringResponder(200, `{"metadata": {"annotations": {}}}`))
@@ -129,12 +129,44 @@
	data := make(map[string]string)
	data["one"] = "1"
	data["two"] = "2"
-	_, err := k.InitializeConfigMap("loftsman-tests", "default", data)
+	_, err := k.InitializeShipConfigMap("loftsman-tests", "default", data)
	if err != nil {
		t.Errorf("Got unexpected error from kubernetes.TestInitializeShipConfigMapExists(): %s", err)
	}
}

+func TestInitializeLogConfigMapNew(t *testing.T) {
+	httpmock.Activate()
+	defer httpmock.DeactivateAndReset()
+	httpmock.RegisterResponder("GET", `=~http://loftsman-tests`, httpmock.NewStringResponder(404, `{}`))
+	httpmock.RegisterResponder("POST", `=~http://loftsman-tests`, httpmock.NewStringResponder(200, `{}`))
+	k := &Kubernetes{}
+	_ = k.Initialize("./.test-fixtures/kubeconfig.yaml", "default")
+	data := make(map[string]string)
+	data["one"] = "1"
+	data["two"] = "2"
+	_, err := k.InitializeLogConfigMap("loftsman-tests-ship-log", "default", data)
+	if err != nil {
+		t.Errorf("Got unexpected error from kubernetes.TestInitializeLogConfigMapNew(): %s", err)
+	}
+}
+
+func TestInitializeLogConfigMapExists(t *testing.T) {
+	httpmock.Activate()
+	defer httpmock.DeactivateAndReset()
+	httpmock.RegisterResponder("GET", 
`=~http://loftsman-tests`, httpmock.NewStringResponder(200, `{"metadata": {"annotations": {}}}`)) + httpmock.RegisterResponder("POST", `=~http://loftsman-tests`, httpmock.NewStringResponder(200, `{}`)) + httpmock.RegisterResponder("PATCH", `=~http://loftsman-tests`, httpmock.NewStringResponder(200, `{}`)) + k := &Kubernetes{} + _ = k.Initialize("./.test-fixtures/kubeconfig.yaml", "default") + data := make(map[string]string) + data["one"] = "1" + data["two"] = "2" + _, err := k.InitializeLogConfigMap("loftsman-tests-ship-log", "default", data) + if err != nil { + t.Errorf("Got unexpected error from kubernetes.TestInitializeLogConfigMapExists(): %s", err) + } +} func TestPatchConfigMap(t *testing.T) { httpmock.Activate() defer httpmock.DeactivateAndReset() diff --git a/internal/loftsman.go b/internal/loftsman.go index 98cd83e..da0c020 100644 --- a/internal/loftsman.go +++ b/internal/loftsman.go @@ -38,7 +38,8 @@ const ( statusCancelled = "cancelled" statusCrashed = "crashed" statusAvasted = "avasted" - configMapNameTemplate = "loftsman-%s" + shipConfigMapNameTemplate = "loftsman-%s" + logConfigMapNameTemplate = "loftsman-%s-ship-log" ) // To reduce the need for always initializing cluster connectivity and internal objects @@ -119,14 +120,17 @@ func (loftsman *Loftsman) Ship() error { loftsman.logger.Header("Shipping your Helm workloads with Loftsman") - configMapName := fmt.Sprintf(configMapNameTemplate, loftsman.Settings.Manifest.Name) - configMapData := make(map[string]string) + shipConfigMapName := fmt.Sprintf(shipConfigMapNameTemplate, loftsman.Settings.Manifest.Name) + shipConfigMapData := make(map[string]string) + logConfigMapName := fmt.Sprintf(logConfigMapNameTemplate, loftsman.Settings.Manifest.Name) + logConfigMapData := make(map[string]string) loftsman.logger.Info().Msgf("Ensuring that the %s namespace exists", loftsman.Settings.Namespace) if err = loftsman.kubernetes.EnsureNamespace(loftsman.Settings.Namespace); err != nil { return loftsman.fail(fmt.Errorf("Error ensuring that the %s namespace exists: %s", loftsman.Settings.Namespace, err)) } - activeConfigMap, err := loftsman.kubernetes.FindConfigMap(configMapName, loftsman.Settings.Namespace, statusKey, statusActive) + + activeConfigMap, err := loftsman.kubernetes.FindConfigMap(shipConfigMapName, loftsman.Settings.Namespace, statusKey, statusActive) if err != nil { return loftsman.fail(fmt.Errorf("Error determining if another loftsman ship is in progress for manifest %s: %s", loftsman.Settings.Manifest.Name, err)) } @@ -147,20 +151,25 @@ func (loftsman *Loftsman) Ship() error { loftsman.logger.Info().Msgf("Running a release for the provided manifest at %s", loftsman.Settings.Manifest.Path) - configMapData[statusKey] = statusActive + shipConfigMapData[statusKey] = statusActive sigChannel := make(chan os.Signal) signal.Notify(sigChannel, os.Interrupt, os.Kill, syscall.SIGTERM, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT) go func() { <-sigChannel - loftsman.recordShipResult(configMapName, configMapData, statusCancelled) + loftsman.recordShipResult(shipConfigMapName, shipConfigMapData, statusCancelled) + loftsman.recordShipLog(logConfigMapName, logConfigMapData) os.Exit(0) }() - if _, err := loftsman.kubernetes.InitializeConfigMap(configMapName, loftsman.Settings.Namespace, configMapData); err != nil { - return loftsman.fail(fmt.Errorf("Error creating ship configmap %s in namespace %s: %s", configMapName, loftsman.Settings.Namespace, err)) + if _, err := loftsman.kubernetes.InitializeShipConfigMap(shipConfigMapName, 
loftsman.Settings.Namespace, shipConfigMapData); err != nil {
+		return loftsman.fail(fmt.Errorf("Error creating ship configmap %s in namespace %s: %s", shipConfigMapName, loftsman.Settings.Namespace, err))
+	}
+	if _, err := loftsman.kubernetes.InitializeLogConfigMap(logConfigMapName, loftsman.Settings.Namespace, logConfigMapData); err != nil {
+		return loftsman.fail(fmt.Errorf("Error creating log configmap %s in namespace %s: %s", logConfigMapName, loftsman.Settings.Namespace, err))
	}
	crashHandler := func() {
		if r := recover(); r != nil {
-			loftsman.recordShipResult(configMapName, configMapData, statusCrashed)
+			loftsman.recordShipResult(shipConfigMapName, shipConfigMapData, statusCrashed)
+			loftsman.recordShipLog(logConfigMapName, logConfigMapData)
			loftsman.fail(fmt.Errorf("%v", r))
		}
	}
@@ -172,7 +181,8 @@
	if len(releaseErrors) > 0 {
		releaseStatus = statusFailed
	}
-	loftsman.recordShipResult(configMapName, configMapData, releaseStatus)
+	loftsman.recordShipResult(shipConfigMapName, shipConfigMapData, releaseStatus)
+	loftsman.recordShipLog(logConfigMapName, logConfigMapData)

	if len(releaseErrors) > 0 {
		loftsman.logger.ClosingHeader("Encountered errors during the manifest release:")
@@ -190,10 +200,9 @@
}

func (loftsman *Loftsman) recordShipResult(configMapName string, configMapData map[string]string, status string) {
-	loftsman.logger.Info().Msgf("Ship status: %s. Recording status, manifest, and log data to configmap %s in namespace %s", status,
-		configMapName, loftsman.Settings.Namespace)
+	loftsman.logger.Info().Msgf("Ship status: %s. Recording status and manifest to configmap %s in namespace %s", status,
+		configMapName, loftsman.Settings.Namespace)
	configMapData["manifest.yaml"] = string(loftsman.Settings.Manifest.Content)
-	configMapData["loftsman.log"] = loftsman.logger.GetRecord()
	configMapData["status"] = status
	if _, err := loftsman.kubernetes.PatchConfigMap(configMapName, loftsman.Settings.Namespace, configMapData); err != nil {
-		loftsman.logger.Error().Err(fmt.Errorf("Error patching configmap %s with result, manifest, and log data to the %s namespace: %s",
+		loftsman.logger.Error().Err(fmt.Errorf("Error patching configmap %s with result and manifest data in the %s namespace: %s",
@@ -202,6 +211,20 @@ func (loftsman *Loftsman) recordShipResult(configMapName string, configMapData m
	}
}

+func (loftsman *Loftsman) recordShipLog(configMapName string, configMapData map[string]string) {
+	loftsman.logger.Info().Msgf("Recording log data to configmap %s in namespace %s",
+		configMapName, loftsman.Settings.Namespace)
+
+	configMapData["loftsman.log"] = loftsman.logger.GetRecord()
+
+	if _, err := loftsman.kubernetes.PatchConfigMap(configMapName, loftsman.Settings.Namespace, configMapData); err != nil {
+		loftsman.logger.Error().Err(fmt.Errorf("Error patching configmap %s with log data in the %s namespace: %s",
+			configMapName, loftsman.Settings.Namespace, err)).Msg("")
+		fmt.Println("")
+	}
+}
+
// ManifestCreate will create a new manifest and output it to stdout
func (loftsman *Loftsman) ManifestCreate() error {
	var err error
@@ -255,7 +278,7 @@ func (loftsman *Loftsman) Avast() error {
		return nil
	}

-	configMapName := fmt.Sprintf(configMapNameTemplate, loftsman.Settings.Manifest.Name)
+	configMapName := fmt.Sprintf(shipConfigMapNameTemplate, loftsman.Settings.Manifest.Name)

	activeConfigMap, err := loftsman.kubernetes.FindConfigMap(configMapName, loftsman.Settings.Namespace, statusKey, statusActive)
	if err != nil {
diff --git a/mocks/custom-mocks/kubernetes.go 
b/mocks/custom-mocks/kubernetes.go index 991e3b9..387848b 100644 --- a/mocks/custom-mocks/kubernetes.go +++ b/mocks/custom-mocks/kubernetes.go @@ -31,7 +31,8 @@ func GetKubernetesMock(triggerFoundConfigMap bool) *kubernetesmocks.Kubernetes { } return nil }, nil) - k.On("InitializeConfigMap", mock.AnythingOfType("string"), mock.AnythingOfType("string"), mock.AnythingOfType("map[string]string")).Return(&v1.ConfigMap{}, nil) + k.On("InitializeShipConfigMap", mock.AnythingOfType("string"), mock.AnythingOfType("string"), mock.AnythingOfType("map[string]string")).Return(&v1.ConfigMap{}, nil) + k.On("InitializeLogConfigMap", mock.AnythingOfType("string"), mock.AnythingOfType("string"), mock.AnythingOfType("map[string]string")).Return(&v1.ConfigMap{}, nil) k.On("PatchConfigMap", mock.AnythingOfType("string"), mock.AnythingOfType("string"), mock.AnythingOfType("map[string]string")).Return(&v1.ConfigMap{}, nil) k.On("GetSecretKeyValue", mock.AnythingOfType("string"), mock.AnythingOfType("string"), mock.AnythingOfType("string")).Return(TestSecretKeyValue, nil) return k diff --git a/mocks/internal/interfaces/Helm.go b/mocks/internal/interfaces/Helm.go index 1549c57..0a460b1 100644 --- a/mocks/internal/interfaces/Helm.go +++ b/mocks/internal/interfaces/Helm.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery v2.9.4. DO NOT EDIT. package mocks diff --git a/mocks/internal/interfaces/Kubernetes.go b/mocks/internal/interfaces/Kubernetes.go index da312c6..092fa29 100644 --- a/mocks/internal/interfaces/Kubernetes.go +++ b/mocks/internal/interfaces/Kubernetes.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery v2.9.4. DO NOT EDIT. package mocks @@ -84,8 +84,31 @@ func (_m *Kubernetes) Initialize(kubeconfigPath string, kubeContext string) erro return r0 } -// InitializeConfigMap provides a mock function with given fields: name, namespace, data -func (_m *Kubernetes) InitializeConfigMap(name string, namespace string, data map[string]string) (*v1.ConfigMap, error) { +// InitializeLogConfigMap provides a mock function with given fields: name, namespace, data +func (_m *Kubernetes) InitializeLogConfigMap(name string, namespace string, data map[string]string) (*v1.ConfigMap, error) { + ret := _m.Called(name, namespace, data) + + var r0 *v1.ConfigMap + if rf, ok := ret.Get(0).(func(string, string, map[string]string) *v1.ConfigMap); ok { + r0 = rf(name, namespace, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.ConfigMap) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string, map[string]string) error); ok { + r1 = rf(name, namespace, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// InitializeShipConfigMap provides a mock function with given fields: name, namespace, data +func (_m *Kubernetes) InitializeShipConfigMap(name string, namespace string, data map[string]string) (*v1.ConfigMap, error) { ret := _m.Called(name, namespace, data) var r0 *v1.ConfigMap diff --git a/mocks/internal/interfaces/Manifest.go b/mocks/internal/interfaces/Manifest.go index 4d08e2d..b4327a2 100644 --- a/mocks/internal/interfaces/Manifest.go +++ b/mocks/internal/interfaces/Manifest.go @@ -1,4 +1,4 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. +// Code generated by mockery v2.9.4. DO NOT EDIT. package mocks