From bd072b5fad7b12335000b937556b8d86ed5ae88c Mon Sep 17 00:00:00 2001
From: Chuck Ha
Date: Tue, 7 Nov 2017 09:00:29 -0500
Subject: [PATCH] [WIP] Change how plugins work

Instead of defining our own schema for plugins, we now use annotations
to pass plugin metadata to sonobuoy. This lets us define full Kubernetes
resources as plugins.

The resources we define are templates. The idea is to eventually move
these templates over to ksonnet and build a ksonnet library for the
pieces shared between plugins.

Remaining work:

* Pass the remaining configuration values (for example `RESULTS_DIR`)
  into the template.
* Add a test or two for the plugin.Interface implementations

Signed-off-by: Chuck Ha
---
 examples/quickstart.yaml                 | 227 +++++++++++++----------
 pkg/config/loader_test.go                |  17 --
 pkg/plugin/driver/daemonset/daemonset.go | 161 ++++-------------
 pkg/plugin/driver/job/job.go             | 146 ++++-------------
 pkg/plugin/driver/utils/utils.go         |  25 ++-
 pkg/plugin/interface.go                  |  24 +--
 pkg/plugin/loader/loader.go              | 127 ++++---------
 pkg/worker/config.go                     |   1 +
 plugins.d/{e2e.yaml => e2e.tmpl}         |  25 ++-
 plugins.d/systemd_logs.tmpl              |  87 +++++++++
 plugins.d/systemdlogs.yaml               |  73 --------
 11 files changed, 356 insertions(+), 557 deletions(-)
 rename plugins.d/{e2e.yaml => e2e.tmpl} (76%)
 create mode 100644 plugins.d/systemd_logs.tmpl
 delete mode 100644 plugins.d/systemdlogs.yaml

diff --git a/examples/quickstart.yaml b/examples/quickstart.yaml
index 9cb07082e..d568d97ab 100644
--- a/examples/quickstart.yaml
+++ b/examples/quickstart.yaml
@@ -112,91 +112,48 @@ metadata:
 ---
 apiVersion: v1
 data:
-  e2e.yaml: |
-    driver: Job
-    name: e2e
-    resultType: e2e
+  e2e.tmpl: |
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: "sonobuoy-e2e-job-{{.SessionID}}"
+      annotations:
+        "sonobuoy-plugin": "e2e"
+        "sonobuoy-driver": "Job"
+        "sonobuoy-result-type": "e2e"
+      labels:
+        component: sonobuoy
+        tier: analysis
+        "sonobuoy-run": "{{.SessionID}}"
+      namespace: "{{.Namespace}}"
     spec:
-      containers:
-      - env:
-        - name: E2E_FOCUS
-          value: Pods should be submitted and removed
-        image: gcr.io/heptio-images/kube-conformance:v1.8
-        imagePullPolicy: Always
-        name: e2e
-        volumeMounts:
-        - mountPath: /tmp/results
-          name: results
-      - command:
-        - sh
-        - -c
-        - /sonobuoy worker global -v 5 --logtostderr
-        env:
-        - name: NODE_NAME
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: spec.nodeName
-        - name: RESULTS_DIR
-          value: /tmp/results
-        image: gcr.io/heptio-images/sonobuoy:master
-        imagePullPolicy: Always
-        name: sonobuoy-worker
-        volumeMounts:
-        - mountPath: /etc/sonobuoy
-          name: config
-        - mountPath: /tmp/results
-          name: results
-      restartPolicy: Never
       serviceAccountName: sonobuoy-serviceaccount
       tolerations:
-      - effect: NoSchedule
-        key: node-role.kubernetes.io/master
+      - key: node-role.kubernetes.io/master
         operator: Exists
+        effect: NoSchedule
       - key: CriticalAddonsOnly
        operator: Exists
-      volumes:
-      - emptyDir: {}
-        name: results
-      - configMap:
-          name: __SONOBUOY_CONFIGMAP__
-        name: config
-  systemdlogs.yaml: |
-    driver: DaemonSet
-    name: systemd_logs
-    resultType: systemd_logs
-    spec:
+      restartPolicy: Never
       containers:
-      - command:
-        - sh
-        - -c
-        - /get_systemd_logs.sh && sleep 3600
-        env:
-        - name: NODE_NAME
-          valueFrom:
-            fieldRef:
-              apiVersion: v1
-              fieldPath: spec.nodeName
-        - name: RESULTS_DIR
-          value: /tmp/results
-        - name: CHROOT_DIR
-          value: /node
-        image: gcr.io/heptio-images/sonobuoy-plugin-systemd-logs:latest
+      - name: e2e
+        image: gcr.io/heptio-images/kube-conformance:latest
        imagePullPolicy: 
Always - name: systemd-logs - securityContext: - privileged: true + # NOTE: Full conformance can take a while depending on your cluster size. + # As a result, only a single test is set atm to verify correctness. + # Operators that want the complete test results can comment out the + # env section. + env: + - name: E2E_FOCUS + value: "Pods should be submitted and removed" volumeMounts: - - mountPath: /node - name: root - - mountPath: /tmp/results - name: results - - mountPath: /etc/sonobuoy - name: config - - command: + - name: results + mountPath: /tmp/results + - name: sonobuoy-worker + command: - sh - -c - - /sonobuoy worker single-node -v 5 --logtostderr && sleep 3600 + - /sonobuoy worker global -v 5 --logtostderr env: - name: NODE_NAME valueFrom: @@ -205,35 +162,107 @@ data: fieldPath: spec.nodeName - name: RESULTS_DIR value: /tmp/results + - name: MASTER_URL + value: "{{.MasterAddress}}" + - name: RESULT_TYPE + value: "e2e" image: gcr.io/heptio-images/sonobuoy:master imagePullPolicy: Always - name: sonobuoy-worker - securityContext: - privileged: true volumeMounts: - - mountPath: /tmp/results - name: results - - mountPath: /etc/sonobuoy - name: config - dnsPolicy: ClusterFirstWithHostNet - hostIPC: true - hostNetwork: true - hostPID: true - tolerations: - - effect: NoSchedule - key: node-role.kubernetes.io/master - operator: Exists - - key: CriticalAddonsOnly - operator: Exists + - name: results + mountPath: /tmp/results volumes: - - hostPath: - path: / - name: root - - emptyDir: {} - name: results - - configMap: - name: __SONOBUOY_CONFIGMAP__ - name: config + - name: results + emptyDir: {} + systemd_logs.tmpl: | + apiVersion: extensions/v1beta1 + kind: DaemonSet + metadata: + name: "sonobuoy-systemd-logs-config-{{.SessionID}}" + annotations: + "sonobuoy-plugin": "systemd_logs" + "sonobuoy-driver": "DaemonSet" + "sonobuoy-result-type": "systemd_logs" + labels: + component: sonobuoy + tier: analysis + "sonobuoy-run": "{{.SessionID}}" + namespace: {{.Namespace}} + spec: + selector: + matchLabels: + "sonobuoy-run": "{{.SessionID}}" + template: + metadata: + labels: + component: sonobuoy + tier: analysis + "sonobuoy-run": "{{.SessionID}}" + spec: + tolerations: + - key: node-role.kubernetes.io/master + operator: Exists + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Exists + hostNetwork: true + hostIPC: true + hostPID: true + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: systemd-logs + command: + - sh + - -c + - /get_systemd_logs.sh && sleep 3600 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: RESULTS_DIR + value: /tmp/results + - name: CHROOT_DIR + value: /node + image: gcr.io/heptio-images/sonobuoy-plugin-systemd-logs:latest + imagePullPolicy: Always + securityContext: + privileged: true + volumeMounts: + - mountPath: /node + name: root + - mountPath: /tmp/results + name: results + - name: sonobuoy-worker + command: + - sh + - -c + - /sonobuoy worker single-node -v 5 --logtostderr && sleep 3600 + env: + - name: NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: RESULTS_DIR + value: /tmp/results + - name: MASTER_URL + value: "{{.MasterAddress}}" + image: gcr.io/heptio-images/sonobuoy:master + imagePullPolicy: Always + securityContext: + privileged: true + volumeMounts: + - mountPath: /tmp/results + name: results + volumes: + - name: root + hostPath: + path: / + - name: results + emptyDir: {} + kind: ConfigMap metadata: labels: diff --git a/pkg/config/loader_test.go 
b/pkg/config/loader_test.go index 59f7792bb..4f707cf1b 100644 --- a/pkg/config/loader_test.go +++ b/pkg/config/loader_test.go @@ -130,26 +130,9 @@ func TestLoadAllPlugins(t *testing.T) { t.Fatalf("First result of LoadAllPlugins has the wrong name: %v != systemd_logs", name) } - if len(dsplugin.GetPodSpec().Containers) != 2 { - t.Fatalf("DaemonSetPlugin should have 2 containers, got %v", len(dsplugin.GetPodSpec().Containers)) - } - - firstContainerName := dsplugin.GetPodSpec().Containers[0].Name - if firstContainerName != "systemd-logs" { - t.Fatalf("systemd_logs plugin had unexpected container name (%v != %v)", firstContainerName, "systemd-logs") - } - jobplugin := plugins[1] if name := jobplugin.GetName(); name != "e2e" { t.Fatalf("Second result of LoadAllPlugins has the wrong name: %v != e2e", name) } - if len(dsplugin.GetPodSpec().Containers) != 2 { - t.Fatalf("JobPlugin should have 2 containers, got %d", len(jobplugin.GetPodSpec().Containers)) - } - - firstContainerName = jobplugin.GetPodSpec().Containers[0].Name - if firstContainerName != "e2e" { - t.Fatalf("e2e plugin had unexpected container name (%v != %v)", firstContainerName, "e2e") - } } diff --git a/pkg/plugin/driver/daemonset/daemonset.go b/pkg/plugin/driver/daemonset/daemonset.go index 5344c963a..a82f9ee8b 100644 --- a/pkg/plugin/driver/daemonset/daemonset.go +++ b/pkg/plugin/driver/daemonset/daemonset.go @@ -17,34 +17,29 @@ limitations under the License. package daemonset import ( - "encoding/hex" + "bytes" "fmt" - "strings" "time" "github.com/heptio/sonobuoy/pkg/errlog" "github.com/heptio/sonobuoy/pkg/plugin" "github.com/heptio/sonobuoy/pkg/plugin/driver/utils" "github.com/pkg/errors" - gouuid "github.com/satori/go.uuid" v1 "k8s.io/api/core/v1" v1beta1ext "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/json" + + kuberuntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" ) // Plugin is a plugin driver that dispatches containers to each node, // expecting each pod to report to the master. 
 type Plugin struct {
-	Name       string
-	PodSpec    *v1.PodSpec          `json:"spec"`
-	Config     *plugin.WorkerConfig `json:"config"`
-	Namespace  string
-	UUID       gouuid.UUID
-	ResultType string
-
-	cleanedUp bool
+	Definition      plugin.Definition
+	DfnTemplateData *plugin.DefinitionTemplateData
+	cleanedUp       bool
 }
 
 // Ensure DaemonSetPlugin implements plugin.Interface
@@ -54,23 +49,16 @@ var _ plugin.Interface = &Plugin{}
 // and sonobuoy master address
 func NewPlugin(namespace string, dfn plugin.Definition, cfg *plugin.WorkerConfig) *Plugin {
 	return &Plugin{
-		Name:       dfn.Name,
-		UUID:       gouuid.NewV4(),
-		ResultType: dfn.ResultType,
-		PodSpec:    &dfn.PodSpec,
-		Namespace:  namespace,
-		Config:     cfg,
+		Definition: dfn,
+		DfnTemplateData: &plugin.DefinitionTemplateData{
+			SessionID:     utils.GetSessionID(),
+			MasterAddress: cfg.MasterURL,
+			Namespace:     namespace,
+		},
+		cleanedUp: false,
 	}
 }
 
-func (p *Plugin) configMapName() string {
-	return "sonobuoy-" + strings.Replace(p.Name, "_", "-", -1) + "-config-" + p.GetSessionID()
-}
-
-func (p *Plugin) daemonSetName() string {
-	return "sonobuoy-" + strings.Replace(p.Name, "_", "-", -1) + "-daemonset-" + p.GetSessionID()
-}
-
 // ExpectedResults returns the list of results expected for this daemonset
 func (p *Plugin) ExpectedResults(nodes []v1.Node) []plugin.ExpectedResult {
 	ret := make([]plugin.ExpectedResult, 0, len(nodes))
@@ -78,7 +66,7 @@ func (p *Plugin) ExpectedResults(nodes []v1.Node) []plugin.ExpectedResult {
 	for _, node := range nodes {
 		ret = append(ret, plugin.ExpectedResult{
 			NodeName:   node.Name,
-			ResultType: p.ResultType,
+			ResultType: p.GetResultType(),
 		})
 	}
 
@@ -87,26 +75,22 @@ func (p *Plugin) ExpectedResults(nodes []v1.Node) []plugin.ExpectedResult {
 
 // GetResultType returns the ResultType for this plugin (to adhere to plugin.Interface)
 func (p *Plugin) GetResultType() string {
-	return p.ResultType
+	return p.Definition.ResultType
 }
 
 // Run dispatches worker pods according to the DaemonSet's configuration.
 func (p *Plugin) Run(kubeclient kubernetes.Interface) error {
-	var err error
-	configMap, err := p.buildConfigMap()
-	if err != nil {
-		return err
-	}
-	daemonSet, err := p.buildDaemonSet()
-	if err != nil {
-		return err
+	var (
+		b         bytes.Buffer
+		daemonSet v1beta1ext.DaemonSet
+	)
+	if err := p.Definition.Template.Execute(&b, p.DfnTemplateData); err != nil {
+		return errors.Wrapf(err, "could not execute template for daemonset plugin %v", p.GetName())
+	}
+	if err := kuberuntime.DecodeInto(scheme.Codecs.UniversalDecoder(), b.Bytes(), &daemonSet); err != nil {
+		return errors.Wrapf(err, "could not decode the executed template into a DaemonSet for plugin %v", p.GetName())
 	}
 
-	// Submit them to the API server, capturing the results
-	if _, err = kubeclient.CoreV1().ConfigMaps(p.Namespace).Create(configMap); err != nil {
-		return errors.Wrapf(err, "could not create ConfigMap for daemonset plugin %v", p.GetName())
-	}
-	if _, err = kubeclient.ExtensionsV1beta1().DaemonSets(p.Namespace).Create(daemonSet); err != nil {
+	// TODO(chuckha): switch to .Apps() once extensions has been deprecated.
+ if _, err := kubeclient.ExtensionsV1beta1().DaemonSets(p.DfnTemplateData.Namespace).Create(&daemonSet); err != nil { return errors.Wrapf(err, "could not create DaemonSet for daemonset plugin %v", p.GetName()) } @@ -126,21 +110,12 @@ func (p *Plugin) Cleanup(kubeclient kubernetes.Interface) { } // Delete the DaemonSet created by this plugin - err := kubeclient.ExtensionsV1beta1().DaemonSets(p.Namespace).DeleteCollection( - &deleteOptions, - listOptions, - ) - if err != nil { - errlog.LogError(errors.Wrapf(err, "could not delete DaemonSet %v for daemonset plugin %v", p.daemonSetName(), p.GetName())) - } - - // Delete the ConfigMap created by this plugin - err = kubeclient.CoreV1().ConfigMaps(p.Namespace).DeleteCollection( + err := kubeclient.ExtensionsV1beta1().DaemonSets(p.DfnTemplateData.Namespace).DeleteCollection( &deleteOptions, listOptions, ) if err != nil { - errlog.LogError(errors.Wrapf(err, "could not delete ConfigMap %v for daemonset plugin %v", p.configMapName(), p.GetName())) + errlog.LogError(errors.Wrapf(err, "could not delete DaemonSet-%v for daemonset plugin %v", p.DfnTemplateData.SessionID, p.GetName())) } } @@ -152,13 +127,13 @@ func (p *Plugin) listOptions() metav1.ListOptions { // findDaemonSet gets the daemonset that we created, using a kubernetes label search func (p *Plugin) findDaemonSet(kubeclient kubernetes.Interface) (*v1beta1ext.DaemonSet, error) { - dsets, err := kubeclient.ExtensionsV1beta1().DaemonSets(p.Namespace).List(p.listOptions()) + dsets, err := kubeclient.ExtensionsV1beta1().DaemonSets(p.DfnTemplateData.Namespace).List(p.listOptions()) if err != nil { return nil, errors.WithStack(err) } if len(dsets.Items) != 1 { - return nil, errors.Errorf("expected plugin %v to create 1 daemonset, found %v", p.Name, len(dsets.Items)) + return nil, errors.Errorf("expected plugin %v to create 1 daemonset, found %v", p.Definition.Name, len(dsets.Items)) } return &dsets.Items[0], nil @@ -193,7 +168,7 @@ func (p *Plugin) Monitor(kubeclient kubernetes.Interface, availableNodes []v1.No } // Find all the pods configured by this daemonset - pods, err := kubeclient.CoreV1().Pods(p.Namespace).List(p.listOptions()) + pods, err := kubeclient.CoreV1().Pods(p.DfnTemplateData.Namespace).List(p.listOptions()) if err != nil { errlog.LogError(errors.Wrapf(err, "could not find pods created by plugin %v, will retry", p.GetName())) // Likewise, if we can't query for pods, just retry next time. @@ -234,7 +209,7 @@ func (p *Plugin) Monitor(kubeclient kubernetes.Interface, availableNodes []v1.No "No pod was scheduled on node %v within %v. 
Check tolerations for plugin %v", node.Name, time.Now().Sub(ds.CreationTimestamp.Time), - p.Name, + p.Definition.Name, ), }, node.Name) } @@ -242,81 +217,11 @@ func (p *Plugin) Monitor(kubeclient kubernetes.Interface, availableNodes []v1.No } } -// GetSessionID returns a unique identifier for this dispatcher, used for tagging -// objects and cleaning them up later func (p *Plugin) GetSessionID() string { - ret := make([]byte, hex.EncodedLen(8)) - hex.Encode(ret, p.UUID.Bytes()[0:8]) - return string(ret) + return p.DfnTemplateData.SessionID } // GetName returns the name of this DaemonSet plugin func (p *Plugin) GetName() string { - return p.Name -} - -// GetPodSpec returns the pod spec for this DaemonSet -func (p *Plugin) GetPodSpec() *v1.PodSpec { - return p.PodSpec -} - -func (p *Plugin) buildConfigMap() (*v1.ConfigMap, error) { - // We get to build the worker config directly from our own data structures, - // this is where doing this natively in golang helps a lot (as opposed to - // shelling out to kubectl) - cfgjson, err := json.Marshal(p.Config) - if err != nil { - return nil, errors.WithStack(err) - } - - cmap := &v1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: p.configMapName(), - Labels: utils.ApplyDefaultLabels(p, map[string]string{}), - Namespace: p.Namespace, - }, - Data: map[string]string{ - "worker.json": string(cfgjson), - }, - } - - return cmap, nil -} - -func (p *Plugin) buildDaemonSet() (*v1beta1ext.DaemonSet, error) { - // Fix up the pod spec to use this session's config map - for _, vol := range p.PodSpec.Volumes { - if vol.ConfigMap != nil && vol.ConfigMap.Name == "__SONOBUOY_CONFIGMAP__" { - vol.ConfigMap.Name = p.configMapName() - } - } - - ds := &v1beta1ext.DaemonSet{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "extensions/v1beta1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: p.daemonSetName(), - Labels: utils.ApplyDefaultLabels(p, map[string]string{}), - Namespace: p.Namespace, - }, - Spec: v1beta1ext.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "sonobuoy-run": p.GetSessionID(), - }, - }, - Template: v1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: utils.ApplyDefaultLabels(p, map[string]string{}), - }, - Spec: *p.PodSpec, - }, - }, - } - - return ds, nil + return p.Definition.Name } diff --git a/pkg/plugin/driver/job/job.go b/pkg/plugin/driver/job/job.go index 51fe04635..77ff3e454 100644 --- a/pkg/plugin/driver/job/job.go +++ b/pkg/plugin/driver/job/job.go @@ -17,8 +17,7 @@ limitations under the License. 
package job import ( - "encoding/hex" - "strings" + "bytes" "time" "github.com/heptio/sonobuoy/pkg/errlog" @@ -27,23 +26,18 @@ import ( "github.com/pkg/errors" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/client-go/kubernetes" - gouuid "github.com/satori/go.uuid" + kuberuntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" ) // Plugin is a plugin driver that dispatches a single pod to the given // kubernetes cluster type Plugin struct { - Name string - PodSpec *v1.PodSpec `json:"spec"` - Config *plugin.WorkerConfig `json:"config"` - Namespace string - UUID gouuid.UUID - ResultType string - - cleanedUp bool + Definition plugin.Definition + DfnTemplateData *plugin.DefinitionTemplateData + cleanedUp bool } // Ensure Plugin implements plugin.Interface @@ -53,54 +47,42 @@ var _ plugin.Interface = &Plugin{} // and sonobuoy master address func NewPlugin(namespace string, dfn plugin.Definition, cfg *plugin.WorkerConfig) *Plugin { return &Plugin{ - Name: dfn.Name, - UUID: gouuid.NewV4(), - ResultType: dfn.ResultType, - PodSpec: &dfn.PodSpec, - Namespace: namespace, - Config: cfg, + Definition: dfn, + DfnTemplateData: &plugin.DefinitionTemplateData{ + SessionID: utils.GetSessionID(), + MasterAddress: cfg.MasterURL, + Namespace: namespace, + }, + cleanedUp: false, // be explicit } } -func (p *Plugin) configMapName() string { - return "sonobuoy-" + strings.Replace(p.Name, "_", "-", -1) + "-config-" + p.GetSessionID() -} - -func (p *Plugin) jobName() string { - return "sonobuoy-" + strings.Replace(p.Name, "_", "-", -1) + "-job-" + p.GetSessionID() -} - // ExpectedResults returns the list of results expected for this plugin. Since // a Job only launches one pod, only one result type is expected. func (p *Plugin) ExpectedResults(nodes []v1.Node) []plugin.ExpectedResult { return []plugin.ExpectedResult{ - plugin.ExpectedResult{ResultType: p.ResultType}, + plugin.ExpectedResult{ResultType: p.GetResultType()}, } } // GetResultType returns the ResultType for this plugin (to adhere to plugin.Interface) func (p *Plugin) GetResultType() string { - return p.ResultType + return p.Definition.ResultType } // Run dispatches worker pods according to the Job's configuration. 
 func (p *Plugin) Run(kubeclient kubernetes.Interface) error {
-	var err error
-	configMap, err := p.buildConfigMap()
-	if err != nil {
-		return err
-	}
-	job, err := p.buildJob()
-	if err != nil {
-		return err
+	var (
+		b   bytes.Buffer
+		job v1.Pod
+	)
+	if err := p.Definition.Template.Execute(&b, p.DfnTemplateData); err != nil {
+		return errors.Wrapf(err, "could not execute template for Job plugin %v", p.GetName())
+	}
+	if err := kuberuntime.DecodeInto(scheme.Codecs.UniversalDecoder(), b.Bytes(), &job); err != nil {
+		return errors.Wrapf(err, "could not decode executed template into a Pod for Job plugin %v", p.GetName())
 	}
 
-	// Submit them to the API server, capturing the results
-	if _, err = kubeclient.CoreV1().ConfigMaps(p.Namespace).Create(configMap); err != nil {
-		return errors.Wrapf(err, "could not create ConfigMap resource for Job plugin %v", p.Name)
-	}
-	if _, err = kubeclient.CoreV1().Pods(p.Namespace).Create(job); err != nil {
-		return errors.Wrapf(err, "could not create Job resource for Job plugin %v", p.Name)
+	if _, err := kubeclient.CoreV1().Pods(p.DfnTemplateData.Namespace).Create(&job); err != nil {
+		return errors.Wrapf(err, "could not create Job resource for Job plugin %v", p.GetName())
 	}
 
 	return nil
@@ -157,21 +139,12 @@ func (p *Plugin) Cleanup(kubeclient kubernetes.Interface) {
 	// single Pod, to get the restart semantics we want. But later if we
 	// want to make this a real Job, we still need to delete pods manually
 	// after deleting the job.
-	err := kubeclient.CoreV1().Pods(p.Namespace).DeleteCollection(
-		&deleteOptions,
-		listOptions,
-	)
-	if err != nil {
-		errlog.LogError(errors.Wrapf(err, "error deleting pods for Job %v", p.jobName()))
-	}
-
-	// Delete the ConfigMap created by this plugin
-	err = kubeclient.CoreV1().ConfigMaps(p.Namespace).DeleteCollection(
+	err := kubeclient.CoreV1().Pods(p.DfnTemplateData.Namespace).DeleteCollection(
 		&deleteOptions,
 		listOptions,
 	)
 	if err != nil {
-		errlog.LogError(errors.Wrapf(err, "error deleting pods for Job %v", p.jobName()))
+		errlog.LogError(errors.Wrapf(err, "error deleting pods for Job-%v", p.GetSessionID()))
 	}
 }
 
@@ -185,80 +158,23 @@ func (p *Plugin) listOptions() metav1.ListOptions {
 // search. If no pod is found, or if multiple pods are found, returns an
 // error.
 func (p *Plugin) findPod(kubeclient kubernetes.Interface) (*v1.Pod, error) {
-	pods, err := kubeclient.CoreV1().Pods(p.Namespace).List(p.listOptions())
+	pods, err := kubeclient.CoreV1().Pods(p.DfnTemplateData.Namespace).List(p.listOptions())
 	if err != nil {
 		return nil, errors.WithStack(err)
 	}
 
 	if len(pods.Items) != 1 {
-		return nil, errors.Errorf("no pods were created by plugin %v", p.Name)
+		return nil, errors.Errorf("expected plugin %v to create exactly 1 pod, found %v", p.Definition.Name, len(pods.Items))
 	}
 
 	return &pods.Items[0], nil
 }
 
-// GetSessionID returns a unique identifier for this dispatcher, used for tagging
-// objects and cleaning them up later
+// GetSessionID returns the session id associated with this plugin instance
 func (p *Plugin) GetSessionID() string {
-	ret := make([]byte, hex.EncodedLen(8))
-	hex.Encode(ret, p.UUID.Bytes()[0:8])
-	return string(ret)
+	return p.DfnTemplateData.SessionID
 }
 
 // GetName returns the name of this Job plugin
 func (p *Plugin) GetName() string {
-	return p.Name
-}
-
-// GetPodSpec returns the pod spec for this Job
-func (p *Plugin) GetPodSpec() *v1.PodSpec {
-	return p.PodSpec
-}
-
-func (p *Plugin) buildConfigMap() (*v1.ConfigMap, error) {
-	// We get to build the worker config directly from our own data structures,
-	// this is where doing this natively in golang helps a lot (as opposed to
-	// shelling out to kubectl)
-	cfgjson, err := json.Marshal(p.Config)
-	if err != nil {
-		return nil, errors.WithStack(err)
-	}
-
-	cmap := &v1.ConfigMap{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: "v1",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      p.configMapName(),
-			Labels:    utils.ApplyDefaultLabels(p, map[string]string{}),
-			Namespace: p.Namespace,
-		},
-		Data: map[string]string{
-			"worker.json": string(cfgjson),
-		},
-	}
-
-	return cmap, nil
-}
-
-func (p *Plugin) buildJob() (*v1.Pod, error) {
-	// Fix up the pod spec to use this session's config map
-	for _, vol := range p.PodSpec.Volumes {
-		if vol.ConfigMap != nil && vol.ConfigMap.Name == "__SONOBUOY_CONFIGMAP__" {
-			vol.ConfigMap.Name = p.configMapName()
-		}
-	}
-
-	// NOTE: We're actually only constructing a pod with Never restart policy
-	// b/c K8s.Job semantics are broken.
-	job := &v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      p.jobName(),
-			Labels:    utils.ApplyDefaultLabels(p, map[string]string{}),
-			Namespace: p.Namespace,
-		},
-		Spec: *p.PodSpec,
-	}
-
-	return job, nil
+	return p.Definition.Name
 }
diff --git a/pkg/plugin/driver/utils/utils.go b/pkg/plugin/driver/utils/utils.go
index a738f1b3d..2262334e6 100644
--- a/pkg/plugin/driver/utils/utils.go
+++ b/pkg/plugin/driver/utils/utils.go
@@ -18,14 +18,25 @@ package utils
 
 import (
 	"bytes"
+	"encoding/hex"
 	"encoding/json"
 	"fmt"
 
 	"github.com/heptio/sonobuoy/pkg/plugin"
+	gouuid "github.com/satori/go.uuid"
 	v1 "k8s.io/api/core/v1"
 )
 
+// GetSessionID generates a new session id. A session is essentially one
+// run of a plugin.
+func GetSessionID() string {
+	uuid := gouuid.NewV4()
+	ret := make([]byte, hex.EncodedLen(8))
+	hex.Encode(ret, uuid.Bytes()[0:8])
+	return string(ret)
+}
+
 // IsPodFailing returns whether a plugin's pod is failing and isn't likely to
 // succeed.
 // TODO: this may require more revisions as we get more experience with
@@ -78,17 +89,3 @@ func MakeErrorResult(resultType string, errdata map[string]interface{}, nodeName
 		Extension: ".json",
 	}
 }
-
-// ApplyDefaultLabels applies a default label set to the given
-// map[string]string. All our resources should have a commmon label set,
-// particularly a unique sesssion ID for sonobuoy run.  This can allow fallback
-// cleanup for this session by deleting any resources with
-// `sonobuoy-run=`
-func ApplyDefaultLabels(p plugin.Interface, labels map[string]string) map[string]string {
-	labels["component"] = "sonobuoy"
-	labels["tier"] = "analysis"
-	labels["sonobuoy-run"] = p.GetSessionID()
-	labels["sonobuoy-plugin"] = p.GetName()
-
-	return labels
-}
diff --git a/pkg/plugin/interface.go b/pkg/plugin/interface.go
index 688b8e1fc..1f1fdd3b7 100644
--- a/pkg/plugin/interface.go
+++ b/pkg/plugin/interface.go
@@ -19,6 +19,7 @@ package plugin
 import (
 	"io"
 	"path"
+	"text/template"
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes"
@@ -45,23 +46,22 @@ type Interface interface {
 	GetResultType() string
 	// GetName returns the name of this plugin
 	GetName() string
-	// GetPodSpec returns the pod spec for this plugins
-	GetPodSpec() *v1.PodSpec
-	// GetSessionID returns a distinct identifier for this plugin in this
-	// sonobuoy session (for instance, for labeling resources created by
-	// this plugin.)
-	GetSessionID() string
+}
+
+// DefinitionTemplateData is the data required to render the template found
+// in Definition.
+type DefinitionTemplateData struct {
+	SessionID     string
+	MasterAddress string
+	Namespace     string
 }
 
 // Definition defines a plugin's features, method of launch, and other
 // metadata about it.
 type Definition struct {
-	Driver     string                 `json:"driver"`
-	Name       string                 `json:"name"`
-	ResultType string                 `json:"resultType"`
-	RawPodSpec map[string]interface{} `json:"spec"`
-
-	PodSpec v1.PodSpec // This is filled in by the plugin loader, since deserializing a pod spec is nontrivial
+	Driver     string
+	Name       string
+	ResultType string
+	Template   *template.Template
 }
 
 // ExpectedResult is an expected result that a plugin will submit. This is so
diff --git a/pkg/plugin/loader/loader.go b/pkg/plugin/loader/loader.go
index de8ee5b7b..c0d814f87 100644
--- a/pkg/plugin/loader/loader.go
+++ b/pkg/plugin/loader/loader.go
@@ -17,22 +17,22 @@ limitations under the License.
 package loader
 
 import (
-	"encoding/json"
+	"bytes"
 	"io/ioutil"
 	"os"
 	"path"
 	"path/filepath"
+	"text/template"
 
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/client-go/kubernetes/scheme"
-
-	"github.com/ghodss/yaml"
 	"github.com/heptio/sonobuoy/pkg/plugin"
 	"github.com/heptio/sonobuoy/pkg/plugin/driver/daemonset"
 	"github.com/heptio/sonobuoy/pkg/plugin/driver/job"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	kuberuntime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/kubernetes/scheme"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )
 
 // LoadAllPlugins loads all plugins by finding plugin definitions in the given
@@ -53,12 +53,12 @@ func LoadAllPlugins(namespace string, searchPath []string, selections []plugin.S
 			continue
 		}
 
-		p, err := scanPlugins(dir)
+		definitions, err := scanPlugins(dir)
 		if err != nil {
 			return ret, err
 		}
 
-		defns = append(defns, p...)
+		defns = append(defns, definitions...)
 	}
 
 	for _, selection := range selections {
@@ -78,9 +78,8 @@ func LoadAllPlugins(namespace string, searchPath []string, selections []plugin.S
 // loadPlugin loads an individual plugin by instantiating a plugin driver with
 // the settings from the given plugin definition and selection
 func loadPlugin(namespace string, dfn plugin.Definition, masterAddress string) (plugin.Interface, error) {
-	cfg := &plugin.WorkerConfig{
-		ResultType: dfn.ResultType,
-	}
+	// TODO(chuckha): We don't use the cfg for anything except passing a string around. Consider removing this struct.
+	cfg := &plugin.WorkerConfig{}
 	logrus.Infof("Loading plugin driver %v", dfn.Driver)
 	switch dfn.Driver {
 	case "DaemonSet":
@@ -97,107 +96,55 @@ func loadPlugin(namespace string, dfn plugin.Definition, masterAddress string) (
 // scanPlugins looks for Plugin Definition files in the given directory,
 // and returns an array of PluginDefinition structs.
 func scanPlugins(dir string) ([]plugin.Definition, error) {
-	var plugins []plugin.Definition
+	var pluginDfns []plugin.Definition
 	files, err := ioutil.ReadDir(dir)
 	if err != nil {
-		return plugins, err
+		return nil, errors.Wrap(err, "failed to read plugin directory")
 	}
 
 	for _, file := range files {
-		var loaderFn loader
-		switch filepath.Ext(file.Name()) {
-		case ".yaml":
-			loaderFn = loadYAML
-		case ".json":
-			loaderFn = loadJSON
-		default:
-			logrus.Warningf("Unsupported plugin file detected %v", file.Name())
+		if filepath.Ext(file.Name()) != ".tmpl" {
+			logrus.WithField("filename", file.Name()).Info("skipping non-template file")
 			continue
 		}
 
-		// Read the file into memory
+		// Read the template file into memory
 		fullPath := path.Join(dir, file.Name())
-		y, err := ioutil.ReadFile(fullPath)
+		pluginTemplate, err := ioutil.ReadFile(fullPath)
 		if err != nil {
-			return plugins, err
+			return nil, err
 		}
-
-		pluginDef, err := loaderFn(y)
+		dfn, err := loadTemplate(pluginTemplate)
 		if err != nil {
-			logrus.Warningf("Error unmarshalling bytes at %v: %v", fullPath, err)
+			logrus.WithError(err).WithField("filename", file.Name()).Info("failed to load plugin")
 			continue
 		}
-
-		// Load it into a proper PluginDefinition. If we can't, just
-		// warn. If they've selected this plugin in their config,
-		// they'll get an error then.
-		err = loadPluginDefinition(pluginDef)
-		if err != nil {
-			logrus.Warningf("Error loading plugin at %v: %v", fullPath, err)
-			continue
-		}
-
-		plugins = append(plugins, *pluginDef)
+		pluginDfns = append(pluginDfns, *dfn)
 	}
 
-	return plugins, err
-}
-
-type loader func([]byte) (*plugin.Definition, error)
-
-func loadYAML(yamlBytes []byte) (*plugin.Definition, error) {
-	var ret plugin.Definition
-	err := yaml.Unmarshal(yamlBytes, &ret)
-	return &ret, err
-}
-
-func loadJSON(jsonBytes []byte) (*plugin.Definition, error) {
-	var ret plugin.Definition
-	err := json.Unmarshal(jsonBytes, &ret)
-	return &ret, err
+	return pluginDfns, nil
 }
 
-// loadPluginDefinition takes a plugin.Definition and makes a real plugin.Definition
-func loadPluginDefinition(ret *plugin.Definition) error {
-	// Validate it
-	if ret.Driver == "" {
-		return errors.Errorf("No driver specified in plugin file")
-	}
-	if ret.ResultType == "" {
-		return errors.Errorf("No resultType specified in plugin file")
-	}
-	if ret.Name == "" {
-		return errors.Errorf("No name specified in plugin file")
-	}
-	if ret.RawPodSpec == nil {
-		return errors.Errorf("No pod spec specified in plugin file")
-	}
-
-	// Construct a pod spec from the ConfigMap data.  We can't decode it
-	// directly since a PodSpec is not a runtime.Object (it doesn't
-	// have ObjectMeta attributes like Kind and Metadata), so we:
-
-	// make a fake pod as a map[string]interface{}, and load the
-	// plugin config into its spec
-	placeholderPodMap := map[string]interface{}{
-		"apiVersion": "v1",
-		"kind":       "Pod",
-		"spec":       ret.RawPodSpec,
+func loadTemplate(tmpl []byte) (*plugin.Definition, error) {
+	t, err := template.New("plugin").Parse(string(tmpl))
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to parse template")
 	}
-
-	// serialize the result into YAML
-	placeholderPodYaml, err := yaml.Marshal(placeholderPodMap)
+	var b bytes.Buffer
+	// We only need a Kubernetes object to read metadata from, so render
+	// the template with zero values; the actual values don't matter here.
+	err = t.Execute(&b, &plugin.DefinitionTemplateData{})
 	if err != nil {
-		return err
+		return nil, errors.Wrap(err, "failed to execute template")
 	}
-
-	// Decode *that* yaml into a Pod
-	var placeholderPod v1.Pod
-	if err := kuberuntime.DecodeInto(scheme.Codecs.UniversalDecoder(), placeholderPodYaml, &placeholderPod); err != nil {
-		logrus.Fatalf("Could not decode pod spec: %v", err)
+	var x unstructured.Unstructured
+	if err := kuberuntime.DecodeInto(scheme.Codecs.UniversalDecoder(), b.Bytes(), &x); err != nil {
+		return nil, errors.Wrap(err, "failed to decode executed template into an unstructured object")
 	}
-	ret.PodSpec = placeholderPod.Spec
-
-	return nil
+	return &plugin.Definition{
+		Driver:     x.GetAnnotations()["sonobuoy-driver"],
+		Name:       x.GetAnnotations()["sonobuoy-plugin"],
+		ResultType: x.GetAnnotations()["sonobuoy-result-type"],
+		Template:   t,
+	}, nil
 }
diff --git a/pkg/worker/config.go b/pkg/worker/config.go
index efbe2c08a..54b19feac 100644
--- a/pkg/worker/config.go
+++ b/pkg/worker/config.go
@@ -47,6 +47,7 @@ func LoadConfig() (*plugin.WorkerConfig, error) {
 	viper.BindEnv("masterurl", "MASTER_URL")
 	viper.BindEnv("nodename", "NODE_NAME")
 	viper.BindEnv("resultsdir", "RESULTS_DIR")
+	viper.BindEnv("resulttype", "RESULT_TYPE")
 
 	setConfigDefaults(config)
 
diff --git a/plugins.d/e2e.yaml b/plugins.d/e2e.tmpl
similarity index 76%
rename from plugins.d/e2e.yaml
rename to plugins.d/e2e.tmpl
index 748c3587e..238831bd6 100644
--- a/plugins.d/e2e.yaml
+++ b/plugins.d/e2e.tmpl
@@ -1,6 +1,15 @@
-name: e2e
-driver: Job
-resultType: e2e
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "sonobuoy-e2e-job-{{.SessionID}}"
+  annotations:
+    "sonobuoy-plugin": "e2e"
+    "sonobuoy-driver": "Job"
+    "sonobuoy-result-type": "e2e"
+  labels:
+    component: sonobuoy
+    tier: analysis
+    "sonobuoy-run": "{{.SessionID}}"
+  namespace: "{{.Namespace}}"
 spec:
   serviceAccountName: sonobuoy-serviceaccount
   tolerations:
@@ -37,17 +46,15 @@ spec:
           fieldPath: spec.nodeName
     - name: RESULTS_DIR
       value: /tmp/results
+    - name: MASTER_URL
+      value: "{{.MasterAddress}}"
+    - name: RESULT_TYPE
+      value: "e2e"
     image: gcr.io/heptio-images/sonobuoy:latest
     imagePullPolicy: Always
     volumeMounts:
-    - name: config
-      mountPath: /etc/sonobuoy
     - name: results
       mountPath: /tmp/results
   volumes:
   - name: results
     emptyDir: {}
-  - name: config
-    configMap:
-      # This will be rewritten when the JobPlugin driver goes to launch the pod.
-      name: __SONOBUOY_CONFIGMAP__
diff --git a/plugins.d/systemd_logs.tmpl b/plugins.d/systemd_logs.tmpl
new file mode 100644
index 000000000..7b96f43b4
--- /dev/null
+++ b/plugins.d/systemd_logs.tmpl
@@ -0,0 +1,87 @@
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+  name: "sonobuoy-systemd-logs-config-{{.SessionID}}"
+  annotations:
+    "sonobuoy-plugin": "systemd_logs"
+    "sonobuoy-driver": "DaemonSet"
+    "sonobuoy-result-type": "systemd_logs"
+  labels:
+    component: sonobuoy
+    tier: analysis
+    "sonobuoy-run": "{{.SessionID}}"
+  namespace: {{.Namespace}}
+spec:
+  selector:
+    matchLabels:
+      "sonobuoy-run": "{{.SessionID}}"
+  template:
+    metadata:
+      labels:
+        component: sonobuoy
+        tier: analysis
+        "sonobuoy-run": "{{.SessionID}}"
+    spec:
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        operator: Exists
+        effect: NoSchedule
+      - key: CriticalAddonsOnly
+        operator: Exists
+      hostNetwork: true
+      hostIPC: true
+      hostPID: true
+      dnsPolicy: ClusterFirstWithHostNet
+      containers:
+      - name: systemd-logs
+        command:
+        - sh
+        - -c
+        - /get_systemd_logs.sh && sleep 3600
+        env:
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: RESULTS_DIR
+          value: /tmp/results
+        - name: CHROOT_DIR
+          value: /node
+        image: gcr.io/heptio-images/sonobuoy-plugin-systemd-logs:latest
+        imagePullPolicy: Always
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /node
+          name: root
+        - mountPath: /tmp/results
+          name: results
+      - name: sonobuoy-worker
+        command:
+        - sh
+        - -c
+        - /sonobuoy worker single-node -v 5 --logtostderr && sleep 3600
+        env:
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
+        - name: RESULTS_DIR
+          value: /tmp/results
+        - name: MASTER_URL
+          value: "{{.MasterAddress}}"
+        image: gcr.io/heptio-images/sonobuoy:latest
+        imagePullPolicy: Always
+        securityContext:
+          privileged: true
+        volumeMounts:
+        - mountPath: /tmp/results
+          name: results
+      volumes:
+      - name: root
+        hostPath:
+          path: /
+      - name: results
+        emptyDir: {}
diff --git a/plugins.d/systemdlogs.yaml b/plugins.d/systemdlogs.yaml
deleted file mode 100644
index bbb58e57c..000000000
--- a/plugins.d/systemdlogs.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-name: systemd_logs
-driver: DaemonSet
-resultType: systemd_logs
-spec:
-  tolerations:
-  - key: node-role.kubernetes.io/master
-    operator: Exists
-    effect: NoSchedule
-  - key: CriticalAddonsOnly
-    operator: Exists
-  hostNetwork: true
-  hostIPC: true
-  hostPID: true
-  dnsPolicy: ClusterFirstWithHostNet
-  containers:
-  - name: systemd-logs
-    command:
-    - sh
-    - -c
-    - /get_systemd_logs.sh && sleep 3600
-    env:
-    - name: NODE_NAME
-      valueFrom:
-        fieldRef:
-          apiVersion: v1
-          fieldPath: spec.nodeName
-    - name: RESULTS_DIR
-      value: /tmp/results
-    - name: CHROOT_DIR
-      value: /node
-    image: gcr.io/heptio-images/sonobuoy-plugin-systemd-logs:latest
-    imagePullPolicy: Always
-    securityContext:
-      privileged: true
-    volumeMounts:
-    - mountPath: /node
-      name: root
-    - mountPath: /tmp/results
-      name: results
-    - mountPath: /etc/sonobuoy
-      name: config
-  - name: sonobuoy-worker
-    command:
-    - sh
-    - -c
-    - /sonobuoy worker single-node -v 5 --logtostderr && sleep 3600
-    env:
-    - name: NODE_NAME
-      valueFrom:
-        fieldRef:
-          apiVersion: v1
-          fieldPath: spec.nodeName
-    - name: RESULTS_DIR
-      value: /tmp/results
-    image: gcr.io/heptio-images/sonobuoy:latest
-    imagePullPolicy: Always
-    securityContext:
-      privileged: true
-    volumeMounts:
-    - mountPath: /tmp/results
-      name: results
-    - mountPath: /etc/sonobuoy
-      name: config
-  volumes:
-  - name: root
-    hostPath:
-      path: /
-  - name: results
-    emptyDir: {}
-  - name: config
-    configMap:
-      # This will be rewritten when the DaemonSetPlugin driver goes to launch the pod.
-      name: __SONOBUOY_CONFIGMAP__
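
To summarize the loading mechanism this patch introduces: a plugin is now
discovered by parsing its .tmpl file, rendering it once with zero values, and
reading the sonobuoy-* annotations off the decoded object. The standalone
program below condenses that flow; it is an illustrative sketch, not a file in
the patch. TemplateData stands in for plugin.DefinitionTemplateData, and the
imports assume the client-go and apimachinery packages this repo already
vendors.

package main

import (
	"bytes"
	"fmt"
	"text/template"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	kuberuntime "k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/scheme"
)

// TemplateData mirrors plugin.DefinitionTemplateData from the patch.
type TemplateData struct {
	SessionID     string
	MasterAddress string
	Namespace     string
}

const raw = `apiVersion: v1
kind: Pod
metadata:
  name: "sonobuoy-e2e-job-{{.SessionID}}"
  annotations:
    "sonobuoy-plugin": "e2e"
    "sonobuoy-driver": "Job"
    "sonobuoy-result-type": "e2e"
`

func main() {
	t := template.Must(template.New("plugin").Parse(raw))

	// Render once with zero values; only the annotations matter at load time.
	var b bytes.Buffer
	if err := t.Execute(&b, &TemplateData{}); err != nil {
		panic(err)
	}

	// Decode into an unstructured object and read the metadata back out,
	// exactly as loadTemplate does in pkg/plugin/loader/loader.go.
	var u unstructured.Unstructured
	if err := kuberuntime.DecodeInto(scheme.Codecs.UniversalDecoder(), b.Bytes(), &u); err != nil {
		panic(err)
	}
	fmt.Println(u.GetAnnotations()["sonobuoy-driver"])      // Job
	fmt.Println(u.GetAnnotations()["sonobuoy-result-type"]) // e2e
}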
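
At dispatch time the drivers run the inverse of the loader: execute the same
template with live session data, then decode the rendered bytes into a typed
object before submitting it to the API server. The sketch below shows that
step for the Job driver; renderPod is a hypothetical helper, not a function in
this patch, and it assumes the same imports as pkg/plugin/driver/job/job.go.

// renderPod renders a parsed plugin template with live session data and
// decodes the result into a typed Pod, the same sequence the Job driver's
// Run method performs.
func renderPod(t *template.Template, data *plugin.DefinitionTemplateData) (*v1.Pod, error) {
	var b bytes.Buffer
	if err := t.Execute(&b, data); err != nil {
		return nil, errors.Wrap(err, "failed to execute plugin template")
	}
	var pod v1.Pod
	if err := kuberuntime.DecodeInto(scheme.Codecs.UniversalDecoder(), b.Bytes(), &pod); err != nil {
		return nil, errors.Wrap(err, "failed to decode rendered template into a Pod")
	}
	// With data.SessionID filled in, pod.Name comes back as e.g.
	// "sonobuoy-e2e-job-<session id>", and the "sonobuoy-run" label lets
	// Cleanup later find everything this session created.
	return &pod, nil
}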
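
The worker picks up the new RESULT_TYPE value through viper's environment
bindings in pkg/worker/config.go, the same way MASTER_URL and RESULTS_DIR
already flow from the template's env section into the worker configuration. A
small standalone illustration of that binding follows; the key and env var
names come from the patch, while the surrounding program is invented.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

func main() {
	// In a real run, the plugin template sets this on the worker container.
	os.Setenv("RESULT_TYPE", "e2e")

	v := viper.New()
	// Each config key is bound to the env var the templates inject.
	v.BindEnv("masterurl", "MASTER_URL")
	v.BindEnv("resultsdir", "RESULTS_DIR")
	v.BindEnv("resulttype", "RESULT_TYPE") // new in this patch

	fmt.Println(v.GetString("resulttype")) // e2e
}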