diff --git a/cmd/preflight/cli/run.go b/cmd/preflight/cli/run.go
index f90cfc685..cb72388e0 100644
--- a/cmd/preflight/cli/run.go
+++ b/cmd/preflight/cli/run.go
@@ -1,33 +1,11 @@
 package cli
 
 import (
-	"bytes"
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"os"
 	"path/filepath"
-	"time"
 
 	troubleshootv1beta1 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta1"
-	"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
-	preflightrunner "github.com/replicatedhq/troubleshoot/pkg/preflight"
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
-	"gopkg.in/yaml.v2"
-	corev1 "k8s.io/api/core/v1"
-	kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/cache"
-	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/config"
 )
 
 func Run() *cobra.Command {
@@ -64,265 +42,6 @@ func Run() *cobra.Command {
 	return cmd
 }
 
-func runPreflightsCRD(v *viper.Viper) error {
-	troubleshootClient, err := createTroubleshootK8sClient()
-	if err != nil {
-		return err
-	}
-
-	preflightName := v.GetString("preflight")
-	if preflightName == "" {
-		preflights, err := troubleshootClient.Preflights(v.GetString("namespace")).List(metav1.ListOptions{})
-		if err != nil {
-			return err
-		}
-
-		if len(preflights.Items) == 1 {
-			preflightName = preflights.Items[0].Name
-		}
-	}
-
-	if preflightName == "" {
-		return errors.New("unable to preflight, try using the --preflight flags")
-	}
-
-	// generate a unique name
-	now := time.Now()
-	suffix := fmt.Sprintf("%d", now.Unix())
-
-	preflightJobName := fmt.Sprintf("%s-job-%s", preflightName, suffix[len(suffix)-4:])
-	preflightJob := troubleshootv1beta1.PreflightJob{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: preflightJobName,
-			Namespace: v.GetString("namespace"),
-		},
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: "v1",
-			Kind: "preflightjob.troubleshoot.replicated.com",
-		},
-		Spec: troubleshootv1beta1.PreflightJobSpec{
-			Preflight: troubleshootv1beta1.PreflightRef{
-				Name: preflightName,
-				Namespace: v.GetString("namespace"),
-			},
-			Image: v.GetString("image"),
-			ImagePullPolicy: v.GetString("pullpolicy"),
-			CollectorImage: v.GetString("collector-image"),
-			CollectorImagePullPolicy: v.GetString("collector-pullpolicy"),
-		},
-	}
-	if _, err := troubleshootClient.PreflightJobs(v.GetString("namespace")).Create(&preflightJob); err != nil {
-		return err
-	}
-
-	// Poll the status of the Custom Resource for it to include a callback
-	var found *troubleshootv1beta1.PreflightJob
-	start := time.Now()
-	for {
-		current, err := troubleshootClient.PreflightJobs(v.GetString("namespace")).Get(preflightJobName, metav1.GetOptions{})
-		if err != nil && kuberneteserrors.IsNotFound(err) {
-			continue
-		} else if err != nil {
-			return err
-		}
-
-		if current.Status.IsServerReady {
-			found = current
-			break
-		}
-
-		if time.Now().Sub(start) > time.Duration(time.Second*10) {
-			return errors.New("preflightjob failed to start")
-		}
-
-		time.Sleep(time.Millisecond * 200)
-	}
-
-	// Connect to the callback
-	stopChan, err := k8sutil.PortForward(v.GetString("kubecontext"), 8000, 8000, found.Status.ServerPodNamespace, found.Status.ServerPodName)
-	if err != nil {
-		return err
-	}
-
-	if err := receivePreflightResults(found.Namespace, found.Name); err != nil {
-		return err
-	}
-
-	// Write
-
-	close(stopChan)
-	return nil
-}
-
-func runPreflightsNoCRD(v *viper.Viper, arg string) error {
-	preflightContent := ""
-	if !isURL(arg) {
-		if _, err := os.Stat(arg); os.IsNotExist(err) {
-			return fmt.Errorf("%s was not found", arg)
-		}
-
-		b, err := ioutil.ReadFile(arg)
-		if err != nil {
-			return err
-		}
-
-		preflightContent = string(b)
-	} else {
-		resp, err := http.Get(arg)
-		if err != nil {
-			return err
-		}
-		defer resp.Body.Close()
-
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return err
-		}
-
-		preflightContent = string(body)
-	}
-
-	preflight := troubleshootv1beta1.Preflight{}
-	if err := yaml.Unmarshal([]byte(preflightContent), &preflight); err != nil {
-		return fmt.Errorf("unable to parse %s as a preflight", arg)
-	}
-
-	cfg, err := config.GetConfig()
-	if err != nil {
-		return err
-	}
-
-	client, err := client.New(cfg, client.Options{})
-	if err != nil {
-		return err
-	}
-	clientset, err := kubernetes.NewForConfig(cfg)
-	if err != nil {
-		return err
-	}
-	restClient := clientset.CoreV1().RESTClient()
-
-	// deploy an object that "owns" everything to aid in cleanup
-	owner := corev1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: fmt.Sprintf("preflight-%s-owner", preflight.Name),
-			Namespace: v.GetString("namespace"),
-		},
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: "v1",
-			Kind: "ConfigMap",
-		},
-		Data: make(map[string]string),
-	}
-	if err := client.Create(context.Background(), &owner); err != nil {
-		return err
-	}
-	defer func() {
-		if err := client.Delete(context.Background(), &owner); err != nil {
-			fmt.Println("failed to clean up preflight.")
-		}
-	}()
-
-	// deploy all collectors
-	desiredCollectors := make([]*troubleshootv1beta1.Collect, 0, 0)
-	for _, definedCollector := range preflight.Spec.Collectors {
-		desiredCollectors = append(desiredCollectors, definedCollector)
-	}
-	desiredCollectors = ensureCollectorInList(desiredCollectors, troubleshootv1beta1.Collect{ClusterInfo: &troubleshootv1beta1.ClusterInfo{}})
-	desiredCollectors = ensureCollectorInList(desiredCollectors, troubleshootv1beta1.Collect{ClusterResources: &troubleshootv1beta1.ClusterResources{}})
-
-	podsCreated := make([]*corev1.Pod, 0, 0)
-	podsDeleted := make([]*corev1.Pod, 0, 0)
-
-	resyncPeriod := time.Second
-	ctx := context.Background()
-	watchList := cache.NewListWatchFromClient(restClient, "pods", "", fields.Everything())
-	_, controller := cache.NewInformer(watchList, &corev1.Pod{}, resyncPeriod,
-		cache.ResourceEventHandlerFuncs{
-			UpdateFunc: func(oldObj interface{}, newObj interface{}) {
-				newPod, ok := newObj.(*corev1.Pod)
-				if !ok {
-					return
-				}
-				oldPod, ok := oldObj.(*corev1.Pod)
-				if !ok {
-					return
-				}
-				labels := newPod.Labels
-				troubleshootRole, ok := labels["troubleshoot-role"]
-				if !ok || troubleshootRole != "preflight" {
-					return
-				}
-				preflightName, ok := labels["preflight"]
-				if !ok || preflightName != preflight.Name {
-					return
-				}
-
-				if oldPod.Status.Phase == newPod.Status.Phase {
-					return
-				}
-
-				if newPod.Status.Phase != corev1.PodSucceeded {
-					return
-				}
-
-				podLogOpts := corev1.PodLogOptions{}
-
-				req := clientset.CoreV1().Pods(newPod.Namespace).GetLogs(newPod.Name, &podLogOpts)
-				podLogs, err := req.Stream()
-				if err != nil {
-					fmt.Println("get stream")
-					return
-				}
-				defer podLogs.Close()
-
-				buf := new(bytes.Buffer)
-				_, err = io.Copy(buf, podLogs)
-				if err != nil {
-					fmt.Println("copy logs")
-					return
-				}
-
-				fmt.Printf(buf.String())
-
-				if err := client.Delete(context.Background(), newPod); err != nil {
-					fmt.Println("delete pod")
-				}
-				podsDeleted = append(podsDeleted, newPod)
-			},
-		})
-	go func() {
-		controller.Run(ctx.Done())
-	}()
-
-	s := runtime.NewScheme()
-	s.AddKnownTypes(schema.GroupVersion{Group: "", Version: "v1"}, &corev1.ConfigMap{})
-	for _, collector := range desiredCollectors {
-		_, pod, err := preflightrunner.CreateCollector(client, s, &owner, preflight.Name, v.GetString("namespace"), collector, v.GetString("image"), v.GetString("pullpolicy"))
-		if err != nil {
-			return err
-		}
-		podsCreated = append(podsCreated, pod)
-	}
-
-	start := time.Now()
-	for {
-		if start.Add(time.Second * 30).Before(time.Now()) {
-			fmt.Println("timeout running preflight")
-			return err
-		}
-
-		if len(podsDeleted) == len(podsCreated) {
-			break
-		}
-
-		time.Sleep(time.Millisecond * 200)
-	}
-
-	ctx.Done()
-	return nil
-}
-
 func ensureCollectorInList(list []*troubleshootv1beta1.Collect, collector troubleshootv1beta1.Collect) []*troubleshootv1beta1.Collect {
 	for _, inList := range list {
 		if collector.ClusterResources != nil && inList.ClusterResources != nil {
diff --git a/cmd/preflight/cli/run_crd.go b/cmd/preflight/cli/run_crd.go
new file mode 100644
index 000000000..002e99327
--- /dev/null
+++ b/cmd/preflight/cli/run_crd.go
@@ -0,0 +1,103 @@
+package cli
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	troubleshootv1beta1 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta1"
+	"github.com/replicatedhq/troubleshoot/pkg/k8sutil"
+	"github.com/spf13/viper"
+	kuberneteserrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func runPreflightsCRD(v *viper.Viper) error {
+	troubleshootClient, err := createTroubleshootK8sClient()
+	if err != nil {
+		return err
+	}
+
+	preflightName := v.GetString("preflight")
+	if preflightName == "" {
+		preflights, err := troubleshootClient.Preflights(v.GetString("namespace")).List(metav1.ListOptions{})
+		if err != nil {
+			return err
+		}
+
+		if len(preflights.Items) == 1 {
+			preflightName = preflights.Items[0].Name
+		}
+	}
+
+	if preflightName == "" {
+		return errors.New("unable to preflight, try using the --preflight flags")
+	}
+
+	// generate a unique name
+	now := time.Now()
+	suffix := fmt.Sprintf("%d", now.Unix())
+
+	preflightJobName := fmt.Sprintf("%s-job-%s", preflightName, suffix[len(suffix)-4:])
+	preflightJob := troubleshootv1beta1.PreflightJob{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: preflightJobName,
+			Namespace: v.GetString("namespace"),
+		},
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind: "preflightjob.troubleshoot.replicated.com",
+		},
+		Spec: troubleshootv1beta1.PreflightJobSpec{
+			Preflight: troubleshootv1beta1.PreflightRef{
+				Name: preflightName,
+				Namespace: v.GetString("namespace"),
+			},
+			Image: v.GetString("image"),
+			ImagePullPolicy: v.GetString("pullpolicy"),
+			CollectorImage: v.GetString("collector-image"),
+			CollectorImagePullPolicy: v.GetString("collector-pullpolicy"),
+		},
+	}
+	if _, err := troubleshootClient.PreflightJobs(v.GetString("namespace")).Create(&preflightJob); err != nil {
+		return err
+	}
+
+	// Poll the status of the Custom Resource for it to include a callback
+	var found *troubleshootv1beta1.PreflightJob
+	start := time.Now()
+	for {
+		current, err := troubleshootClient.PreflightJobs(v.GetString("namespace")).Get(preflightJobName, metav1.GetOptions{})
+		if err != nil && kuberneteserrors.IsNotFound(err) {
+			continue
+		} else if err != nil {
+			return err
+		}
+
+		if current.Status.IsServerReady {
+			found = current
+			break
+		}
+
+		if time.Now().Sub(start) > time.Duration(time.Second*10) {
+			return errors.New("preflightjob failed to start")
+		}
+
+		time.Sleep(time.Millisecond * 200)
+	}
+
+	// Connect to the callback
+	stopChan, err := k8sutil.PortForward(v.GetString("kubecontext"), 8000, 8000, found.Status.ServerPodNamespace, found.Status.ServerPodName)
+	if err != nil {
+		return err
+	}
+
+	if err := receivePreflightResults(found.Namespace, found.Name); err != nil {
+		return err
+	}
+
+	// Write
+
+	close(stopChan)
+	return nil
+}
diff --git a/cmd/preflight/cli/run_nocrd.go b/cmd/preflight/cli/run_nocrd.go
new file mode 100644
index 000000000..e692ae0d1
--- /dev/null
+++ b/cmd/preflight/cli/run_nocrd.go
@@ -0,0 +1,267 @@
+package cli
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path/filepath"
+	"time"
+
+	analyzerunner "github.com/replicatedhq/troubleshoot/pkg/analyze"
+	troubleshootv1beta1 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta1"
+	preflightrunner "github.com/replicatedhq/troubleshoot/pkg/preflight"
+	"github.com/spf13/viper"
+	"gopkg.in/yaml.v2"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/cache"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
+)
+
+func runPreflightsNoCRD(v *viper.Viper, arg string) error {
+	preflightContent := ""
+	if !isURL(arg) {
+		if _, err := os.Stat(arg); os.IsNotExist(err) {
+			return fmt.Errorf("%s was not found", arg)
+		}
+
+		b, err := ioutil.ReadFile(arg)
+		if err != nil {
+			return err
+		}
+
+		preflightContent = string(b)
+	} else {
+		resp, err := http.Get(arg)
+		if err != nil {
+			return err
+		}
+		defer resp.Body.Close()
+
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return err
+		}
+
+		preflightContent = string(body)
+	}
+
+	preflight := troubleshootv1beta1.Preflight{}
+	if err := yaml.Unmarshal([]byte(preflightContent), &preflight); err != nil {
+		return fmt.Errorf("unable to parse %s as a preflight", arg)
+	}
+
+	allCollectedData, err := runCollectors(v, preflight)
+	if err != nil {
+		return err
+	}
+
+	getCollectedFileContents := func(fileName string) ([]byte, error) {
+		contents, ok := allCollectedData[fileName]
+		if !ok {
+			return nil, errors.New("not found")
+		}
+
+		return contents, nil
+	}
+
+	for _, analyzer := range preflight.Spec.Analyzers {
+		analyzeResult, err := analyzerunner.Analyze(analyzer, getCollectedFileContents)
+		if err != nil {
+			return err
+		}
+
+		fmt.Printf("%#v\n", analyzeResult)
+	}
+	return nil
+}
+
+func runCollectors(v *viper.Viper, preflight troubleshootv1beta1.Preflight) (map[string][]byte, error) {
+	cfg, err := config.GetConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := client.New(cfg, client.Options{})
+	if err != nil {
+		return nil, err
+	}
+	clientset, err := kubernetes.NewForConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+	restClient := clientset.CoreV1().RESTClient()
+
+	// deploy an object that "owns" everything to aid in cleanup
+	owner := corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: fmt.Sprintf("preflight-%s-owner", preflight.Name),
+			Namespace: v.GetString("namespace"),
+		},
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "v1",
+			Kind: "ConfigMap",
+		},
+		Data: make(map[string]string),
+	}
+	if err := client.Create(context.Background(), &owner); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := client.Delete(context.Background(), &owner); err != nil {
+			fmt.Println("failed to clean up preflight.")
+		}
+	}()
+
+	// deploy all collectors
+	desiredCollectors := make([]*troubleshootv1beta1.Collect, 0, 0)
+	for _, definedCollector := range preflight.Spec.Collectors {
+		desiredCollectors = append(desiredCollectors, definedCollector)
+	}
+	desiredCollectors = ensureCollectorInList(desiredCollectors, troubleshootv1beta1.Collect{ClusterInfo: &troubleshootv1beta1.ClusterInfo{}})
+	desiredCollectors = ensureCollectorInList(desiredCollectors, troubleshootv1beta1.Collect{ClusterResources: &troubleshootv1beta1.ClusterResources{}})
+
+	podsCreated := make([]*corev1.Pod, 0, 0)
+	podsDeleted := make([]*corev1.Pod, 0, 0)
+	allCollectedData := make(map[string][]byte)
+
+	resyncPeriod := time.Second
+	ctx := context.Background()
+	watchList := cache.NewListWatchFromClient(restClient, "pods", "", fields.Everything())
+	_, controller := cache.NewInformer(watchList, &corev1.Pod{}, resyncPeriod,
+		cache.ResourceEventHandlerFuncs{
+			UpdateFunc: func(oldObj interface{}, newObj interface{}) {
+				newPod, ok := newObj.(*corev1.Pod)
+				if !ok {
+					return
+				}
+				oldPod, ok := oldObj.(*corev1.Pod)
+				if !ok {
+					return
+				}
+				labels := newPod.Labels
+				troubleshootRole, ok := labels["troubleshoot-role"]
+				if !ok || troubleshootRole != "preflight" {
+					return
+				}
+				preflightName, ok := labels["preflight"]
+				if !ok || preflightName != preflight.Name {
+					return
+				}
+
+				if oldPod.Status.Phase == newPod.Status.Phase {
+					return
+				}
+
+				if newPod.Status.Phase != corev1.PodSucceeded {
+					return
+				}
+
+				podLogOpts := corev1.PodLogOptions{}
+
+				req := clientset.CoreV1().Pods(newPod.Namespace).GetLogs(newPod.Name, &podLogOpts)
+				podLogs, err := req.Stream()
+				if err != nil {
+					fmt.Println("get stream")
+					return
+				}
+				defer podLogs.Close()
+
+				buf := new(bytes.Buffer)
+				_, err = io.Copy(buf, podLogs)
+				if err != nil {
+					fmt.Println("copy logs")
+					return
+				}
+
+				collectedData, err := parseCollectorOutput(buf.String())
+				if err != nil {
+					fmt.Printf("parse collected data: %v\n", err)
+					return
+				}
+				for k, v := range collectedData {
+					allCollectedData[k] = v
+				}
+
+				if err := client.Delete(context.Background(), newPod); err != nil {
+					fmt.Println("delete pod")
+				}
+				podsDeleted = append(podsDeleted, newPod)
+			},
+		})
+	go func() {
+		controller.Run(ctx.Done())
+	}()
+
+	s := runtime.NewScheme()
+	s.AddKnownTypes(schema.GroupVersion{Group: "", Version: "v1"}, &corev1.ConfigMap{})
+	for _, collector := range desiredCollectors {
+		_, pod, err := preflightrunner.CreateCollector(client, s, &owner, preflight.Name, v.GetString("namespace"), collector, v.GetString("image"), v.GetString("pullpolicy"))
+		if err != nil {
+			return nil, err
+		}
+		podsCreated = append(podsCreated, pod)
+	}
+
+	start := time.Now()
+	for {
+		if start.Add(time.Second * 30).Before(time.Now()) {
+			fmt.Println("timeout running preflight")
+			return nil, errors.New("timeout running preflight")
+		}
+
+		if len(podsDeleted) == len(podsCreated) {
+			break
+		}
+
+		time.Sleep(time.Millisecond * 200)
+	}
+
+	ctx.Done()
+
+	return allCollectedData, nil
+}
+
+func parseCollectorOutput(output string) (map[string][]byte, error) {
+	input := make(map[string]interface{})
+	files := make(map[string][]byte)
+	if err := json.Unmarshal([]byte(output), &input); err != nil {
+		return nil, err
+	}
+
+	for filename, maybeContents := range input {
+		fileDir, fileName := filepath.Split(filename)
+
+		switch maybeContents.(type) {
+		case string:
+			decoded, err := base64.StdEncoding.DecodeString(maybeContents.(string))
+			if err != nil {
+				return nil, err
+			}
+			files[filepath.Join(fileDir, fileName)] = decoded
+
+		case map[string]interface{}:
+			for k, v := range maybeContents.(map[string]interface{}) {
+				decoded, err := base64.StdEncoding.DecodeString(v.(string))
+				if err != nil {
+					return nil, err
+				}
+				files[filepath.Join(fileDir, fileName, k)] = decoded
+			}
+		}
+	}
+
+	return files, nil
+}
diff --git a/config/crds/troubleshoot.replicated.com_collectors.yaml b/config/crds/troubleshoot.replicated.com_collectors.yaml
index dae73ead0..e84085e0b 100644
--- a/config/crds/troubleshoot.replicated.com_collectors.yaml
+++ b/config/crds/troubleshoot.replicated.com_collectors.yaml
@@ -392,9 +392,9 @@ spec:
           spec:
             items:
               properties:
-                cluster-info:
+                clusterInfo:
                   type: object
-                cluster-resources:
+                clusterResources:
                   type: object
               type: object
             type: array
diff --git a/config/crds/troubleshoot.replicated.com_preflights.yaml b/config/crds/troubleshoot.replicated.com_preflights.yaml
index 470d6920d..78d3db5c9 100644
--- a/config/crds/troubleshoot.replicated.com_preflights.yaml
+++ b/config/crds/troubleshoot.replicated.com_preflights.yaml
@@ -476,9 +476,9 @@ spec:
           collectors:
             items:
               properties:
-                cluster-info:
+                clusterInfo:
                   type: object
-                cluster-resources:
+                clusterResources:
                   type: object
              type: object
            type: array
diff --git a/config/samples/troubleshoot_v1beta1_preflight.yaml b/config/samples/troubleshoot_v1beta1_preflight.yaml
index 75b996a38..5ff553596 100644
--- a/config/samples/troubleshoot_v1beta1_preflight.yaml
+++ b/config/samples/troubleshoot_v1beta1_preflight.yaml
@@ -7,7 +7,7 @@ spec:
     - clusterVersion:
         outcomes:
          - fail:
-              when: "< 1.13.0"
+              when: "< 1.14.0"
              message: You need more kubernetes
          - warn:
              when: "< 1.15.0"
diff --git a/go.mod b/go.mod
index 09586f093..8064a3e44 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,7 @@ module github.com/replicatedhq/troubleshoot
 go 1.12
 
 require (
+	github.com/blang/semver v3.5.1+incompatible
 	github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
 	github.com/dsnet/compress v0.0.1 // indirect
 	github.com/gin-gonic/gin v1.4.0
diff --git a/go.sum b/go.sum
index b20eda31e..8a61c5c53 100644
--- a/go.sum
+++ b/go.sum
@@ -20,6 +20,8 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
diff --git a/pkg/analyze/analyzer.go b/pkg/analyze/analyzer.go
new file mode 100644
index 000000000..9a2ea7d0e
--- /dev/null
+++ b/pkg/analyze/analyzer.go
@@ -0,0 +1,24 @@
+package analyzer
+
+import (
+	"errors"
+
+	troubleshootv1beta1 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta1"
+)
+
+type AnalyzeResult struct {
+	IsPass bool
+	IsFail bool
+	IsWarn bool
+
+	Message string
+	URI string
+}
+
+func Analyze(analyzer *troubleshootv1beta1.Analyze, getCollectedFileContents func(string) ([]byte, error)) (*AnalyzeResult, error) {
+	if analyzer.ClusterVersion != nil {
+		return analyzeClusterVersion(analyzer.ClusterVersion, getCollectedFileContents)
+	}
+
+	return nil, errors.New("invalid analyzer")
+}
diff --git a/pkg/analyze/cluster_version.go b/pkg/analyze/cluster_version.go
new file mode 100644
index 000000000..a9bd8d466
--- /dev/null
+++ b/pkg/analyze/cluster_version.go
@@ -0,0 +1,69 @@
+package analyzer
+
+import (
+	"encoding/json"
+	"errors"
+	"strings"
+
+	"github.com/blang/semver"
+	troubleshootv1beta1 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta1"
+	"github.com/replicatedhq/troubleshoot/pkg/collect"
+)
+
+func analyzeClusterVersion(analyzer *troubleshootv1beta1.ClusterVersion, getCollectedFileContents func(string) ([]byte, error)) (*AnalyzeResult, error) {
+	clusterInfo, err := getCollectedFileContents("cluster-info/cluster_version.json")
+	if err != nil {
+		return nil, err
+	}
+
+	collectorClusterVersion := collect.ClusterVersion{}
+	if err := json.Unmarshal(clusterInfo, &collectorClusterVersion); err != nil {
+		return nil, err
+	}
+
+	k8sVersion, err := semver.Make(strings.TrimLeft(collectorClusterVersion.String, "v"))
+	if err != nil {
+		return nil, err
+	}
+
+	result := AnalyzeResult{}
+	for _, outcome := range analyzer.Outcomes {
+		when := ""
+		message := ""
+		uri := ""
+
+		result = AnalyzeResult{}
+		if outcome.Fail != nil {
+			result.IsFail = true
+			when = outcome.Fail.When
+			message = outcome.Fail.Message
+			uri = outcome.Fail.URI
+		} else if outcome.Warn != nil {
+			result.IsWarn = true
+			when = outcome.Warn.When
+			message = outcome.Warn.Message
+			uri = outcome.Warn.URI
+		} else if outcome.Pass != nil {
+			result.IsPass = true
+			when = outcome.Pass.When
+			message = outcome.Pass.Message
+			uri = outcome.Pass.URI
+		} else {
+			return nil, errors.New("empty outcome")
+		}
+
+		whenRange, err := semver.ParseRange(when)
+		if err != nil {
+			return nil, err
+		}
+
+		if whenRange(k8sVersion) {
+			result.Message = message
+			result.URI = uri
+
+			return &result, nil
+		}
+	}
+
+	return &AnalyzeResult{}, nil
+}