From bec29147f7b33e5fcd4713d8967bb3e509595bb7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Mon, 13 Sep 2021 16:27:47 +0200 Subject: [PATCH 01/25] refactor: simplifying the logic for merge method --- pkg/controller/status/status.go | 146 ++++++++++++++++++-------------- 1 file changed, 84 insertions(+), 62 deletions(-) diff --git a/pkg/controller/status/status.go b/pkg/controller/status/status.go index a73c1d302..6728059db 100644 --- a/pkg/controller/status/status.go +++ b/pkg/controller/status/status.go @@ -142,10 +142,17 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster // calculate the current controller state var last time.Time - var reason string + var errorReason string var errs []string - var uploadErrorReason, uploadErrorMessage, disabledReason, disabledMessage, downloadReason, downloadMessage string + var uploadErrorReason, + uploadErrorMessage, + disabledReason, + disabledMessage, + downloadReason, + downloadMessage string + allReady := true + for i, source := range c.Sources() { summary, ready := source.CurrentStatus() if !ready { @@ -185,7 +192,7 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster } if degradingFailure { - reason = summary.Reason + errorReason = summary.Reason errs = append(errs, summary.Message) } @@ -193,16 +200,18 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster last = summary.LastTransitionTime } } + var errorMessage string + switch len(errs) { case 0: case 1: - if len(reason) == 0 { - reason = "UnknownError" + if len(errorReason) == 0 { + errorReason = "UnknownError" } errorMessage = errs[0] default: - reason = "MultipleFailures" + errorReason = "MultipleFailures" sort.Strings(errs) errorMessage = fmt.Sprintf("There are multiple errors blocking progress:\n* %s", strings.Join(errs, "\n* ")) } @@ -252,61 +261,10 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster default: // once we've initialized set Failing and Disabled as best we know - if len(disabledMessage) > 0 { - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: OperatorDisabled, - Status: configv1.ConditionTrue, - Reason: disabledReason, - Message: disabledMessage, - }) - } else { - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: OperatorDisabled, - Status: configv1.ConditionFalse, - Reason: "AsExpected", - }) - } - - if len(errorMessage) > 0 { - klog.V(4).Infof("The operator has some internal errors: %s", errorMessage) - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorDegraded, - Status: configv1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: last}, - Reason: reason, - Message: errorMessage, - }) - } else { - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorDegraded, - Status: configv1.ConditionFalse, - Reason: "AsExpected", - }) - } - - if len(uploadErrorReason) > 0 { - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: UploadDegraded, - Status: configv1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: last}, - Reason: uploadErrorReason, - Message: uploadErrorMessage, - }) - } else { - removeOperatorStatusCondition(&existing.Status.Conditions, UploadDegraded) - } - - if len(downloadReason) > 0 { - 
setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: InsightsDownloadDegraded, - Status: configv1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: last}, - Reason: downloadReason, - Message: downloadMessage, - }) - } else { - removeOperatorStatusCondition(&existing.Status.Conditions, InsightsDownloadDegraded) - } + setDisabledOperatorStatusCondition(existing, disabledReason, disabledMessage) + setDegradedOperatorStatusCondition(existing, last, errorReason, errorMessage) + setUploadDegradedOperatorStatusCondition(existing, last, uploadErrorReason, uploadErrorMessage) + setInsightsDownloadDegradedOperatorStatusCondition(existing, last, downloadReason, downloadMessage) } // once the operator is running it is always considered available @@ -346,7 +304,7 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse, LastTransitionTime: metav1.Time{Time: last}, - Reason: reason, + Reason: errorReason, Message: disabledMessage, }) @@ -374,6 +332,70 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster return existing } +func setInsightsDownloadDegradedOperatorStatusCondition(existing *configv1.ClusterOperator, last time.Time, downloadReason string, downloadMessage string) { + if len(downloadReason) > 0 { + setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + Type: InsightsDownloadDegraded, + Status: configv1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: last}, + Reason: downloadReason, + Message: downloadMessage, + }) + } else { + removeOperatorStatusCondition(&existing.Status.Conditions, InsightsDownloadDegraded) + } +} + +func setUploadDegradedOperatorStatusCondition(existing *configv1.ClusterOperator, last time.Time, uploadErrorReason string, uploadErrorMessage string) { + if len(uploadErrorReason) > 0 { + setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + Type: UploadDegraded, + Status: configv1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: last}, + Reason: uploadErrorReason, + Message: uploadErrorMessage, + }) + } else { + removeOperatorStatusCondition(&existing.Status.Conditions, UploadDegraded) + } +} + +func setDegradedOperatorStatusCondition(existing *configv1.ClusterOperator, last time.Time, errorReason string, errorMessage string,) { + if len(errorMessage) > 0 { + klog.V(4).Infof("The operator has some internal errors: %s", errorMessage) + setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + Type: configv1.OperatorDegraded, + Status: configv1.ConditionTrue, + LastTransitionTime: metav1.Time{Time: last}, + Reason: errorReason, + Message: errorMessage, + }) + } else { + setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + Type: configv1.OperatorDegraded, + Status: configv1.ConditionFalse, + Reason: "AsExpected", + }) + } +} + +func setDisabledOperatorStatusCondition(existing *configv1.ClusterOperator, disabledMessage string, disabledReason string) { + if len(disabledMessage) > 0 { + setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + Type: OperatorDisabled, + Status: configv1.ConditionTrue, + Reason: disabledReason, + Message: disabledMessage, + }) + } else { + setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + Type: 
OperatorDisabled, + Status: configv1.ConditionFalse, + Reason: "AsExpected", + }) + } +} + // Start starts the periodic checking of sources. func (c *Controller) Start(ctx context.Context) error { if err := c.updateStatus(ctx, true); err != nil { From 28986b2a2e1ff73a0bf68413ce61967b9230aabb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Tue, 14 Sep 2021 10:37:04 +0200 Subject: [PATCH 02/25] refactor: extracting methods --- pkg/controller/status/status.go | 68 +++++++++++++++++++-------------- 1 file changed, 39 insertions(+), 29 deletions(-) diff --git a/pkg/controller/status/status.go b/pkg/controller/status/status.go index 6728059db..6bf8d45d3 100644 --- a/pkg/controller/status/status.go +++ b/pkg/controller/status/status.go @@ -201,20 +201,19 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster } } + // handling errors var errorMessage string - - switch len(errs) { - case 0: - case 1: + if len(errs) > 1 { + errorReason = "MultipleFailures" + sort.Strings(errs) + errorMessage = fmt.Sprintf("There are multiple errors blocking progress:\n* %s", strings.Join(errs, "\n* ")) + } else { if len(errorReason) == 0 { errorReason = "UnknownError" } errorMessage = errs[0] - default: - errorReason = "MultipleFailures" - sort.Strings(errs) - errorMessage = fmt.Sprintf("There are multiple errors blocking progress:\n* %s", strings.Join(errs, "\n* ")) } + // disabled state only when it's disabled by config. It means that gathering will not happen if !c.configurator.Config().Report { disabledReason = "Disabled" @@ -239,6 +238,35 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster isInitializing := !allReady && now.Sub(c.controllerStartTime()) < 3*time.Minute // update the disabled and failing conditions + c.updateDisabledAndFailingConditions(existing, isInitializing, last, disabledReason, disabledMessage, errorReason, errorMessage, uploadErrorReason, uploadErrorMessage, downloadReason, downloadMessage) + + // once the operator is running it is always considered available + setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + Type: configv1.OperatorAvailable, + Status: configv1.ConditionTrue, + Reason: "AsExpected", + }) + + // update the Progressing condition with a summary of the current state + c.updateProcessingConditionWithSummary(existing, isInitializing, last, errorMessage, disabledMessage, errorReason) + + if release := os.Getenv("RELEASE_VERSION"); len(release) > 0 { + existing.Status.Versions = []configv1.OperandVersion{ + {Name: "operator", Version: release}, + } + } + + if data, err := json.Marshal(reported); err != nil { + klog.Errorf("Unable to marshal status extension: %v", err) + } else { + existing.Status.Extension.Raw = data + } + return existing +} + +func (c *Controller) updateDisabledAndFailingConditions(existing *configv1.ClusterOperator, isInitializing bool, + last time.Time, disabledReason, disabledMessage, errorReason, errorMessage, uploadErrorReason, uploadErrorMessage, + downloadReason, downloadMessage string) { switch { case isInitializing: // the disabled condition is optional, but set it now if we already know we're disabled @@ -266,15 +294,10 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster setUploadDegradedOperatorStatusCondition(existing, last, uploadErrorReason, uploadErrorMessage) setInsightsDownloadDegradedOperatorStatusCondition(existing, last, downloadReason, downloadMessage) } +} - // once the operator 
is running it is always considered available - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorAvailable, - Status: configv1.ConditionTrue, - Reason: "AsExpected", - }) - - // update the Progressing condition with a summary of the current state +func (c *Controller) updateProcessingConditionWithSummary(existing *configv1.ClusterOperator, + isInitializing bool, last time.Time, errorMessage, disabledMessage, errorReason string) { switch { case isInitializing: klog.V(4).Infof("The operator is still being initialized") @@ -317,19 +340,6 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster Message: "Monitoring the cluster", }) } - - if release := os.Getenv("RELEASE_VERSION"); len(release) > 0 { - existing.Status.Versions = []configv1.OperandVersion{ - {Name: "operator", Version: release}, - } - } - - if data, err := json.Marshal(reported); err != nil { - klog.Errorf("Unable to marshal status extension: %v", err) - } else { - existing.Status.Extension.Raw = data - } - return existing } func setInsightsDownloadDegradedOperatorStatusCondition(existing *configv1.ClusterOperator, last time.Time, downloadReason string, downloadMessage string) { From a32b4db77b648832b1256f7c550317433bf40d2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Tue, 14 Sep 2021 14:27:27 +0200 Subject: [PATCH 03/25] refactor: improving variable names --- pkg/controller/status/status.go | 88 +++++++++++++++++---------------- 1 file changed, 45 insertions(+), 43 deletions(-) diff --git a/pkg/controller/status/status.go b/pkg/controller/status/status.go index 6bf8d45d3..08431d3e6 100644 --- a/pkg/controller/status/status.go +++ b/pkg/controller/status/status.go @@ -130,10 +130,10 @@ func (c *Controller) Sources() []controllerstatus.Interface { return c.sources } -func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.ClusterOperator { //nolint: funlen, gocyclo +func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1.ClusterOperator { // prime the object if it does not exist - if existing == nil { - existing = &configv1.ClusterOperator{ + if clusterOperator == nil { + clusterOperator = &configv1.ClusterOperator{ ObjectMeta: metav1.ObjectMeta{ Name: c.name, }, @@ -141,7 +141,7 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster } // calculate the current controller state - var last time.Time + var lastTransition time.Time var errorReason string var errs []string var uploadErrorReason, @@ -172,11 +172,11 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster if summary.Operation == controllerstatus.Uploading { if summary.Count < uploadFailuresCountThreshold { - klog.V(4).Infof("Number of last upload failures %d lower than threshold %d. Not marking as degraded.", + klog.V(4).Infof("Number of lastTransition upload failures %d lower than threshold %d. Not marking as degraded.", summary.Count, uploadFailuresCountThreshold) } else { degradingFailure = true - klog.V(4).Infof("Number of last upload failures %d exceeded than threshold %d. Marking as degraded.", + klog.V(4).Infof("Number of lastTransition upload failures %d exceeded than threshold %d. 
Marking as degraded.", summary.Count, uploadFailuresCountThreshold) } uploadErrorReason = summary.Reason @@ -196,8 +196,8 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster errs = append(errs, summary.Message) } - if last.Before(summary.LastTransitionTime) { - last = summary.LastTransitionTime + if lastTransition.Before(summary.LastTransitionTime) { + lastTransition = summary.LastTransitionTime } } @@ -211,7 +211,9 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster if len(errorReason) == 0 { errorReason = "UnknownError" } - errorMessage = errs[0] + if len(errs) > 0 { + errorMessage = errs[0] + } } // disabled state only when it's disabled by config. It means that gathering will not happen @@ -220,10 +222,10 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster disabledMessage = "Health reporting is disabled" } - existing = existing.DeepCopy() + clusterOperator = clusterOperator.DeepCopy() now := time.Now() if len(c.namespace) > 0 { - existing.Status.RelatedObjects = []configv1.ObjectReference{ + clusterOperator.Status.RelatedObjects = []configv1.ObjectReference{ {Resource: "namespaces", Name: c.namespace}, {Group: "apps", Resource: "deployments", Namespace: c.namespace, Name: "insights-operator"}, {Resource: "secrets", Namespace: "openshift-config", Name: "pull-secret"}, @@ -238,20 +240,20 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster isInitializing := !allReady && now.Sub(c.controllerStartTime()) < 3*time.Minute // update the disabled and failing conditions - c.updateDisabledAndFailingConditions(existing, isInitializing, last, disabledReason, disabledMessage, errorReason, errorMessage, uploadErrorReason, uploadErrorMessage, downloadReason, downloadMessage) + updateDisabledAndFailingConditions(clusterOperator, isInitializing, lastTransition, disabledReason, disabledMessage, errorReason, errorMessage, uploadErrorReason, uploadErrorMessage, downloadReason, downloadMessage) // once the operator is running it is always considered available - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ Type: configv1.OperatorAvailable, Status: configv1.ConditionTrue, Reason: "AsExpected", }) // update the Progressing condition with a summary of the current state - c.updateProcessingConditionWithSummary(existing, isInitializing, last, errorMessage, disabledMessage, errorReason) + updateProcessingConditionWithSummary(clusterOperator, isInitializing, lastTransition, errorMessage, disabledMessage, errorReason) if release := os.Getenv("RELEASE_VERSION"); len(release) > 0 { - existing.Status.Versions = []configv1.OperandVersion{ + clusterOperator.Status.Versions = []configv1.OperandVersion{ {Name: "operator", Version: release}, } } @@ -259,19 +261,19 @@ func (c *Controller) merge(existing *configv1.ClusterOperator) *configv1.Cluster if data, err := json.Marshal(reported); err != nil { klog.Errorf("Unable to marshal status extension: %v", err) } else { - existing.Status.Extension.Raw = data + clusterOperator.Status.Extension.Raw = data } - return existing + return clusterOperator } -func (c *Controller) updateDisabledAndFailingConditions(existing *configv1.ClusterOperator, isInitializing bool, +func updateDisabledAndFailingConditions(clusterOperator *configv1.ClusterOperator, isInitializing bool, last time.Time, disabledReason, 
disabledMessage, errorReason, errorMessage, uploadErrorReason, uploadErrorMessage, downloadReason, downloadMessage string) { switch { case isInitializing: // the disabled condition is optional, but set it now if we already know we're disabled if len(disabledReason) > 0 { - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ Type: OperatorDisabled, Status: configv1.ConditionTrue, Reason: disabledReason, @@ -279,8 +281,8 @@ func (c *Controller) updateDisabledAndFailingConditions(existing *configv1.Clust }) } - if findOperatorStatusCondition(existing.Status.Conditions, configv1.OperatorDegraded) == nil { - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + if findOperatorStatusCondition(clusterOperator.Status.Conditions, configv1.OperatorDegraded) == nil { + setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse, Reason: "AsExpected", @@ -289,22 +291,22 @@ func (c *Controller) updateDisabledAndFailingConditions(existing *configv1.Clust default: // once we've initialized set Failing and Disabled as best we know - setDisabledOperatorStatusCondition(existing, disabledReason, disabledMessage) - setDegradedOperatorStatusCondition(existing, last, errorReason, errorMessage) - setUploadDegradedOperatorStatusCondition(existing, last, uploadErrorReason, uploadErrorMessage) - setInsightsDownloadDegradedOperatorStatusCondition(existing, last, downloadReason, downloadMessage) + setDisabledOperatorStatusCondition(clusterOperator, disabledReason, disabledMessage) + setDegradedOperatorStatusCondition(clusterOperator, last, errorReason, errorMessage) + setUploadDegradedOperatorStatusCondition(clusterOperator, last, uploadErrorReason, uploadErrorMessage) + setInsightsDownloadDegradedOperatorStatusCondition(clusterOperator, last, downloadReason, downloadMessage) } } -func (c *Controller) updateProcessingConditionWithSummary(existing *configv1.ClusterOperator, +func updateProcessingConditionWithSummary(clusterOperator *configv1.ClusterOperator, isInitializing bool, last time.Time, errorMessage, disabledMessage, errorReason string) { switch { case isInitializing: klog.V(4).Infof("The operator is still being initialized") // if we're still starting up and some sources are not ready, initialize the conditions // but don't update - if findOperatorStatusCondition(existing.Status.Conditions, configv1.OperatorProgressing) == nil { - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + if findOperatorStatusCondition(clusterOperator.Status.Conditions, configv1.OperatorProgressing) == nil { + setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ Type: configv1.OperatorProgressing, Status: configv1.ConditionTrue, Reason: "Initializing", @@ -314,7 +316,7 @@ func (c *Controller) updateProcessingConditionWithSummary(existing *configv1.Clu case len(errorMessage) > 0: klog.V(4).Infof("The operator has some internal errors: %s", errorMessage) - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse, Reason: 
"Degraded", @@ -323,7 +325,7 @@ func (c *Controller) updateProcessingConditionWithSummary(existing *configv1.Clu case len(disabledMessage) > 0: klog.V(4).Infof("The operator is marked as disabled") - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse, LastTransitionTime: metav1.Time{Time: last}, @@ -333,7 +335,7 @@ func (c *Controller) updateProcessingConditionWithSummary(existing *configv1.Clu default: klog.V(4).Infof("The operator is healthy") - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ Type: configv1.OperatorProgressing, Status: configv1.ConditionFalse, Reason: "AsExpected", @@ -342,9 +344,9 @@ func (c *Controller) updateProcessingConditionWithSummary(existing *configv1.Clu } } -func setInsightsDownloadDegradedOperatorStatusCondition(existing *configv1.ClusterOperator, last time.Time, downloadReason string, downloadMessage string) { +func setInsightsDownloadDegradedOperatorStatusCondition(clusterOperator *configv1.ClusterOperator, last time.Time, downloadReason string, downloadMessage string) { if len(downloadReason) > 0 { - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ Type: InsightsDownloadDegraded, Status: configv1.ConditionTrue, LastTransitionTime: metav1.Time{Time: last}, @@ -352,13 +354,13 @@ func setInsightsDownloadDegradedOperatorStatusCondition(existing *configv1.Clust Message: downloadMessage, }) } else { - removeOperatorStatusCondition(&existing.Status.Conditions, InsightsDownloadDegraded) + removeOperatorStatusCondition(&clusterOperator.Status.Conditions, InsightsDownloadDegraded) } } -func setUploadDegradedOperatorStatusCondition(existing *configv1.ClusterOperator, last time.Time, uploadErrorReason string, uploadErrorMessage string) { +func setUploadDegradedOperatorStatusCondition(clusterOperator *configv1.ClusterOperator, last time.Time, uploadErrorReason string, uploadErrorMessage string) { if len(uploadErrorReason) > 0 { - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ Type: UploadDegraded, Status: configv1.ConditionTrue, LastTransitionTime: metav1.Time{Time: last}, @@ -366,14 +368,14 @@ func setUploadDegradedOperatorStatusCondition(existing *configv1.ClusterOperator Message: uploadErrorMessage, }) } else { - removeOperatorStatusCondition(&existing.Status.Conditions, UploadDegraded) + removeOperatorStatusCondition(&clusterOperator.Status.Conditions, UploadDegraded) } } -func setDegradedOperatorStatusCondition(existing *configv1.ClusterOperator, last time.Time, errorReason string, errorMessage string,) { +func setDegradedOperatorStatusCondition(clusterOperator *configv1.ClusterOperator, last time.Time, errorReason string, errorMessage string,) { if len(errorMessage) > 0 { klog.V(4).Infof("The operator has some internal errors: %s", errorMessage) - setOperatorStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{ + 
setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{
 Type: configv1.OperatorDegraded,
 Status: configv1.ConditionTrue,
 LastTransitionTime: metav1.Time{Time: last},
 Reason: errorReason,
 Message: errorMessage,
 })
 } else {
 setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{
 Type: configv1.OperatorDegraded,
 Status: configv1.ConditionFalse,
 Reason: "AsExpected",
 })
 }
}

func setDisabledOperatorStatusCondition(clusterOperator *configv1.ClusterOperator, disabledMessage string, disabledReason string) {
 if len(disabledMessage) > 0 {
 setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{
 Type: OperatorDisabled,
 Status: configv1.ConditionTrue,
 Reason: disabledReason,
 Message: disabledMessage,
 })
 } else {
 setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{
 Type: OperatorDisabled,
 Status: configv1.ConditionFalse,
 Reason: "AsExpected",
 })
 }
}

From 3cf6071205cc74c892d10814d21d0e41f1799226 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Tue, 14 Sep 2021 16:58:21 +0200
Subject: [PATCH 04/25] refactor: improves condition handling

---
 pkg/controller/status/conditions.go | 84 ++++++++
 pkg/controller/status/status.go | 295 +++++++++------------------
 2 files changed, 174 insertions(+), 205 deletions(-)
 create mode 100644 pkg/controller/status/conditions.go

diff --git a/pkg/controller/status/conditions.go b/pkg/controller/status/conditions.go
new file mode 100644
index 000000000..030c6f451
--- /dev/null
+++ b/pkg/controller/status/conditions.go
@@ -0,0 +1,84 @@
+package status
+
+import (
+ v1 "github.com/openshift/api/config/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+ // OperatorDisabled defines the condition type when the operator's primary function has been disabled
+ OperatorDisabled v1.ClusterStatusConditionType = "Disabled"
+ // InsightsUploadDegraded defines the condition type (when set to True) when an archive can't be successfully uploaded
+ InsightsUploadDegraded v1.ClusterStatusConditionType = "UploadDegraded"
+ // InsightsDownloadDegraded defines the condition type (when set to True) when the Insights report can't be successfully downloaded
+ InsightsDownloadDegraded v1.ClusterStatusConditionType = "InsightsDownloadDegraded"
+)
+
+type conditionsMap map[v1.ClusterStatusConditionType]v1.ClusterOperatorStatusCondition
+
+type conditions struct {
+ entryMap conditionsMap
+}
+
+func newConditions(cos v1.ClusterOperatorStatus) *conditions {
+ entries := conditionsMap{}
+ for _, c := range cos.Conditions {
+ entries[c.Type] = c
+ }
+ return &conditions{
+ entryMap: entries,
+ }
+}
+
+func (c *conditions) setCondition(condition v1.ClusterStatusConditionType,
+ status v1.ConditionStatus, reason, message string, lastTime metav1.Time) {
+ entries := make(conditionsMap)
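+ // build an updated copy of the current conditions map (copy-on-write) instead of mutating entryMap in place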
for k, v := range c.entryMap { + entries[k] = v + } + + existing, ok := c.entryMap[condition] + if !ok || existing.Status != status || existing.Reason != reason { + if lastTime.IsZero() { + lastTime = metav1.Now() + } + entries[condition] = v1.ClusterOperatorStatusCondition{ + Type: condition, + Status: status, + Reason: reason, + Message: message, + LastTransitionTime: lastTime, + } + } + + c.entryMap = entries +} + +func (c *conditions) removeCondition(condition v1.ClusterStatusConditionType) { + if !c.hasCondition(condition) { + return + } + + entries := make(conditionsMap) + for k, v := range c.entryMap { + if k == condition { + continue + } + entries[k] = v + } + + c.entryMap = entries +} + +func (c *conditions) hasCondition(condition v1.ClusterStatusConditionType) bool { + _, ok := c.entryMap[condition] + return ok +} + +func (c *conditions) findCondition(condition v1.ClusterStatusConditionType) *v1.ClusterOperatorStatusCondition { + existing, ok := c.entryMap[condition] + if ok { + return &existing + } + return nil +} diff --git a/pkg/controller/status/status.go b/pkg/controller/status/status.go index 08431d3e6..a5687be5b 100644 --- a/pkg/controller/status/status.go +++ b/pkg/controller/status/status.go @@ -25,14 +25,8 @@ import ( ) const ( - // OperatorDisabled defines the condition type when the operator's primary funcion has been disabled - OperatorDisabled configv1.ClusterStatusConditionType = "Disabled" - // UploadDegraded defines the condition type (when set to True) when an archive can't be successfully uploaded - UploadDegraded configv1.ClusterStatusConditionType = "UploadDegraded" - // InsightsDownloadDegraded defines the condition type (when set to True) when a Insights report can't be successfully downloaded - InsightsDownloadDegraded configv1.ClusterStatusConditionType = "InsightsDownloadDegraded" // How many upload failures in a row we tolerate before starting reporting - // as UploadDegraded + // as InsightsUploadDegraded uploadFailuresCountThreshold = 5 // GatherFailuresCountThreshold defines how many gatherings can fail in a row before we report Degraded GatherFailuresCountThreshold = 5 @@ -54,18 +48,21 @@ type Configurator interface { type Controller struct { name string namespace string + client configv1client.ConfigV1Interface coreClient corev1client.CoreV1Interface + statusCh chan struct{} configurator Configurator - lock sync.Mutex sources []controllerstatus.Interface reported Reported start time.Time + + lock sync.Mutex } -// NewController creates a status controller, responsible for monitoring the operators status and updating the it's cluster status accordingly. +// NewController creates a status controller, responsible for monitoring the operators status and updating its cluster status accordingly. func NewController(client configv1client.ConfigV1Interface, coreClient corev1client.CoreV1Interface, configurator Configurator, namespace string) *Controller { @@ -221,36 +218,30 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1. 
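// these values feed the OperatorDisabled condition and the Progressing summary below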
disabledReason = "Disabled" disabledMessage = "Health reporting is disabled" } + reported := Reported{LastReportTime: metav1.Time{Time: c.LastReportedTime()}} clusterOperator = clusterOperator.DeepCopy() now := time.Now() if len(c.namespace) > 0 { - clusterOperator.Status.RelatedObjects = []configv1.ObjectReference{ - {Resource: "namespaces", Name: c.namespace}, - {Group: "apps", Resource: "deployments", Namespace: c.namespace, Name: "insights-operator"}, - {Resource: "secrets", Namespace: "openshift-config", Name: "pull-secret"}, - {Resource: "secrets", Namespace: "openshift-config", Name: "support"}, - {Resource: "serviceaccounts", Namespace: c.namespace, Name: "gather"}, - {Resource: "serviceaccounts", Namespace: c.namespace, Name: "operator"}, - {Resource: "services", Namespace: c.namespace, Name: "metrics"}, - {Resource: "configmaps", Namespace: c.namespace, Name: "service-ca-bundle"}, - } + clusterOperator.Status.RelatedObjects = c.relatedObjects() } - reported := Reported{LastReportTime: metav1.Time{Time: c.LastReportedTime()}} isInitializing := !allReady && now.Sub(c.controllerStartTime()) < 3*time.Minute + cs := newConditions(clusterOperator.Status) + // update the disabled and failing conditions - updateDisabledAndFailingConditions(clusterOperator, isInitializing, lastTransition, disabledReason, disabledMessage, errorReason, errorMessage, uploadErrorReason, uploadErrorMessage, downloadReason, downloadMessage) + updateDisabledAndFailingConditions(cs, isInitializing, lastTransition, + disabledReason, disabledMessage, + errorReason, errorMessage, + uploadErrorReason, uploadErrorMessage, + downloadReason, downloadMessage) // once the operator is running it is always considered available - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorAvailable, - Status: configv1.ConditionTrue, - Reason: "AsExpected", - }) + cs.setCondition(configv1.OperatorAvailable, configv1.ConditionTrue, "AsExpected", "", metav1.Now()) // update the Progressing condition with a summary of the current state - updateProcessingConditionWithSummary(clusterOperator, isInitializing, lastTransition, errorMessage, disabledMessage, errorReason) + updateProcessingConditionWithSummary(cs, isInitializing, lastTransition, + errorMessage, errorReason, disabledMessage) if release := os.Getenv("RELEASE_VERSION"); len(release) > 0 { clusterOperator.Status.Versions = []configv1.OperandVersion{ @@ -266,145 +257,16 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1. 
return clusterOperator } -func updateDisabledAndFailingConditions(clusterOperator *configv1.ClusterOperator, isInitializing bool, - last time.Time, disabledReason, disabledMessage, errorReason, errorMessage, uploadErrorReason, uploadErrorMessage, - downloadReason, downloadMessage string) { - switch { - case isInitializing: - // the disabled condition is optional, but set it now if we already know we're disabled - if len(disabledReason) > 0 { - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: OperatorDisabled, - Status: configv1.ConditionTrue, - Reason: disabledReason, - Message: disabledMessage, - }) - } - - if findOperatorStatusCondition(clusterOperator.Status.Conditions, configv1.OperatorDegraded) == nil { - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorDegraded, - Status: configv1.ConditionFalse, - Reason: "AsExpected", - }) - } - - default: - // once we've initialized set Failing and Disabled as best we know - setDisabledOperatorStatusCondition(clusterOperator, disabledReason, disabledMessage) - setDegradedOperatorStatusCondition(clusterOperator, last, errorReason, errorMessage) - setUploadDegradedOperatorStatusCondition(clusterOperator, last, uploadErrorReason, uploadErrorMessage) - setInsightsDownloadDegradedOperatorStatusCondition(clusterOperator, last, downloadReason, downloadMessage) - } -} - -func updateProcessingConditionWithSummary(clusterOperator *configv1.ClusterOperator, - isInitializing bool, last time.Time, errorMessage, disabledMessage, errorReason string) { - switch { - case isInitializing: - klog.V(4).Infof("The operator is still being initialized") - // if we're still starting up and some sources are not ready, initialize the conditions - // but don't update - if findOperatorStatusCondition(clusterOperator.Status.Conditions, configv1.OperatorProgressing) == nil { - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorProgressing, - Status: configv1.ConditionTrue, - Reason: "Initializing", - Message: "Initializing the operator", - }) - } - - case len(errorMessage) > 0: - klog.V(4).Infof("The operator has some internal errors: %s", errorMessage) - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorProgressing, - Status: configv1.ConditionFalse, - Reason: "Degraded", - Message: "An error has occurred", - }) - - case len(disabledMessage) > 0: - klog.V(4).Infof("The operator is marked as disabled") - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorProgressing, - Status: configv1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: last}, - Reason: errorReason, - Message: disabledMessage, - }) - - default: - klog.V(4).Infof("The operator is healthy") - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorProgressing, - Status: configv1.ConditionFalse, - Reason: "AsExpected", - Message: "Monitoring the cluster", - }) - } -} - -func setInsightsDownloadDegradedOperatorStatusCondition(clusterOperator *configv1.ClusterOperator, last time.Time, downloadReason string, downloadMessage string) { - if len(downloadReason) > 0 { - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - 
Type: InsightsDownloadDegraded, - Status: configv1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: last}, - Reason: downloadReason, - Message: downloadMessage, - }) - } else { - removeOperatorStatusCondition(&clusterOperator.Status.Conditions, InsightsDownloadDegraded) - } -} - -func setUploadDegradedOperatorStatusCondition(clusterOperator *configv1.ClusterOperator, last time.Time, uploadErrorReason string, uploadErrorMessage string) { - if len(uploadErrorReason) > 0 { - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: UploadDegraded, - Status: configv1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: last}, - Reason: uploadErrorReason, - Message: uploadErrorMessage, - }) - } else { - removeOperatorStatusCondition(&clusterOperator.Status.Conditions, UploadDegraded) - } -} - -func setDegradedOperatorStatusCondition(clusterOperator *configv1.ClusterOperator, last time.Time, errorReason string, errorMessage string,) { - if len(errorMessage) > 0 { - klog.V(4).Infof("The operator has some internal errors: %s", errorMessage) - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorDegraded, - Status: configv1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: last}, - Reason: errorReason, - Message: errorMessage, - }) - } else { - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: configv1.OperatorDegraded, - Status: configv1.ConditionFalse, - Reason: "AsExpected", - }) - } -} - -func setDisabledOperatorStatusCondition(clusterOperator *configv1.ClusterOperator, disabledMessage string, disabledReason string) { - if len(disabledMessage) > 0 { - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: OperatorDisabled, - Status: configv1.ConditionTrue, - Reason: disabledReason, - Message: disabledMessage, - }) - } else { - setOperatorStatusCondition(&clusterOperator.Status.Conditions, configv1.ClusterOperatorStatusCondition{ - Type: OperatorDisabled, - Status: configv1.ConditionFalse, - Reason: "AsExpected", - }) +func (c *Controller) relatedObjects() []configv1.ObjectReference { + return []configv1.ObjectReference{ + {Resource: "namespaces", Name: c.namespace}, + {Group: "apps", Resource: "deployments", Namespace: c.namespace, Name: "insights-operator"}, + {Resource: "secrets", Namespace: "openshift-config", Name: "pull-secret"}, + {Resource: "secrets", Namespace: "openshift-config", Name: "support"}, + {Resource: "serviceaccounts", Namespace: c.namespace, Name: "gather"}, + {Resource: "serviceaccounts", Namespace: c.namespace, Name: "operator"}, + {Resource: "services", Namespace: c.namespace, Name: "metrics"}, + {Resource: "configmaps", Namespace: c.namespace, Name: "service-ca-bundle"}, } } @@ -456,7 +318,8 @@ func (c *Controller) updateStatus(ctx context.Context, initial bool) error { } } c.SetLastReportedTime(reported.LastReportTime.Time.UTC()) - if con := findOperatorStatusCondition(existing.Status.Conditions, configv1.OperatorDegraded); con == nil || + cs := newConditions(existing.Status) + if con := cs.findCondition(configv1.OperatorDegraded); con == nil || con != nil && con.Status == configv1.ConditionFalse { klog.Info("The initial operator extension status is healthy") } @@ -480,52 +343,74 @@ func (c *Controller) updateStatus(ctx context.Context, initial bool) error { return err } -func 
setOperatorStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, - newCondition configv1.ClusterOperatorStatusCondition) { //nolint: gocritic - if conditions == nil { - conditions = &[]configv1.ClusterOperatorStatusCondition{} - } - existingCondition := findOperatorStatusCondition(*conditions, newCondition.Type) - if existingCondition == nil { - newCondition.LastTransitionTime = metav1.NewTime(time.Now()) - *conditions = append(*conditions, newCondition) - return - } - if existingCondition.Status != newCondition.Status { - existingCondition.Status = newCondition.Status - existingCondition.LastTransitionTime = newCondition.LastTransitionTime - } +func updateDisabledAndFailingConditions(cs *conditions, isInitializing bool, + last time.Time, disabledReason, disabledMessage, errorReason, errorMessage, uploadErrorReason, uploadErrorMessage, + downloadReason, downloadMessage string) { + switch { + case isInitializing: + // the disabled condition is optional, but set it now if we already know we're disabled + if len(disabledReason) > 0 { + cs.setCondition(OperatorDisabled, configv1.ConditionTrue, disabledReason, disabledMessage, metav1.Now()) + } - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message + if !cs.hasCondition(configv1.OperatorDegraded) { + cs.setCondition(configv1.OperatorDegraded, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) + } - if existingCondition.LastTransitionTime.IsZero() { - existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) - } -} + default: // once we've initialized set Failing and Disabled as best we know + // handle when disabled + if len(disabledMessage) > 0 { + cs.setCondition(OperatorDisabled, configv1.ConditionTrue, disabledReason, disabledMessage, metav1.Now()) + } else { + cs.setCondition(OperatorDisabled, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) + } -func removeOperatorStatusCondition(conditions *[]configv1.ClusterOperatorStatusCondition, - conditionType configv1.ClusterStatusConditionType) { - if conditions == nil { - return - } - var newConditions []configv1.ClusterOperatorStatusCondition - for _, condition := range *conditions { - if condition.Type != conditionType { - newConditions = append(newConditions, condition) + // handle when degraded + if len(errorMessage) > 0 { + klog.V(4).Infof("The operator has some internal errors: %s", errorMessage) + cs.setCondition(configv1.OperatorDegraded, configv1.ConditionTrue, errorReason, errorMessage, metav1.Time{Time: last}) + } else { + cs.setCondition(configv1.OperatorDegraded, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) } - } - *conditions = newConditions + // handle when upload fails + if len(uploadErrorReason) > 0 { + cs.setCondition(InsightsUploadDegraded, configv1.ConditionTrue, uploadErrorReason, uploadErrorMessage, metav1.Time{Time: last}) + } else { + cs.removeCondition(InsightsUploadDegraded) + } + + // handle when download fails + if len(downloadReason) > 0 { + cs.setCondition(InsightsDownloadDegraded, configv1.ConditionTrue, downloadReason, downloadMessage, metav1.Time{Time: last}) + } else { + cs.removeCondition(InsightsDownloadDegraded) + } + } } -func findOperatorStatusCondition(conditions []configv1.ClusterOperatorStatusCondition, - conditionType configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition { - for i := range conditions { - if conditions[i].Type == conditionType { - return &conditions[i] +func updateProcessingConditionWithSummary(cs *conditions, + 
isInitializing bool, last time.Time, errorMessage, errorReason, disabledMessage string) { + switch { + case isInitializing: + klog.V(4).Infof("The operator is still being initialized") + // if we're still starting up and some sources are not ready, initialize the conditions + // but don't update + if !cs.hasCondition(configv1.OperatorProgressing) { + cs.setCondition(configv1.OperatorProgressing, configv1.ConditionTrue, "Initializing", "Initializing the operator", metav1.Now()) } + + case len(errorMessage) > 0: + klog.V(4).Infof("The operator has some internal errors: %s", errorMessage) + cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "Degraded", "An error has occurred", metav1.Now()) + + case len(disabledMessage) > 0: + klog.V(4).Infof("The operator is marked as disabled") + cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, errorReason, disabledMessage, metav1.Time{Time: last}) + + default: + klog.V(4).Infof("The operator is healthy") + cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "AsExpected", "Monitoring the cluster", metav1.Now()) } - return nil } From f32abb367a3828bf2c2ffadf53beac57c4e22031 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Thu, 16 Sep 2021 11:19:01 +0200 Subject: [PATCH 05/25] refactor: rename status*.go to controller*.go --- .../status/{status.go => controller.go} | 77 +++++++++++-------- .../{status_test.go => controller_test.go} | 0 2 files changed, 43 insertions(+), 34 deletions(-) rename pkg/controller/status/{status.go => controller.go} (93%) rename pkg/controller/status/{status_test.go => controller_test.go} (100%) diff --git a/pkg/controller/status/status.go b/pkg/controller/status/controller.go similarity index 93% rename from pkg/controller/status/status.go rename to pkg/controller/status/controller.go index a5687be5b..329e2ee81 100644 --- a/pkg/controller/status/status.go +++ b/pkg/controller/status/controller.go @@ -138,18 +138,54 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1. 
} // calculate the current controller state + lastTransition, errorReason, uploadErrorReason, uploadErrorMessage, disabledReason, + disabledMessage, downloadReason, downloadMessage, errorMessage, reported, + clusterOperator, isInitializing := c.currentControllerStatus(clusterOperator) + + cs := newConditions(clusterOperator.Status) + + // update the disabled and failing conditions + updateDisabledAndFailingConditions(cs, isInitializing, lastTransition, + disabledReason, disabledMessage, + errorReason, errorMessage, + uploadErrorReason, uploadErrorMessage, + downloadReason, downloadMessage) + + // once the operator is running it is always considered available + cs.setCondition(configv1.OperatorAvailable, configv1.ConditionTrue, "AsExpected", "", metav1.Now()) + + // update the Progressing condition with a summary of the current state + updateProcessingConditionWithSummary(cs, isInitializing, lastTransition, + errorMessage, errorReason, disabledMessage) + + if release := os.Getenv("RELEASE_VERSION"); len(release) > 0 { + clusterOperator.Status.Versions = []configv1.OperandVersion{ + {Name: "operator", Version: release}, + } + } + + if data, err := json.Marshal(reported); err != nil { + klog.Errorf("Unable to marshal status extension: %v", err) + } else { + clusterOperator.Status.Extension.Raw = data + } + return clusterOperator +} + +func (c *Controller) currentControllerStatus(clusterOperator *configv1.ClusterOperator) (time.Time, string, string, string, string, string, string, string, string, Reported, *configv1.ClusterOperator, bool) { var lastTransition time.Time var errorReason string var errs []string var uploadErrorReason, - uploadErrorMessage, - disabledReason, - disabledMessage, - downloadReason, - downloadMessage string + uploadErrorMessage, + disabledReason, + disabledMessage, + downloadReason, + downloadMessage string allReady := true + // FIXME: This stuff isn't doing anything related to the ClusterOperator for i, source := range c.Sources() { summary, ready := source.CurrentStatus() if !ready { @@ -220,41 +256,14 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1. 
} reported := Reported{LastReportTime: metav1.Time{Time: c.LastReportedTime()}} + // FIXME: Now we start to do things with the ClusterOperator clusterOperator = clusterOperator.DeepCopy() now := time.Now() if len(c.namespace) > 0 { clusterOperator.Status.RelatedObjects = c.relatedObjects() } isInitializing := !allReady && now.Sub(c.controllerStartTime()) < 3*time.Minute - - cs := newConditions(clusterOperator.Status) - - // update the disabled and failing conditions - updateDisabledAndFailingConditions(cs, isInitializing, lastTransition, - disabledReason, disabledMessage, - errorReason, errorMessage, - uploadErrorReason, uploadErrorMessage, - downloadReason, downloadMessage) - - // once the operator is running it is always considered available - cs.setCondition(configv1.OperatorAvailable, configv1.ConditionTrue, "AsExpected", "", metav1.Now()) - - // update the Progressing condition with a summary of the current state - updateProcessingConditionWithSummary(cs, isInitializing, lastTransition, - errorMessage, errorReason, disabledMessage) - - if release := os.Getenv("RELEASE_VERSION"); len(release) > 0 { - clusterOperator.Status.Versions = []configv1.OperandVersion{ - {Name: "operator", Version: release}, - } - } - - if data, err := json.Marshal(reported); err != nil { - klog.Errorf("Unable to marshal status extension: %v", err) - } else { - clusterOperator.Status.Extension.Raw = data - } - return clusterOperator + return lastTransition, errorReason, uploadErrorReason, uploadErrorMessage, disabledReason, disabledMessage, downloadReason, downloadMessage, errorMessage, reported, clusterOperator, isInitializing } func (c *Controller) relatedObjects() []configv1.ObjectReference { diff --git a/pkg/controller/status/status_test.go b/pkg/controller/status/controller_test.go similarity index 100% rename from pkg/controller/status/status_test.go rename to pkg/controller/status/controller_test.go From c6fd8464c1366c7063d2fbfe6317a0ed9f7c71b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Thu, 16 Sep 2021 11:28:32 +0200 Subject: [PATCH 06/25] refactor: introduce controller status and status messages --- pkg/controller/status/controller.go | 159 +++++++++++++--------------- pkg/controller/status/status.go | 54 ++++++++++ 2 files changed, 128 insertions(+), 85 deletions(-) create mode 100644 pkg/controller/status/status.go diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go index 329e2ee81..9ba0fcf82 100644 --- a/pkg/controller/status/controller.go +++ b/pkg/controller/status/controller.go @@ -43,8 +43,8 @@ type Configurator interface { Config() *config.Controller } -// Controller is the type responsible for managing the status of the operator according to the status of the sources. -// Sources come from different major parts of the codebase, for the purpose of communicating their status with the controller. +// Controller is the type responsible for managing the statusMessage of the operator according to the statusMessage of the sources. +// Sources come from different major parts of the codebase, for the purpose of communicating their statusMessage with the controller. type Controller struct { name string namespace string @@ -62,7 +62,7 @@ type Controller struct { lock sync.Mutex } -// NewController creates a status controller, responsible for monitoring the operators status and updating its cluster status accordingly. 
+// NewController creates a status controller, responsible for monitoring the operator's status and updating its cluster status accordingly.
 func NewController(client configv1client.ConfigV1Interface, coreClient corev1client.CoreV1Interface,
 configurator Configurator, namespace string) *Controller {
@@ -137,26 +137,26 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1.
 }
 }

+ ctrlStatus := newControllerStatus()
 // calculate the current controller state
- lastTransition, errorReason, uploadErrorReason, uploadErrorMessage, disabledReason,
- disabledMessage, downloadReason, downloadMessage, errorMessage, reported,
- clusterOperator, isInitializing := c.currentControllerStatus(clusterOperator)
+ allReady, lastTransition := c.currentControllerStatus(ctrlStatus)

 clusterOperator = clusterOperator.DeepCopy()
 now := time.Now()
 if len(c.namespace) > 0 {
 clusterOperator.Status.RelatedObjects = c.relatedObjects()
 }

 isInitializing := !allReady && now.Sub(c.controllerStartTime()) < 3*time.Minute
+
+ // cluster operator conditions
 cs := newConditions(clusterOperator.Status)
 updateDisabledAndFailingConditions(cs, ctrlStatus, isInitializing, lastTransition)

 // once the operator is running it is always considered available
 cs.setCondition(configv1.OperatorAvailable, configv1.ConditionTrue, "AsExpected", "", metav1.Now())

 updateProcessingConditionWithSummary(cs, ctrlStatus, isInitializing, lastTransition)

 if release := os.Getenv("RELEASE_VERSION"); len(release) > 0 {
 clusterOperator.Status.Versions = []configv1.OperandVersion{
 {Name: "operator", Version: release},
}
 }

+ reported := Reported{LastReportTime: metav1.Time{Time: c.LastReportedTime()}}
 if data, err := json.Marshal(reported); err != nil {
 klog.Errorf("Unable to marshal status extension: %v", err)
 } else {
 clusterOperator.Status.Extension.Raw = data
 }
 return clusterOperator
}

-func (c *Controller) currentControllerStatus(clusterOperator *configv1.ClusterOperator) (time.Time, string, string, string, string, string, string, string, string, Reported, *configv1.ClusterOperator, bool) {
+func (c *Controller) currentControllerStatus(ctrlStatus *controllerStatus) (allReady bool, lastTransition time.Time) {
+ var errorReason, errorMessage string
 var errs []string

+ allReady = true

 for i, source := range c.Sources() {
 summary, ready := source.CurrentStatus()
 if !ready {
 klog.V(4).Infof("Source %d %T is not ready", i, source)
 allReady = false
 continue
 }
 if summary.Healthy {
 continue
 }
 if len(summary.Message) == 0 {
 klog.Errorf("Programmer error: status source %d %T reported an empty message: %#v", i, source, summary)
 continue
 }

 degradingFailure := false

 if summary.Operation == controllerstatus.Uploading {
 if summary.Count < uploadFailuresCountThreshold {
 klog.V(4).Infof("Number of last upload failures %d lower than threshold %d. Not marking as degraded.",
 summary.Count, uploadFailuresCountThreshold)
 } else {
 degradingFailure = true
 klog.V(4).Infof("Number of last upload failures %d exceeded the threshold %d. Marking as degraded.",
 summary.Count, uploadFailuresCountThreshold)
 }
+ ctrlStatus.setStatus(UploadStatus, summary.Reason, summary.Message)
 } else if summary.Operation == controllerstatus.DownloadingReport {
 klog.V(4).Info("Failed to download Insights report")
+ ctrlStatus.setStatus(DownloadStatus, summary.Reason, summary.Message)
 } else if summary.Operation == controllerstatus.PullingSCACerts {
 klog.V(4).Infof("Failed to download the SCA certs within the threshold %d with exponential backoff. Marking as degraded.",
 OCMAPIFailureCountThreshold)
 degradingFailure = true
 }

 if degradingFailure {
 errorReason = summary.Reason
 errs = append(errs, summary.Message)
 }

 if lastTransition.Before(summary.LastTransitionTime) {
 lastTransition = summary.LastTransitionTime
 }
 }

 // handling errors
+ errorReason, errorMessage = handleControllerStatusError(errs, errorReason, errorMessage)
+ ctrlStatus.setStatus(ErrorStatus, errorReason, errorMessage)

 // disabled state only when it's disabled by config.
It means that gathering will not happen if !c.configurator.Config().Report { - disabledReason = "Disabled" - disabledMessage = "Health reporting is disabled" + ctrlStatus.setStatus(DisabledStatus, "Disabled", "Health reporting is disabled") } - reported := Reported{LastReportTime: metav1.Time{Time: c.LastReportedTime()}} - // FIXME: Now we start to do things with the ClusterOperator - clusterOperator = clusterOperator.DeepCopy() - now := time.Now() - if len(c.namespace) > 0 { - clusterOperator.Status.RelatedObjects = c.relatedObjects() - } - isInitializing := !allReady && now.Sub(c.controllerStartTime()) < 3*time.Minute - return lastTransition, errorReason, uploadErrorReason, uploadErrorMessage, disabledReason, disabledMessage, downloadReason, downloadMessage, errorMessage, reported, clusterOperator, isInitializing + return allReady, lastTransition } func (c *Controller) relatedObjects() []configv1.ObjectReference { @@ -299,11 +270,11 @@ func (c *Controller) Start(ctx context.Context) error { case <-c.statusCh: err := limiter.Wait(ctx) if err != nil { - klog.Errorf("Limiter error by status: %v", err) + klog.Errorf("Limiter error by statusMessage: %v", err) } } if err := c.updateStatus(ctx, false); err != nil { - klog.Errorf("Unable to write cluster operator status: %v", err) + klog.Errorf("Unable to write cluster operator statusMessage: %v", err) } } }, time.Second, ctx.Done()) @@ -323,14 +294,14 @@ func (c *Controller) updateStatus(ctx context.Context, initial bool) error { var reported Reported if len(existing.Status.Extension.Raw) > 0 { if err := json.Unmarshal(existing.Status.Extension.Raw, &reported); err != nil { //nolint: govet - klog.Errorf("The initial operator extension status is invalid: %v", err) + klog.Errorf("The initial operator extension statusMessage is invalid: %v", err) } } c.SetLastReportedTime(reported.LastReportTime.Time.UTC()) cs := newConditions(existing.Status) if con := cs.findCondition(configv1.OperatorDegraded); con == nil || con != nil && con.Status == configv1.ConditionFalse { - klog.Info("The initial operator extension status is healthy") + klog.Info("The initial operator extension statusMessage is healthy") } } } @@ -344,7 +315,7 @@ func (c *Controller) updateStatus(ctx context.Context, initial bool) error { updated.ObjectMeta = created.ObjectMeta updated.Spec = created.Spec } else if reflect.DeepEqual(updated.Status, existing.Status) { - klog.V(4).Infof("No status update necessary, objects are identical") + klog.V(4).Infof("No statusMessage update necessary, objects are identical") return nil } @@ -352,55 +323,53 @@ func (c *Controller) updateStatus(ctx context.Context, initial bool) error { return err } - -func updateDisabledAndFailingConditions(cs *conditions, isInitializing bool, - last time.Time, disabledReason, disabledMessage, errorReason, errorMessage, uploadErrorReason, uploadErrorMessage, - downloadReason, downloadMessage string) { +func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerStatus, + isInitializing bool, lastTransition time.Time) { switch { case isInitializing: // the disabled condition is optional, but set it now if we already know we're disabled - if len(disabledReason) > 0 { - cs.setCondition(OperatorDisabled, configv1.ConditionTrue, disabledReason, disabledMessage, metav1.Now()) + if ds := ctrlStatus.getStatus(DisabledStatus); ds != nil { + cs.setCondition(OperatorDisabled, configv1.ConditionTrue, ds.reason, ds.message, metav1.Now()) } - if !cs.hasCondition(configv1.OperatorDegraded) { 
cs.setCondition(configv1.OperatorDegraded, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) } default: // once we've initialized set Failing and Disabled as best we know // handle when disabled - if len(disabledMessage) > 0 { - cs.setCondition(OperatorDisabled, configv1.ConditionTrue, disabledReason, disabledMessage, metav1.Now()) + if ds := ctrlStatus.getStatus(DisabledStatus); ds != nil { + cs.setCondition(OperatorDisabled, configv1.ConditionTrue, ds.reason, ds.message, metav1.Now()) } else { cs.setCondition(OperatorDisabled, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) } // handle when degraded - if len(errorMessage) > 0 { - klog.V(4).Infof("The operator has some internal errors: %s", errorMessage) - cs.setCondition(configv1.OperatorDegraded, configv1.ConditionTrue, errorReason, errorMessage, metav1.Time{Time: last}) + if es := ctrlStatus.getStatus(ErrorStatus); es != nil { + klog.V(4).Infof("The operator has some internal errors: %s", es.message) + cs.setCondition(configv1.OperatorDegraded, configv1.ConditionTrue, es.reason, es.message, metav1.Time{Time: lastTransition}) } else { cs.setCondition(configv1.OperatorDegraded, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) } // handle when upload fails - if len(uploadErrorReason) > 0 { - cs.setCondition(InsightsUploadDegraded, configv1.ConditionTrue, uploadErrorReason, uploadErrorMessage, metav1.Time{Time: last}) + if ur := ctrlStatus.getStatus(UploadStatus); ur != nil { + cs.setCondition(InsightsUploadDegraded, configv1.ConditionTrue, ur.reason, ur.message, metav1.Time{Time: lastTransition}) } else { cs.removeCondition(InsightsUploadDegraded) } // handle when download fails - if len(downloadReason) > 0 { - cs.setCondition(InsightsDownloadDegraded, configv1.ConditionTrue, downloadReason, downloadMessage, metav1.Time{Time: last}) + if ds := ctrlStatus.getStatus(DownloadStatus); ds != nil { + cs.setCondition(InsightsDownloadDegraded, configv1.ConditionTrue, ds.reason, ds.message, metav1.Time{Time: lastTransition}) } else { cs.removeCondition(InsightsDownloadDegraded) } } } -func updateProcessingConditionWithSummary(cs *conditions, - isInitializing bool, last time.Time, errorMessage, errorReason, disabledMessage string) { +func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controllerStatus, + isInitializing bool, lastTransition time.Time) { + switch { case isInitializing: klog.V(4).Infof("The operator is still being initialized") @@ -410,16 +379,36 @@ func updateProcessingConditionWithSummary(cs *conditions, cs.setCondition(configv1.OperatorProgressing, configv1.ConditionTrue, "Initializing", "Initializing the operator", metav1.Now()) } - case len(errorMessage) > 0: - klog.V(4).Infof("The operator has some internal errors: %s", errorMessage) + case ctrlStatus.hasStatus(ErrorStatus): + es := ctrlStatus.getStatus(ErrorStatus) + klog.V(4).Infof("The operator has some internal errors: %s", es.reason) cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "Degraded", "An error has occurred", metav1.Now()) - case len(disabledMessage) > 0: + case ctrlStatus.hasStatus(DisabledStatus): + ds := ctrlStatus.getStatus(DisabledStatus) klog.V(4).Infof("The operator is marked as disabled") - cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, errorReason, disabledMessage, metav1.Time{Time: last}) + cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, ds.reason, ds.message, metav1.Time{Time: lastTransition}) default: klog.V(4).Infof("The operator is healthy") 
cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "AsExpected", "Monitoring the cluster", metav1.Now()) } } + +func handleControllerStatusError(errs []string, errorReason string, errorMessage string) (string, string) { + if len(errs) > 1 { + errorReason = "MultipleFailures" + sort.Strings(errs) + errorMessage = fmt.Sprintf("There are multiple errors blocking progress:\n* %s", strings.Join(errs, "\n* ")) + } else { + if len(errs) > 0 { + errorMessage = errs[0] + } else { + errorMessage = "Unknown error message" + } + } + if len(errorReason) == 0 { + errorReason = "UnknownError" + } + return errorReason, errorMessage +} diff --git a/pkg/controller/status/status.go b/pkg/controller/status/status.go new file mode 100644 index 000000000..07e96456a --- /dev/null +++ b/pkg/controller/status/status.go @@ -0,0 +1,54 @@ +package status + +const ( + DisabledStatus = "disabled" + UploadStatus = "upload" + DownloadStatus = "download" + ErrorStatus = "error" +) + +type controllerStatus struct { + statusMap map[string]statusMessage +} + +type statusMessage struct { + reason string + message string +} + +func newControllerStatus() *controllerStatus { + return &controllerStatus{ + statusMap: map[string]statusMessage{}, + } +} + +func (c *controllerStatus) setStatus(id, reason, message string) { + entries := make(map[string]statusMessage) + for k, v := range c.statusMap { + entries[k] = v + } + + existing, ok := c.statusMap[id] + if !ok || existing.reason != reason || existing.message != message { + entries[id] = statusMessage{ + reason: reason, + message: message, + } + } + + c.statusMap = entries +} + +func (c *controllerStatus) getStatus(id string) *statusMessage { + s, ok := c.statusMap[id] + if !ok { + return nil + } + + return &s +} + +func (c *controllerStatus) hasStatus(id string) bool { + _, ok := c.statusMap[id] + return ok +} From 2814df41ac253f3d7d5dedda95254ac4dc88e5ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Thu, 16 Sep 2021 12:45:22 +0200 Subject: [PATCH 07/25] style: fixing linting issues --- pkg/controller/status/conditions.go | 10 +++++----- pkg/controller/status/controller.go | 31 ++++++++++++++--------------- pkg/controller/status/status.go | 4 ++-- 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/pkg/controller/status/conditions.go b/pkg/controller/status/conditions.go index 030c6f451..3e5c8568c 100644 --- a/pkg/controller/status/conditions.go +++ b/pkg/controller/status/conditions.go @@ -20,7 +20,7 @@ type conditions struct { entryMap conditionsMap } -func newConditions(cos v1.ClusterOperatorStatus) *conditions { +func newConditions(cos *v1.ClusterOperatorStatus) *conditions { entries := conditionsMap{} for _, c := range cos.Conditions { entries[c.Type] = c @@ -43,10 +43,10 @@ func (c *conditions) setCondition(condition v1.ClusterStatusConditionType, lastTime = metav1.Now() } entries[condition] = v1.ClusterOperatorStatusCondition{ - Type: condition, - Status: status, - Reason: reason, - Message: message, + Type: condition, + Status: status, + Reason: reason, + Message: message, LastTransitionTime: lastTime, } } diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go index 9ba0fcf82..75b01a838 100644 --- a/pkg/controller/status/controller.go +++ b/pkg/controller/status/controller.go @@ -46,11 +46,11 @@ type Configurator interface { // Controller is the type responsible for managing the statusMessage of the operator according to the statusMessage of the sources. 
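 // Editor's note — an illustration, not part of this patch: a "source" here is
 // anything able to report a controllerstatus summary. A minimal hypothetical
 // source, assuming the CurrentStatus() (Summary, bool) contract and the Summary
 // fields (Healthy, Reason, Message, LastTransitionTime) used throughout this
 // series, could look like:
 //
 //	type staticSource struct{ healthy bool }
 //
 //	func (s staticSource) CurrentStatus() (controllerstatus.Summary, bool) {
 //		return controllerstatus.Summary{
 //			Healthy:            s.healthy,
 //			Reason:             "Static",
 //			Message:            "example source used for illustration",
 //			LastTransitionTime: time.Now(),
 //		}, true
 //	}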
// Sources come from different major parts of the codebase, for the purpose of communicating their statusMessage with the controller. type Controller struct { - name string - namespace string + name string + namespace string - client configv1client.ConfigV1Interface - coreClient corev1client.CoreV1Interface + client configv1client.ConfigV1Interface + coreClient corev1client.CoreV1Interface statusCh chan struct{} configurator Configurator @@ -59,7 +59,7 @@ type Controller struct { reported Reported start time.Time - lock sync.Mutex + lock sync.Mutex } // NewController creates a statusMessage controller, responsible for monitoring the operators statusMessage and updating its cluster statusMessage accordingly. @@ -150,7 +150,7 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1. isInitializing := !allReady && now.Sub(c.controllerStartTime()) < 3*time.Minute // cluster operator conditions - cs := newConditions(clusterOperator.Status) + cs := newConditions(&clusterOperator.Status) updateDisabledAndFailingConditions(cs, ctrlStatus, isInitializing, lastTransition) // once the operator is running it is always considered available @@ -174,7 +174,7 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1. } func (c *Controller) currentControllerStatus(ctrlStatus *controllerStatus) (allReady bool, lastTransition time.Time) { - var errorReason, errorMessage string + var errorReason string var errs []string allReady = false @@ -226,7 +226,7 @@ func (c *Controller) currentControllerStatus(ctrlStatus *controllerStatus) (allR } // handling errors - errorReason, errorMessage = handleControllerStatusError(errs, errorReason, errorMessage) + errorReason, errorMessage := handleControllerStatusError(errs, errorReason) ctrlStatus.setStatus(ErrorStatus, errorReason, errorMessage) // disabled state only when it's disabled by config. 
It means that gathering will not happen @@ -298,7 +298,7 @@ func (c *Controller) updateStatus(ctx context.Context, initial bool) error { } } c.SetLastReportedTime(reported.LastReportTime.Time.UTC()) - cs := newConditions(existing.Status) + cs := newConditions(&existing.Status) if con := cs.findCondition(configv1.OperatorDegraded); con == nil || con != nil && con.Status == configv1.ConditionFalse { klog.Info("The initial operator extension statusMessage is healthy") @@ -332,7 +332,7 @@ func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerSt cs.setCondition(OperatorDisabled, configv1.ConditionTrue, ds.reason, ds.message, metav1.Now()) } if !cs.hasCondition(configv1.OperatorDegraded) { - cs.setCondition(configv1.OperatorDegraded, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) + cs.setCondition(configv1.OperatorDegraded, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) } default: // once we've initialized set Failing and Disabled as best we know @@ -369,7 +369,6 @@ func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerSt func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controllerStatus, isInitializing bool, lastTransition time.Time) { - switch { case isInitializing: klog.V(4).Infof("The operator is still being initialized") @@ -395,20 +394,20 @@ func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controller } } -func handleControllerStatusError(errs []string, errorReason string, errorMessage string) (string, string) { +func handleControllerStatusError(errs []string, errorReason string) (reason, message string) { if len(errs) > 1 { errorReason = "MultipleFailures" sort.Strings(errs) - errorMessage = fmt.Sprintf("There are multiple errors blocking progress:\n* %s", strings.Join(errs, "\n* ")) + message = fmt.Sprintf("There are multiple errors blocking progress:\n* %s", strings.Join(errs, "\n* ")) } else { if len(errs) > 0 { - errorMessage = errs[0] + message = errs[0] } else { - errorMessage = "Unknown error message" + message = "Unknown error message" } } if len(errorReason) == 0 { errorReason = "UnknownError" } - return errorReason, errorMessage + return errorReason, message } diff --git a/pkg/controller/status/status.go b/pkg/controller/status/status.go index 07e96456a..c1837deea 100644 --- a/pkg/controller/status/status.go +++ b/pkg/controller/status/status.go @@ -12,7 +12,7 @@ type controllerStatus struct { } type statusMessage struct { - reason string + reason string message string } @@ -31,7 +31,7 @@ func (c *controllerStatus) setStatus(id, reason, message string) { existing, ok := c.statusMap[id] if !ok || existing.reason != reason || existing.message != message { entries[id] = statusMessage{ - reason: reason, + reason: reason, message: message, } } From c4755bb5f03832b95cd6d4ed843d7961d9da03f4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Thu, 16 Sep 2021 15:59:48 +0200 Subject: [PATCH 08/25] fix: error handling to status.go --- pkg/controller/status/controller.go | 20 +++++++++----------- pkg/controller/status/status.go | 2 +- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go index 75b01a838..4a6d06ead 100644 --- a/pkg/controller/status/controller.go +++ b/pkg/controller/status/controller.go @@ -227,7 +227,9 @@ func (c *Controller) currentControllerStatus(ctrlStatus *controllerStatus) (allR // handling errors errorReason, errorMessage := handleControllerStatusError(errs, 
 		errorReason)
-	ctrlStatus.setStatus(ErrorStatus, errorReason, errorMessage)
+	if errorReason != "" || errorMessage != "" {
+		ctrlStatus.setStatus(ErrorStatus, errorReason, errorMessage)
+	}
 
 	// disabled state only when it's disabled by config. It means that gathering will not happen
 	if !c.configurator.Config().Report {
@@ -396,18 +398,14 @@ func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controller
 
 func handleControllerStatusError(errs []string, errorReason string) (reason, message string) {
 	if len(errs) > 1 {
-		errorReason = "MultipleFailures"
+		reason = "MultipleFailures"
 		sort.Strings(errs)
 		message = fmt.Sprintf("There are multiple errors blocking progress:\n* %s", strings.Join(errs, "\n* "))
-	} else {
-		if len(errs) > 0 {
-			message = errs[0]
-		} else {
-			message = "Unknown error message"
+	} else if len(errs) == 1 {
+		if len(errorReason) == 0 {
+			reason = "UnknownError"
 		}
+		message = errs[0]
 	}
-	if len(errorReason) == 0 {
-		errorReason = "UnknownError"
-	}
-	return errorReason, message
+	return reason, message
 }
diff --git a/pkg/controller/status/status.go b/pkg/controller/status/status.go
index c1837deea..f809ae471 100644
--- a/pkg/controller/status/status.go
+++ b/pkg/controller/status/status.go
@@ -18,7 +18,7 @@ type statusMessage struct {
 
 func newControllerStatus() *controllerStatus {
 	return &controllerStatus{
-		statusMap: map[string]statusMessage{},
+		statusMap: make(map[string]statusMessage),
 	}
 }

From 5ee08372aea7050874a2cb63554453118f7cd173 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Fri, 17 Sep 2021 10:08:39 +0200
Subject: [PATCH 09/25] style: converting TODOS to issues

---
 pkg/controller/operator.go          | 20 +--------
 pkg/controller/status/controller.go | 67 +++++++++++++++--------------
 2 files changed, 35 insertions(+), 52 deletions(-)

diff --git a/pkg/controller/operator.go b/pkg/controller/operator.go
index 36f5afc96..c3980ffc9 100644
--- a/pkg/controller/operator.go
+++ b/pkg/controller/operator.go
@@ -60,10 +60,6 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller
 	if err != nil {
 		return err
 	}
-	configClient, err := configv1client.NewForConfig(controller.KubeConfig)
-	if err != nil {
-		return err
-	}
 	// these are gathering configs
 	gatherProtoKubeConfig := rest.CopyConfig(controller.ProtoKubeConfig)
 	if len(s.Impersonate) > 0 {
@@ -75,8 +71,6 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller
 	}
 
 	// the metrics client will connect to prometheus and scrape a small set of metrics
-	// TODO: the oauth-proxy and delegating authorizer do not support Impersonate-User,
-	// so we do not impersonate gather
 	metricsGatherKubeConfig := rest.CopyConfig(controller.KubeConfig)
 	metricsGatherKubeConfig.CAFile = metricCAFile
 	metricsGatherKubeConfig.NegotiatedSerializer = scheme.Codecs
@@ -86,10 +80,6 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller
 	// If we fail, it's likely due to the service CA not existing yet. Warn and continue,
 	// and when the service-ca is loaded we will be restarted.
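 // Editor's note — an illustration, not part of this patch: the
 // handleControllerStatusError rewrite in the hunk above folds the collected
 // source errors into a single Degraded reason/message pair. Assuming the
 // function exactly as shown in this series, two errors collapse as follows:
 //
 //	reason, message := handleControllerStatusError(
 //		[]string{"upload failed", "download failed"}, "")
 //	// reason  == "MultipleFailures"
 //	// message == "There are multiple errors blocking progress:\n" +
 //	//            "* download failed\n* upload failed"   (errors are sorted)
 //
 // One observable quirk of this revision: on the single-error path the incoming
 // errorReason is only checked for emptiness, never copied to the returned
 // reason, so a non-empty reason supplied by a source is dropped.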
- gatherKubeClient, err := kubernetes.NewForConfig(gatherProtoKubeConfig) - if err != nil { - return err - } // ensure the insight snapshot directory exists if _, err = os.Stat(s.StoragePath); err != nil && os.IsNotExist(err) { if err = os.MkdirAll(s.StoragePath, 0777); err != nil { @@ -103,7 +93,7 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller // the status controller initializes the cluster operator object and retrieves // the last sync time, if any was set - statusReporter := status.NewController(configClient, gatherKubeClient.CoreV1(), configObserver, os.Getenv("POD_NAMESPACE")) + statusReporter := status.NewController(configObserver, os.Getenv("POD_NAMESPACE")) var anonymizer *anonymization.Anonymizer if anonymization.IsObfuscationEnabled(configObserver) { @@ -148,14 +138,6 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller uploader := insightsuploader.New(recdriver, insightsClient, configObserver, statusReporter, initialDelay) statusReporter.AddSources(uploader) - // TODO: future ideas - // - // * poll periodically for new insights commands to run, then delegate - // * periodically dump crashlooping pod logs / save their messages - // * watch cluster version for an upgrade, go into extra capture mode - // * gather heap dumps from core components when master memory is above - // a threshold - // start reporting status now that all controller loops are added as sources if err = statusReporter.Start(ctx); err != nil { return fmt.Errorf("unable to set initial cluster status: %v", err) diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go index 4a6d06ead..5dcc6dd18 100644 --- a/pkg/controller/status/controller.go +++ b/pkg/controller/status/controller.go @@ -63,13 +63,9 @@ type Controller struct { } // NewController creates a statusMessage controller, responsible for monitoring the operators statusMessage and updating its cluster statusMessage accordingly. -func NewController(client configv1client.ConfigV1Interface, - coreClient corev1client.CoreV1Interface, - configurator Configurator, namespace string) *Controller { +func NewController(configurator Configurator, namespace string) *Controller { c := &Controller{ name: "insights", - client: client, - coreClient: coreClient, statusCh: make(chan struct{}, 1), configurator: configurator, namespace: namespace, @@ -173,6 +169,7 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1. 
return clusterOperator } +// calculate the current controller status based on its given sources func (c *Controller) currentControllerStatus(ctrlStatus *controllerStatus) (allReady bool, lastTransition time.Time) { var errorReason string var errs []string @@ -239,6 +236,7 @@ func (c *Controller) currentControllerStatus(ctrlStatus *controllerStatus) (allR return allReady, lastTransition } +// create the operator's related objects func (c *Controller) relatedObjects() []configv1.ObjectReference { return []configv1.ObjectReference{ {Resource: "namespaces", Name: c.namespace}, @@ -325,10 +323,11 @@ func (c *Controller) updateStatus(ctx context.Context, initial bool) error { return err } +// update the cluster controller status conditions func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerStatus, isInitializing bool, lastTransition time.Time) { - switch { - case isInitializing: + + if isInitializing { // the disabled condition is optional, but set it now if we already know we're disabled if ds := ctrlStatus.getStatus(DisabledStatus); ds != nil { cs.setCondition(OperatorDisabled, configv1.ConditionTrue, ds.reason, ds.message, metav1.Now()) @@ -336,39 +335,40 @@ func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerSt if !cs.hasCondition(configv1.OperatorDegraded) { cs.setCondition(configv1.OperatorDegraded, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) } + } - default: // once we've initialized set Failing and Disabled as best we know - // handle when disabled - if ds := ctrlStatus.getStatus(DisabledStatus); ds != nil { - cs.setCondition(OperatorDisabled, configv1.ConditionTrue, ds.reason, ds.message, metav1.Now()) - } else { - cs.setCondition(OperatorDisabled, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) - } + // once we've initialized set Failing and Disabled as best we know + // handle when disabled + if ds := ctrlStatus.getStatus(DisabledStatus); ds != nil { + cs.setCondition(OperatorDisabled, configv1.ConditionTrue, ds.reason, ds.message, metav1.Now()) + } else { + cs.setCondition(OperatorDisabled, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) + } - // handle when degraded - if es := ctrlStatus.getStatus(ErrorStatus); es != nil { - klog.V(4).Infof("The operator has some internal errors: %s", es.message) - cs.setCondition(configv1.OperatorDegraded, configv1.ConditionTrue, es.reason, es.message, metav1.Time{Time: lastTransition}) - } else { - cs.setCondition(configv1.OperatorDegraded, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) - } + // handle when degraded + if es := ctrlStatus.getStatus(ErrorStatus); es != nil { + klog.V(4).Infof("The operator has some internal errors: %s", es.message) + cs.setCondition(configv1.OperatorDegraded, configv1.ConditionTrue, es.reason, es.message, metav1.Time{Time: lastTransition}) + } else { + cs.setCondition(configv1.OperatorDegraded, configv1.ConditionFalse, "AsExpected", "", metav1.Now()) + } - // handle when upload fails - if ur := ctrlStatus.getStatus(UploadStatus); ur != nil { - cs.setCondition(InsightsUploadDegraded, configv1.ConditionTrue, ur.reason, ur.message, metav1.Time{Time: lastTransition}) - } else { - cs.removeCondition(InsightsUploadDegraded) - } + // handle when upload fails + if ur := ctrlStatus.getStatus(UploadStatus); ur != nil { + cs.setCondition(InsightsUploadDegraded, configv1.ConditionTrue, ur.reason, ur.message, metav1.Time{Time: lastTransition}) + } else { + cs.removeCondition(InsightsUploadDegraded) + } - // handle when download fails - 
if ds := ctrlStatus.getStatus(DownloadStatus); ds != nil { - cs.setCondition(InsightsDownloadDegraded, configv1.ConditionTrue, ds.reason, ds.message, metav1.Time{Time: lastTransition}) - } else { - cs.removeCondition(InsightsDownloadDegraded) - } + // handle when download fails + if ds := ctrlStatus.getStatus(DownloadStatus); ds != nil { + cs.setCondition(InsightsDownloadDegraded, configv1.ConditionTrue, ds.reason, ds.message, metav1.Time{Time: lastTransition}) + } else { + cs.removeCondition(InsightsDownloadDegraded) } } +// update the current controller state func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controllerStatus, isInitializing bool, lastTransition time.Time) { switch { @@ -396,6 +396,7 @@ func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controller } } +// handle the controller status error by formatting the present errors messages when needed func handleControllerStatusError(errs []string, errorReason string) (reason, message string) { if len(errs) > 1 { reason = "MultipleFailures" From cc26439b2d7b94c3c0035d9ec7d419381b5465fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Fri, 17 Sep 2021 10:26:58 +0200 Subject: [PATCH 10/25] refactor: smaller fixes --- pkg/controller/operator.go | 10 +++++++++- pkg/controller/status/controller.go | 8 +++----- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/pkg/controller/operator.go b/pkg/controller/operator.go index c3980ffc9..4c1149ca0 100644 --- a/pkg/controller/operator.go +++ b/pkg/controller/operator.go @@ -60,6 +60,10 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller if err != nil { return err } + configClient, err := configv1client.NewForConfig(controller.KubeConfig) + if err != nil { + return err + } // these are gathering configs gatherProtoKubeConfig := rest.CopyConfig(controller.ProtoKubeConfig) if len(s.Impersonate) > 0 { @@ -80,6 +84,10 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller // If we fail, it's likely due to the service CA not existing yet. Warn and continue, // and when the service-ca is loaded we will be restarted. 
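 // Editor's note — an illustration, not part of this patch: the lines added
 // below build a kube client purely to fail fast when the gather kubeconfig is
 // unusable; the client value itself is discarded. The same validate-early
 // pattern in isolation (cfg is a hypothetical *rest.Config):
 //
 //	if _, err := kubernetes.NewForConfig(cfg); err != nil {
 //		return fmt.Errorf("gather kubeconfig is not usable: %w", err)
 //	}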
+	_, err = kubernetes.NewForConfig(gatherProtoKubeConfig)
+	if err != nil {
+		return err
+	}
 	// ensure the insight snapshot directory exists
 	if _, err = os.Stat(s.StoragePath); err != nil && os.IsNotExist(err) {
 		if err = os.MkdirAll(s.StoragePath, 0777); err != nil {
@@ -93,7 +101,7 @@ func (s *Operator) Run(ctx context.Context, controller *controllercmd.Controller
 
 	// the status controller initializes the cluster operator object and retrieves
 	// the last sync time, if any was set
-	statusReporter := status.NewController(configObserver, os.Getenv("POD_NAMESPACE"))
+	statusReporter := status.NewController(configClient, configObserver, os.Getenv("POD_NAMESPACE"))
 
 	var anonymizer *anonymization.Anonymizer
 	if anonymization.IsObfuscationEnabled(configObserver) {
diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go
index 5dcc6dd18..c1134aeb8 100644
--- a/pkg/controller/status/controller.go
+++ b/pkg/controller/status/controller.go
@@ -20,7 +20,6 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
-	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/klog/v2"
 )
 
@@ -49,8 +48,7 @@ type Controller struct {
 	name      string
 	namespace string
 
-	client     configv1client.ConfigV1Interface
-	coreClient corev1client.CoreV1Interface
+	client configv1client.ConfigV1Interface
 
 	statusCh     chan struct{}
 	configurator Configurator
@@ -63,11 +61,12 @@
 }
 
 // NewController creates a status controller, responsible for monitoring the operator's status and updating its cluster status accordingly.
-func NewController(configurator Configurator, namespace string) *Controller {
+func NewController(client configv1client.ConfigV1Interface, configurator Configurator, namespace string) *Controller {
 	c := &Controller{
 		name:         "insights",
 		statusCh:     make(chan struct{}, 1),
 		configurator: configurator,
+		client:       client,
 		namespace:    namespace,
 	}
 	return c
@@ -326,7 +325,6 @@ func (c *Controller) updateStatus(ctx context.Context, initial bool) error {
 
 // update the cluster controller status conditions
 func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerStatus,
 	isInitializing bool, lastTransition time.Time) {
-
 	if isInitializing {
 		// the disabled condition is optional, but set it now if we already know we're disabled
 		if ds := ctrlStatus.getStatus(DisabledStatus); ds != nil {

From fe535b4902a642b44a47b83b17d4848ffb0f69c8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Mon, 20 Sep 2021 17:39:44 +0200
Subject: [PATCH 11/25] fix: fixing allReady inverted logic.
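
The two assignments touched below were inverted in an earlier commit of this
series: allReady started out false and was flipped to true precisely when a
source was *not* ready, so the controller looked fully ready exactly when some
source was still lagging, and never when all sources had reported. The intended
aggregation — a sketch for illustration, assuming the CurrentStatus()
(Summary, bool) contract used by the sources — is simply:

    allReady := true
    for i, source := range c.Sources() {
        if _, ready := source.CurrentStatus(); !ready {
            klog.V(4).Infof("Source %d %T is not ready", i, source)
            allReady = false
        }
    }

With allReady back to "true until proven otherwise", the isInitializing check
in merge() again only triggers while some source has not reported yet.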
---
 pkg/controller/status/controller.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go
index c1134aeb8..bccadefa9 100644
--- a/pkg/controller/status/controller.go
+++ b/pkg/controller/status/controller.go
@@ -173,13 +173,13 @@ func (c *Controller) currentControllerStatus(ctrlStatus *controllerStatus) (allR
 	var errorReason string
 	var errs []string
 
-	allReady = false
+	allReady = true
 
 	for i, source := range c.Sources() {
 		summary, ready := source.CurrentStatus()
 		if !ready {
 			klog.V(4).Infof("Source %d %T is not ready", i, source)
-			allReady = true
+			allReady = false
 			continue
 		}
 		if summary.Healthy {

From 1488b791413eac27c0e71b50c7e413a7b57c5358 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Tue, 21 Sep 2021 16:06:12 +0200
Subject: [PATCH 12/25] refactor: conditions entries to clusteroperator

---
 pkg/controller/status/conditions.go | 8 ++++++++
 pkg/controller/status/controller.go | 2 ++
 2 files changed, 10 insertions(+)

diff --git a/pkg/controller/status/conditions.go b/pkg/controller/status/conditions.go
index 3e5c8568c..1061b93d9 100644
--- a/pkg/controller/status/conditions.go
+++ b/pkg/controller/status/conditions.go
@@ -82,3 +82,11 @@ func (c *conditions) findCondition(condition v1.ClusterStatusConditionType) *v1.
 	}
 	return nil
 }
+
+func (c *conditions) entries() []v1.ClusterOperatorStatusCondition {
+	var res []v1.ClusterOperatorStatusCondition
+	for _, v := range c.entryMap {
+		res = append(res, v)
+	}
+	return res
+}
diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go
index bccadefa9..4ce7a7dea 100644
--- a/pkg/controller/status/controller.go
+++ b/pkg/controller/status/controller.go
@@ -153,6 +153,8 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1.
updateProcessingConditionWithSummary(cs, ctrlStatus, isInitializing, lastTransition) + clusterOperator.Status.Conditions = cs.entries() + if release := os.Getenv("RELEASE_VERSION"); len(release) > 0 { clusterOperator.Status.Versions = []configv1.OperandVersion{ {Name: "operator", Version: release}, From 9a45d22a7195ed34e7e7212bcf1b07712a0fa5d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Wed, 22 Sep 2021 14:37:02 +0200 Subject: [PATCH 13/25] fix: fixing some code issues --- pkg/controller/status/conditions.go | 57 +++++++++++++++++++++-------- pkg/controller/status/controller.go | 24 ++++++++---- 2 files changed, 59 insertions(+), 22 deletions(-) diff --git a/pkg/controller/status/conditions.go b/pkg/controller/status/conditions.go index 1061b93d9..5a98b1d44 100644 --- a/pkg/controller/status/conditions.go +++ b/pkg/controller/status/conditions.go @@ -1,37 +1,64 @@ package status import ( - v1 "github.com/openshift/api/config/v1" + configv1 "github.com/openshift/api/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( // OperatorDisabled defines the condition type when the operator's primary function has been disabled - OperatorDisabled v1.ClusterStatusConditionType = "Disabled" + OperatorDisabled configv1.ClusterStatusConditionType = "Disabled" // InsightsUploadDegraded defines the condition type (when set to True) when an archive can't be successfully uploaded - InsightsUploadDegraded v1.ClusterStatusConditionType = "UploadDegraded" + InsightsUploadDegraded configv1.ClusterStatusConditionType = "UploadDegraded" // InsightsDownloadDegraded defines the condition type (when set to True) when the Insights report can't be successfully downloaded - InsightsDownloadDegraded v1.ClusterStatusConditionType = "InsightsDownloadDegraded" + InsightsDownloadDegraded configv1.ClusterStatusConditionType = "InsightsDownloadDegraded" ) -type conditionsMap map[v1.ClusterStatusConditionType]v1.ClusterOperatorStatusCondition +type conditionsMap map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition type conditions struct { entryMap conditionsMap } -func newConditions(cos *v1.ClusterOperatorStatus) *conditions { - entries := conditionsMap{} +func newConditions(cos *configv1.ClusterOperatorStatus, time metav1.Time) *conditions { + entries := map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorProgressing: { + Type: configv1.OperatorProgressing, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorDegraded: { + Type: configv1.OperatorDegraded, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorUpgradeable: { + Type: configv1.OperatorUpgradeable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + } + for _, c := range cos.Conditions { entries[c.Type] = c } + return &conditions{ entryMap: entries, } } -func (c *conditions) setCondition(condition v1.ClusterStatusConditionType, - status v1.ConditionStatus, message, reason string, lastTime metav1.Time) { +func (c *conditions) setCondition(condition configv1.ClusterStatusConditionType, + status configv1.ConditionStatus, reason, message string, lastTime metav1.Time) { entries := make(conditionsMap) for k, v := range c.entryMap { entries[k] = v @@ -42,7 +69,7 @@ func (c 
*conditions) setCondition(condition v1.ClusterStatusConditionType, if lastTime.IsZero() { lastTime = metav1.Now() } - entries[condition] = v1.ClusterOperatorStatusCondition{ + entries[condition] = configv1.ClusterOperatorStatusCondition{ Type: condition, Status: status, Reason: reason, @@ -54,7 +81,7 @@ func (c *conditions) setCondition(condition v1.ClusterStatusConditionType, c.entryMap = entries } -func (c *conditions) removeCondition(condition v1.ClusterStatusConditionType) { +func (c *conditions) removeCondition(condition configv1.ClusterStatusConditionType) { if !c.hasCondition(condition) { return } @@ -70,12 +97,12 @@ func (c *conditions) removeCondition(condition v1.ClusterStatusConditionType) { c.entryMap = entries } -func (c *conditions) hasCondition(condition v1.ClusterStatusConditionType) bool { +func (c *conditions) hasCondition(condition configv1.ClusterStatusConditionType) bool { _, ok := c.entryMap[condition] return ok } -func (c *conditions) findCondition(condition v1.ClusterStatusConditionType) *v1.ClusterOperatorStatusCondition { +func (c *conditions) findCondition(condition configv1.ClusterStatusConditionType) *configv1.ClusterOperatorStatusCondition { existing, ok := c.entryMap[condition] if ok { return &existing @@ -83,8 +110,8 @@ func (c *conditions) findCondition(condition v1.ClusterStatusConditionType) *v1. return nil } -func (c *conditions) entries() []v1.ClusterOperatorStatusCondition { - var res []v1.ClusterOperatorStatusCondition +func (c *conditions) entries() []configv1.ClusterOperatorStatusCondition { + var res []configv1.ClusterOperatorStatusCondition for _, v := range c.entryMap { res = append(res, v) } diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go index 4ce7a7dea..52b123de5 100644 --- a/pkg/controller/status/controller.go +++ b/pkg/controller/status/controller.go @@ -125,11 +125,7 @@ func (c *Controller) Sources() []controllerstatus.Interface { func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1.ClusterOperator { // prime the object if it does not exist if clusterOperator == nil { - clusterOperator = &configv1.ClusterOperator{ - ObjectMeta: metav1.ObjectMeta{ - Name: c.name, - }, - } + clusterOperator = newClusterOperator(c.name, nil) } ctrlStatus := newControllerStatus() @@ -145,7 +141,7 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1. 
isInitializing := !allReady && now.Sub(c.controllerStartTime()) < 3*time.Minute // cluster operator conditions - cs := newConditions(&clusterOperator.Status) + cs := newConditions(&clusterOperator.Status, metav1.Time{Time: now}) updateDisabledAndFailingConditions(cs, ctrlStatus, isInitializing, lastTransition) // once the operator is running it is always considered available @@ -299,7 +295,7 @@ func (c *Controller) updateStatus(ctx context.Context, initial bool) error { } } c.SetLastReportedTime(reported.LastReportTime.Time.UTC()) - cs := newConditions(&existing.Status) + cs := newConditions(&existing.Status, metav1.Now()) if con := cs.findCondition(configv1.OperatorDegraded); con == nil || con != nil && con.Status == configv1.ConditionFalse { klog.Info("The initial operator extension statusMessage is healthy") @@ -410,3 +406,17 @@ func handleControllerStatusError(errs []string, errorReason string) (reason, mes } return reason, message } + +func newClusterOperator(name string, status *configv1.ClusterOperatorStatus) *configv1.ClusterOperator { + co := &configv1.ClusterOperator{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + + if status != nil { + co.Status = *status + } + + return co +} From cbb7125cedd3c93b4673e0e28e56c8f36b5f2685 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Wed, 22 Sep 2021 14:39:19 +0200 Subject: [PATCH 14/25] test: improve test coverage --- pkg/controller/status/conditions_test.go | 479 +++++++++++++++++++++++ pkg/controller/status/controller_test.go | 22 +- pkg/controller/status/status_test.go | 186 +++++++++ 3 files changed, 672 insertions(+), 15 deletions(-) create mode 100644 pkg/controller/status/conditions_test.go create mode 100644 pkg/controller/status/status_test.go diff --git a/pkg/controller/status/conditions_test.go b/pkg/controller/status/conditions_test.go new file mode 100644 index 000000000..290464250 --- /dev/null +++ b/pkg/controller/status/conditions_test.go @@ -0,0 +1,479 @@ +package status + +import ( + "reflect" + "testing" + + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func Test_conditions_entries(t *testing.T) { + time := metav1.Now() + + type fields struct { + entryMap conditionsMap + } + tests := []struct { + name string + fields fields + want []configv1.ClusterOperatorStatusCondition + }{ + { + name: "Can get the condition status from entry map", + fields: fields{entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorProgressing: { + Type: configv1.OperatorProgressing, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }}, + want: []configv1.ClusterOperatorStatusCondition{ + { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + { + Type: configv1.OperatorProgressing, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &conditions{ + entryMap: tt.fields.entryMap, + } + if got := c.entries(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("entries() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_conditions_findCondition(t *testing.T) { + time := metav1.Now() + + type fields struct { + entryMap conditionsMap + } + type 
args struct { + condition configv1.ClusterStatusConditionType + } + tests := []struct { + name string + fields fields + args args + want *configv1.ClusterOperatorStatusCondition + }{ + { + name: "Can find the condition status", + fields: fields{entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }}, + args: args{ + condition: configv1.OperatorAvailable, + }, + want: &configv1.ClusterOperatorStatusCondition{ + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }, + { + name: "Can't find the condition status", + fields: fields{entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }}, + args: args{ + condition: configv1.OperatorDegraded, + }, + want: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &conditions{ + entryMap: tt.fields.entryMap, + } + if got := c.findCondition(tt.args.condition); !reflect.DeepEqual(got, tt.want) { + t.Errorf("findCondition() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_conditions_hasCondition(t *testing.T) { + time := metav1.Now() + + type fields struct { + entryMap conditionsMap + } + type args struct { + condition configv1.ClusterStatusConditionType + } + tests := []struct { + name string + fields fields + args args + want bool + }{ + { + name: "Condition exists", + fields: fields{entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }}, + args: args{ + condition: configv1.OperatorAvailable, + }, + want: true, + }, + { + name: "Condition doesn't exists", + fields: fields{entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }}, + args: args{ + condition: configv1.OperatorDegraded, + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &conditions{ + entryMap: tt.fields.entryMap, + } + if got := c.hasCondition(tt.args.condition); got != tt.want { + t.Errorf("hasCondition() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_conditions_removeCondition(t *testing.T) { + time := metav1.Now() + + type fields struct { + entryMap conditionsMap + } + type args struct { + condition configv1.ClusterStatusConditionType + } + tests := []struct { + name string + fields fields + args args + want *conditions + }{ + { + name: "Removing non existing condition", + fields: fields{entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }}, + args: args{ + condition: configv1.OperatorDegraded, + }, + want: &conditions{ + entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: 
configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }, + }, + }, + { + name: "Remove existing condition", + fields: fields{entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorDegraded: { + Type: configv1.OperatorDegraded, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }}, + args: args{ + condition: configv1.OperatorAvailable, + }, + want: &conditions{ + entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorDegraded: { + Type: configv1.OperatorDegraded, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &conditions{ + entryMap: tt.fields.entryMap, + } + c.removeCondition(tt.args.condition) + if !reflect.DeepEqual(c, tt.want) { + t.Errorf("removeCondition() = %v, want %v", c, tt.want) + } + }) + } +} + +func Test_conditions_setCondition(t *testing.T) { + time := metav1.Now() + + type fields struct { + entryMap conditionsMap + } + type args struct { + condition configv1.ClusterStatusConditionType + status configv1.ConditionStatus + reason string + message string + lastTime metav1.Time + } + tests := []struct { + name string + fields fields + args args + want *conditions + }{ + { + name: "Set not existing condition", + fields: fields{entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }}, + args: args{ + condition: configv1.OperatorDegraded, + status: configv1.ConditionUnknown, + reason: "degraded reason", + message: "degraded message", + lastTime: time, + }, + want: &conditions{ + entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorDegraded: { + Type: configv1.OperatorDegraded, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "degraded reason", + Message: "degraded message", + }, + }, + }, + }, + { + name: "Set existing condition", + fields: fields{entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorDegraded: { + Type: configv1.OperatorDegraded, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }}, + args: args{ + condition: configv1.OperatorAvailable, + status: configv1.ConditionTrue, + reason: "available reason", + message: "", + lastTime: time, + }, + want: &conditions{ + entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionTrue, + LastTransitionTime: time, + Reason: "available reason", + }, + configv1.OperatorDegraded: { + Type: configv1.OperatorDegraded, + Status: configv1.ConditionUnknown, + 
LastTransitionTime: time, + Reason: "", + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &conditions{ + entryMap: tt.fields.entryMap, + } + c.setCondition(tt.args.condition, tt.args.status, tt.args.reason, tt.args.message, time) + if !reflect.DeepEqual(c, tt.want) { + t.Errorf("setConditions() = %v, want %v", c, tt.want) + } + }) + } +} + +func Test_newConditions(t *testing.T) { + time := metav1.Now() + + type args struct { + cos *configv1.ClusterOperatorStatus + time metav1.Time + } + tests := []struct { + name string + args args + want *conditions + }{ + { + name: "Test newConditions constructor (empty)", + args: args{ + cos: &configv1.ClusterOperatorStatus{Conditions: nil}, + time: time, + }, + want: &conditions{ + entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorProgressing: { + Type: configv1.OperatorProgressing, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorDegraded: { + Type: configv1.OperatorDegraded, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorUpgradeable: { + Type: configv1.OperatorUpgradeable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }, + }, + }, + { + name: "Test newConditions constructor", + args: args{ + cos: &configv1.ClusterOperatorStatus{ + Conditions: []configv1.ClusterOperatorStatusCondition{ + { + Type: configv1.OperatorDegraded, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "degraded reason", + Message: "degraded message", + }, + }, + }, + time: time, + }, + want: &conditions{ + entryMap: map[configv1.ClusterStatusConditionType]configv1.ClusterOperatorStatusCondition{ + configv1.OperatorAvailable: { + Type: configv1.OperatorAvailable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorProgressing: { + Type: configv1.OperatorProgressing, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + configv1.OperatorDegraded: { + Type: configv1.OperatorDegraded, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "degraded reason", + Message: "degraded message", + }, + configv1.OperatorUpgradeable: { + Type: configv1.OperatorUpgradeable, + Status: configv1.ConditionUnknown, + LastTransitionTime: time, + Reason: "", + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := newConditions(tt.args.cos, tt.args.time); !reflect.DeepEqual(got, tt.want) { + t.Errorf("newConditions() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/controller/status/controller_test.go b/pkg/controller/status/controller_test.go index 0e95db36d..f1c0b02a0 100644 --- a/pkg/controller/status/controller_test.go +++ b/pkg/controller/status/controller_test.go @@ -4,7 +4,6 @@ import ( "context" "testing" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" @@ -38,27 +37,20 @@ func Test_Status_SaveInitialStart(t *testing.T) { { name: "Initial run with existing Insights operator which is degraded is delayed", initialRun: true, - clusterOperator: &configv1.ClusterOperator{ - ObjectMeta: metav1.ObjectMeta{ - Name: "insights", - }, - Status: 
configv1.ClusterOperatorStatus{Conditions: []configv1.ClusterOperatorStatusCondition{ + clusterOperator: newClusterOperator( + "insights", + &configv1.ClusterOperatorStatus{Conditions: []configv1.ClusterOperatorStatusCondition{ {Type: configv1.OperatorDegraded, Status: configv1.ConditionTrue}, - }}, - }, + }}), expectedSafeInitialStart: false, }, { name: "Initial run with existing Insights operator which is not degraded not delayed", initialRun: true, - clusterOperator: &configv1.ClusterOperator{ - ObjectMeta: metav1.ObjectMeta{ - Name: "insights", - }, - Status: configv1.ClusterOperatorStatus{Conditions: []configv1.ClusterOperatorStatusCondition{ + clusterOperator: newClusterOperator("insights", + &configv1.ClusterOperatorStatus{Conditions: []configv1.ClusterOperatorStatusCondition{ {Type: configv1.OperatorDegraded, Status: configv1.ConditionFalse}, - }}, - }, + }}), expectedSafeInitialStart: true, }, } diff --git a/pkg/controller/status/status_test.go b/pkg/controller/status/status_test.go new file mode 100644 index 000000000..6281403fb --- /dev/null +++ b/pkg/controller/status/status_test.go @@ -0,0 +1,186 @@ +package status + +import ( + "reflect" + "testing" +) + +func Test_controllerStatus_getStatus(t *testing.T) { + type fields struct { + statusMap map[string]statusMessage + } + type args struct { + id string + } + tests := []struct { + name string + fields fields + args args + want *statusMessage + }{ + { + name: "Must get nil if there is no status", + fields: fields{statusMap: map[string]statusMessage{}}, + args: args{id: DisabledStatus}, + want: nil, + }, + { + name: "Can get the status message", + fields: fields{ + statusMap: map[string]statusMessage{ + "disabled": {reason: "disabled reason", message: "disabled message"}, + "upload": {reason: "upload reason", message: "upload message"}, + "download": {reason: "download reason", message: "download message"}, + "error": {reason: "error reason", message: "error message"}, + }, + }, + args: args{id: DownloadStatus}, + want: &statusMessage{"download reason", "download message"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &controllerStatus{ + statusMap: tt.fields.statusMap, + } + if got := c.getStatus(tt.args.id); !reflect.DeepEqual(got, tt.want) { + t.Errorf("getStatus() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_controllerStatus_hasStatus(t *testing.T) { + type fields struct { + statusMap map[string]statusMessage + } + type args struct { + id string + } + tests := []struct { + name string + fields fields + args args + want bool + }{ + { + name: "Must be false if status doesn't exist", + fields: fields{statusMap: map[string]statusMessage{}}, + args: args{id: DisabledStatus}, + want: false, + }, + { + name: "Must be true if status exists", + fields: fields{ + statusMap: map[string]statusMessage{ + "disabled": {reason: "disabled reason", message: "disabled message"}, + "upload": {reason: "upload reason", message: "upload message"}, + "download": {reason: "download reason", message: "download message"}, + "error": {reason: "error reason", message: "error message"}, + }, + }, + args: args{id: DownloadStatus}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &controllerStatus{ + statusMap: tt.fields.statusMap, + } + if got := c.hasStatus(tt.args.id); !reflect.DeepEqual(got, tt.want) { + t.Errorf("hasStatus() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_controllerStatus_setStatus(t *testing.T) { + type fields struct { + 
statusMap map[string]statusMessage + } + type args struct { + id string + reason string + message string + } + tests := []struct { + name string + fields fields + args args + statusID string + want *controllerStatus + }{ + { + name: "Change not existing status", + fields: fields{statusMap: map[string]statusMessage{}}, + args: args{id: DisabledStatus, reason: "disabled reason", message: "disabled message"}, + want: &controllerStatus{statusMap: map[string]statusMessage{ + "disabled": {reason: "disabled reason", message: "disabled message"}, + }}, + }, + { + name: "Update existing status with new reason", + fields: fields{statusMap: map[string]statusMessage{ + "upload": {reason: "upload reason", message: "upload message"}, + "disabled": {reason: "disabled reason", message: "disabled message"}, + }}, + args: args{id: DisabledStatus, reason: "new disabled reason", message: "disabled message"}, + want: &controllerStatus{statusMap: map[string]statusMessage{ + "upload": {reason: "upload reason", message: "upload message"}, + "disabled": {reason: "new disabled reason", message: "disabled message"}, + }}, + }, + { + name: "Update existing status with new message", + fields: fields{statusMap: map[string]statusMessage{ + "upload": {reason: "upload reason", message: "upload message"}, + "disabled": {reason: "disabled reason", message: "disabled message"}, + }}, + args: args{id: DisabledStatus, reason: "disabled reason", message: "new disabled message"}, + want: &controllerStatus{statusMap: map[string]statusMessage{ + "upload": {reason: "upload reason", message: "upload message"}, + "disabled": {reason: "disabled reason", message: "new disabled message"}, + }}, + }, + { + name: "Update existing status with same status message", + fields: fields{statusMap: map[string]statusMessage{ + "upload": {reason: "upload reason", message: "upload message"}, + "disabled": {reason: "disabled reason", message: "disabled message"}, + }}, + args: args{id: DisabledStatus, reason: "disabled reason", message: "disabled message"}, + want: &controllerStatus{statusMap: map[string]statusMessage{ + "upload": {reason: "upload reason", message: "upload message"}, + "disabled": {reason: "disabled reason", message: "disabled message"}, + }}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &controllerStatus{ + statusMap: tt.fields.statusMap, + } + c.setStatus(tt.args.id, tt.args.reason, tt.args.message) + if !reflect.DeepEqual(c, tt.want) { + t.Errorf("entries() = %v, want %v", c, tt.want) + } + }) + } +} + +func Test_newControllerStatus(t *testing.T) { + tests := []struct { + name string + want *controllerStatus + }{ + {name: "Test statusController constructor", want: &controllerStatus{statusMap: make(map[string]statusMessage)}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := newControllerStatus(); !reflect.DeepEqual(got, tt.want) { + t.Errorf("newControllerStatus() = %v, want %v", got, tt.want) + } + }) + } +} From 0b6f95d315dfc43872936d39676b184e6f779db2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ricardo=20L=C3=BCders?= Date: Wed, 22 Sep 2021 15:15:50 +0200 Subject: [PATCH 15/25] style: disable funlen for Test_conditions_setCondition --- pkg/controller/status/conditions_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/controller/status/conditions_test.go b/pkg/controller/status/conditions_test.go index 290464250..c3b183a75 100644 --- a/pkg/controller/status/conditions_test.go +++ b/pkg/controller/status/conditions_test.go @@ -267,6 +267,7 @@ func 
 	}
 }
 
+// nolint: funlen
 func Test_conditions_setCondition(t *testing.T) {
 	time := metav1.Now()
 

From ab0bb88848703dbece7ce58412ad0b47784f884a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Wed, 22 Sep 2021 15:58:36 +0200
Subject: [PATCH 16/25] refactor: replace switch with if

---
 pkg/controller/status/controller.go | 51 +++++++++++++++--------------
 1 file changed, 27 insertions(+), 24 deletions(-)

diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go
index 52b123de5..62e88d053 100644
--- a/pkg/controller/status/controller.go
+++ b/pkg/controller/status/controller.go
@@ -135,7 +135,7 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1.
 	clusterOperator = clusterOperator.DeepCopy()
 	now := time.Now()
 	if len(c.namespace) > 0 {
-		clusterOperator.Status.RelatedObjects = c.relatedObjects()
+		clusterOperator.Status.RelatedObjects = relatedObjects(c.namespace)
 	}
 
 	isInitializing := !allReady && now.Sub(c.controllerStartTime()) < 3*time.Minute
@@ -149,6 +149,10 @@
 
 	updateProcessingConditionWithSummary(cs, ctrlStatus, isInitializing, lastTransition)
 
+	klog.V(4).Infof("The operator is healthy")
+	cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "AsExpected", "Monitoring the cluster", metav1.Now())
+
+	// copy all status conditions from conditions to cluster operator
 	clusterOperator.Status.Conditions = cs.entries()
 
 	if release := os.Getenv("RELEASE_VERSION"); len(release) > 0 {
@@ -233,20 +237,6 @@
-// create the operator's related objects
-func (c *Controller) relatedObjects() []configv1.ObjectReference {
-	return []configv1.ObjectReference{
-		{Resource: "namespaces", Name: c.namespace},
-		{Group: "apps", Resource: "deployments", Namespace: c.namespace, Name: "insights-operator"},
-		{Resource: "secrets", Namespace: "openshift-config", Name: "pull-secret"},
-		{Resource: "secrets", Namespace: "openshift-config", Name: "support"},
-		{Resource: "serviceaccounts", Namespace: c.namespace, Name: "gather"},
-		{Resource: "serviceaccounts", Namespace: c.namespace, Name: "operator"},
-		{Resource: "services", Namespace: c.namespace, Name: "metrics"},
-		{Resource: "configmaps", Namespace: c.namespace, Name: "service-ca-bundle"},
-	}
-}
-
 // Start starts the periodic checking of sources.
 func (c *Controller) Start(ctx context.Context) error {
 	if err := c.updateStatus(ctx, true); err != nil {
@@ -341,7 +331,7 @@ func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerSt
 		cs.setCondition(OperatorDisabled, configv1.ConditionFalse, "AsExpected", "", metav1.Now())
 	}
 
-	// handle when degraded
+	// handle when there are errors
 	if es := ctrlStatus.getStatus(ErrorStatus); es != nil {
 		klog.V(4).Infof("The operator has some internal errors: %s", es.message)
 		cs.setCondition(configv1.OperatorDegraded, configv1.ConditionTrue, es.reason, es.message, metav1.Time{Time: lastTransition})
@@ -367,28 +357,26 @@ func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerSt
 
 // update the current controller state
 func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controllerStatus,
 	isInitializing bool, lastTransition time.Time) {
-	switch {
-	case isInitializing:
+
+	if isInitializing {
 		klog.V(4).Infof("The operator is still being initialized")
 		// if we're still starting up and some sources are not ready, initialize the conditions
 		// but don't update
 		if !cs.hasCondition(configv1.OperatorProgressing) {
 			cs.setCondition(configv1.OperatorProgressing, configv1.ConditionTrue, "Initializing", "Initializing the operator", metav1.Now())
 		}
+	}
 
-	case ctrlStatus.hasStatus(ErrorStatus):
+	if ctrlStatus.hasStatus(ErrorStatus) {
 		es := ctrlStatus.getStatus(ErrorStatus)
 		klog.V(4).Infof("The operator has some internal errors: %s", es.reason)
 		cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "Degraded", "An error has occurred", metav1.Now())
+	}
 
-	case ctrlStatus.hasStatus(DisabledStatus):
+	if ctrlStatus.hasStatus(DisabledStatus) {
 		ds := ctrlStatus.getStatus(DisabledStatus)
 		klog.V(4).Infof("The operator is marked as disabled")
 		cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, ds.reason, ds.message, metav1.Time{Time: lastTransition})
-
-	default:
-		klog.V(4).Infof("The operator is healthy")
-		cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "AsExpected", "Monitoring the cluster", metav1.Now())
 	}
 }
 
@@ -407,6 +395,7 @@ func handleControllerStatusError(errs []string, errorReason string) (reason, mes
 	return reason, message
 }
 
+// create a new cluster operator with default values
 func newClusterOperator(name string, status *configv1.ClusterOperatorStatus) *configv1.ClusterOperator {
 	co := &configv1.ClusterOperator{
 		ObjectMeta: metav1.ObjectMeta{
@@ -420,3 +409,17 @@ func newClusterOperator(name string, status *configv1.ClusterOperatorStatus) *co
 
 	return co
 }
+
+// create the operator's related objects
+func relatedObjects(namespace string) []configv1.ObjectReference {
+	return []configv1.ObjectReference{
+		{Resource: "namespaces", Name: namespace},
+		{Group: "apps", Resource: "deployments", Namespace: namespace, Name: "insights-operator"},
+		{Resource: "secrets", Namespace: "openshift-config", Name: "pull-secret"},
+		{Resource: "secrets", Namespace: "openshift-config", Name: "support"},
+		{Resource: "serviceaccounts", Namespace: namespace, Name: "gather"},
+		{Resource: "serviceaccounts", Namespace: namespace, Name: "operator"},
+		{Resource: "services", Namespace: namespace, Name: "metrics"},
+		{Resource: "configmaps", Namespace: namespace, Name: "service-ca-bundle"},
+	}
+}

From 2adc54aea987cee09ffe91e42308e877e5eb9c17 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Wed, 22 Sep 2021 16:09:02 +0200
Subject: [PATCH 17/25] style: happy lint

---
 pkg/controller/status/controller.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go
index 62e88d053..0eab2dd84 100644
--- a/pkg/controller/status/controller.go
+++ b/pkg/controller/status/controller.go
@@ -357,7 +357,6 @@ func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerSt
 // update the current controller state
 func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controllerStatus,
 	isInitializing bool, lastTransition time.Time) {
-
 	if isInitializing {
 		klog.V(4).Infof("The operator is still being initialized")
 		// if we're still starting up and some sources are not ready, initialize the conditions

From 262453a7da46aeabcfee69575d573b39c8c0aa09 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Thu, 23 Sep 2021 16:34:14 +0200
Subject: [PATCH 18/25] refactor: remove useless operator status

---
 pkg/controller/status/conditions.go      |  6 ------
 pkg/controller/status/conditions_test.go | 12 ------------
 2 files changed, 18 deletions(-)

diff --git a/pkg/controller/status/conditions.go b/pkg/controller/status/conditions.go
index 5a98b1d44..2d9a5d6dd 100644
--- a/pkg/controller/status/conditions.go
+++ b/pkg/controller/status/conditions.go
@@ -40,12 +40,6 @@ func newConditions(cos *configv1.ClusterOperatorStatus, time metav1.Time) *condi
 			LastTransitionTime: time,
 			Reason:             "",
 		},
-		configv1.OperatorUpgradeable: {
-			Type:               configv1.OperatorUpgradeable,
-			Status:             configv1.ConditionUnknown,
-			LastTransitionTime: time,
-			Reason:             "",
-		},
 	}
 
 	for _, c := range cos.Conditions {

diff --git a/pkg/controller/status/conditions_test.go b/pkg/controller/status/conditions_test.go
index c3b183a75..715648410 100644
--- a/pkg/controller/status/conditions_test.go
+++ b/pkg/controller/status/conditions_test.go
@@ -414,12 +414,6 @@ func Test_newConditions(t *testing.T) {
 						LastTransitionTime: time,
 						Reason:             "",
 					},
-					configv1.OperatorUpgradeable: {
-						Type:               configv1.OperatorUpgradeable,
-						Status:             configv1.ConditionUnknown,
-						LastTransitionTime: time,
-						Reason:             "",
-					},
 				},
 			},
 		},
@@ -460,12 +454,6 @@
 						Reason:             "degraded reason",
 						Message:            "degraded message",
 					},
-					configv1.OperatorUpgradeable: {
-						Type:               configv1.OperatorUpgradeable,
-						Status:             configv1.ConditionUnknown,
-						LastTransitionTime: time,
-						Reason:             "",
-					},
 				},
 			},
 		},

From 105195d5798fb67679e4a1639b8641f61bfea6a6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Thu, 23 Sep 2021 16:35:02 +0200
Subject: [PATCH 19/25] chore: better test name

---
 pkg/controller/status/controller_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/controller/status/controller_test.go b/pkg/controller/status/controller_test.go
index f1c0b02a0..c30c62e78 100644
--- a/pkg/controller/status/controller_test.go
+++ b/pkg/controller/status/controller_test.go
@@ -24,7 +24,7 @@ func Test_Status_SaveInitialStart(t *testing.T) {
 		expectedSafeInitialStart bool
 	}{
 		{
-			name:                     "Non-initial run is has upload delayed",
+			name:                     "Non-initial run has its upload delayed",
 			initialRun:               false,
 			expectedSafeInitialStart: false,
 		},

From 847635a9f5ce1e48f62934a0b58f1a15f6fac7ad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Thu, 23 Sep 2021 16:35:52 +0200
Subject: [PATCH 20/25] refactor: improve log msg and logic

---
 pkg/controller/status/controller.go | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go
index 0eab2dd84..fed34a4d3 100644
--- a/pkg/controller/status/controller.go
+++ b/pkg/controller/status/controller.go
@@ -196,11 +196,11 @@ func (c *Controller) currentControllerStatus(ctrlStatus *controllerStatus) (allR
 
 		if summary.Operation == controllerstatus.Uploading {
 			if summary.Count < uploadFailuresCountThreshold {
-				klog.V(4).Infof("Number of lastTransition upload failures %d lower than threshold %d. Not marking as degraded.",
+				klog.V(4).Infof("Number of last upload failures %d lower than threshold %d. Not marking as degraded.",
 					summary.Count, uploadFailuresCountThreshold)
 			} else {
 				degradingFailure = true
-				klog.V(4).Infof("Number of lastTransition upload failures %d exceeded than threshold %d. Marking as degraded.",
+				klog.V(4).Infof("Number of last upload failures %d exceeded the threshold %d. Marking as degraded.",
 					summary.Count, uploadFailuresCountThreshold)
 			}
 			ctrlStatus.setStatus(UploadStatus, summary.Reason, summary.Message)
@@ -366,14 +366,12 @@ func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controller
 		}
 	}
 
-	if ctrlStatus.hasStatus(ErrorStatus) {
-		es := ctrlStatus.getStatus(ErrorStatus)
+	if es := ctrlStatus.getStatus(ErrorStatus); es != nil {
 		klog.V(4).Infof("The operator has some internal errors: %s", es.reason)
 		cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "Degraded", "An error has occurred", metav1.Now())
 	}
 
-	if ctrlStatus.hasStatus(DisabledStatus) {
-		ds := ctrlStatus.getStatus(DisabledStatus)
+	if ds := ctrlStatus.getStatus(DisabledStatus); ds != nil {
 		klog.V(4).Infof("The operator is marked as disabled")
 		cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, ds.reason, ds.message, metav1.Time{Time: lastTransition})
 	}

From e99b571d087a80d079d4a2d231d5261d047ed316 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Wed, 29 Sep 2021 10:44:47 +0200
Subject: [PATCH 21/25] refactor: remove duplicated message

---
 pkg/controller/status/controller.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go
index fed34a4d3..4cbc64490 100644
--- a/pkg/controller/status/controller.go
+++ b/pkg/controller/status/controller.go
@@ -333,7 +333,6 @@ func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerSt
 
 	// handle when there are errors
 	if es := ctrlStatus.getStatus(ErrorStatus); es != nil {
-		klog.V(4).Infof("The operator has some internal errors: %s", es.message)
 		cs.setCondition(configv1.OperatorDegraded, configv1.ConditionTrue, es.reason, es.message, metav1.Time{Time: lastTransition})
 	} else {
 		cs.setCondition(configv1.OperatorDegraded, configv1.ConditionFalse, "AsExpected", "", metav1.Now())
@@ -367,7 +366,7 @@ func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controller
 	}
 
 	if es := ctrlStatus.getStatus(ErrorStatus); es != nil {
-		klog.V(4).Infof("The operator has some internal errors: %s", es.reason)
+		klog.V(4).Infof("The operator has some internal errors: %s", es.message)
 		cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "Degraded", "An error has occurred", metav1.Now())
 	}

From 069fff9082979c2c108b5cebb810faf78fed441a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Wed, 29 Sep 2021 11:28:27 +0200
Subject: [PATCH 22/25] chore: fixing broken test

---
 pkg/controller/status/controller.go      | 20 +++++++++++---------
 pkg/controller/status/controller_test.go |  4 +++-
 2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go
index 4cbc64490..9eeb6fa18 100644
--- a/pkg/controller/status/controller.go
+++ b/pkg/controller/status/controller.go
@@ -57,6 +57,8 @@ type Controller struct {
 	reported     Reported
 	start        time.Time
 
+	ctrlStatus *controllerStatus
+
 	lock sync.Mutex
 }
 
@@ -68,6 +70,7 @@ func NewController(client configv1client.ConfigV1Interface, configurator Configu
 		configurator: configurator,
 		client:       client,
 		namespace:    namespace,
+		ctrlStatus:   newControllerStatus(),
 	}
 	return c
 }
@@ -128,9 +131,8 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1.
 		clusterOperator = newClusterOperator(c.name, nil)
 	}
 
-	ctrlStatus := newControllerStatus()
 	// calculate the current controller state
-	allReady, lastTransition := c.currentControllerStatus(ctrlStatus)
+	allReady, lastTransition := c.currentControllerStatus()
 
 	clusterOperator = clusterOperator.DeepCopy()
 	now := time.Now()
@@ -142,12 +144,12 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1.
 	// cluster operator conditions
 	cs := newConditions(&clusterOperator.Status, metav1.Time{Time: now})
 
-	updateDisabledAndFailingConditions(cs, ctrlStatus, isInitializing, lastTransition)
+	updateDisabledAndFailingConditions(cs, c.ctrlStatus, isInitializing, lastTransition)
 
 	// once the operator is running it is always considered available
 	cs.setCondition(configv1.OperatorAvailable, configv1.ConditionTrue, "AsExpected", "", metav1.Now())
 
-	updateProcessingConditionWithSummary(cs, ctrlStatus, isInitializing, lastTransition)
+	updateProcessingConditionWithSummary(cs, c.ctrlStatus, isInitializing, lastTransition)
 
 	klog.V(4).Infof("The operator is healthy")
 	cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "AsExpected", "Monitoring the cluster", metav1.Now())
@@ -171,7 +173,7 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1.
 }
 
 // calculate the current controller status based on its given sources
-func (c *Controller) currentControllerStatus(ctrlStatus *controllerStatus) (allReady bool, lastTransition time.Time) {
+func (c *Controller) currentControllerStatus() (allReady bool, lastTransition time.Time) {
 	var errorReason string
 	var errs []string
 
@@ -203,10 +205,10 @@
 				klog.V(4).Infof("Number of last upload failures %d exceeded the threshold %d. Marking as degraded.",
 					summary.Count, uploadFailuresCountThreshold)
 			}
-			ctrlStatus.setStatus(UploadStatus, summary.Reason, summary.Message)
+			c.ctrlStatus.setStatus(UploadStatus, summary.Reason, summary.Message)
 		} else if summary.Operation == controllerstatus.DownloadingReport {
 			klog.V(4).Info("Failed to download Insights report")
-			ctrlStatus.setStatus(DownloadStatus, summary.Reason, summary.Message)
+			c.ctrlStatus.setStatus(DownloadStatus, summary.Reason, summary.Message)
 		} else if summary.Operation == controllerstatus.PullingSCACerts {
 			klog.V(4).Infof("Failed to download the SCA certs within the threshold %d with exponential backoff. Marking as degraded.",
 				OCMAPIFailureCountThreshold)
@@ -226,12 +228,12 @@
 	// handling errors
 	errorReason, errorMessage := handleControllerStatusError(errs, errorReason)
 	if errorReason != "" || errorMessage != "" {
-		ctrlStatus.setStatus(ErrorStatus, errorReason, errorMessage)
+		c.ctrlStatus.setStatus(ErrorStatus, errorReason, errorMessage)
 	}
 
 	// disabled state only when it's disabled by config. It means that gathering will not happen
 	if !c.configurator.Config().Report {
-		ctrlStatus.setStatus(DisabledStatus, "Disabled", "Health reporting is disabled")
+		c.ctrlStatus.setStatus(DisabledStatus, "Disabled", "Health reporting is disabled")
 	}
 
 	return allReady, lastTransition

diff --git a/pkg/controller/status/controller_test.go b/pkg/controller/status/controller_test.go
index c30c62e78..299758a7f 100644
--- a/pkg/controller/status/controller_test.go
+++ b/pkg/controller/status/controller_test.go
@@ -71,7 +71,9 @@ func Test_Status_SaveInitialStart(t *testing.T) {
 			ctrl := &Controller{
 				name:         "insights",
 				client:       client.ConfigV1(),
-				configurator: configobserver.New(config.Controller{Report: true}, kubeclientsetclient)}
+				configurator: configobserver.New(config.Controller{Report: true}, kubeclientsetclient),
+				ctrlStatus:   newControllerStatus(),
+			}
 
 			err := ctrl.updateStatus(context.Background(), tt.initialRun)
 			if err != tt.expErr {

From 65300b4e093ee7075e272b9e899742a86912b72d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Mon, 4 Oct 2021 10:25:32 +0200
Subject: [PATCH 23/25] fix: reset status controller every time (testing)

---
 pkg/controller/status/controller.go | 3 +++
 pkg/controller/status/status.go     | 4 ++++
 2 files changed, 7 insertions(+)

diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go
index 9eeb6fa18..dcc8b4d84 100644
--- a/pkg/controller/status/controller.go
+++ b/pkg/controller/status/controller.go
@@ -131,6 +131,9 @@ func (c *Controller) merge(clusterOperator *configv1.ClusterOperator) *configv1.
 		clusterOperator = newClusterOperator(c.name, nil)
 	}
 
+	// make sure to start a clean status controller
+	c.ctrlStatus.reset()
+
 	// calculate the current controller state
 	allReady, lastTransition := c.currentControllerStatus()
 
diff --git a/pkg/controller/status/status.go b/pkg/controller/status/status.go
index f809ae471..84d1f1aa7 100644
--- a/pkg/controller/status/status.go
+++ b/pkg/controller/status/status.go
@@ -52,3 +52,7 @@ func (c *controllerStatus) hasStatus(id string) bool {
 	_, ok := c.statusMap[id]
 	return ok
 }
+
+func (c *controllerStatus) reset() {
+	c.statusMap = make(map[string]statusMessage)
+}

From 53b5d39dca8d8ddc6aa9747bebaae8d072d768cf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Tue, 5 Oct 2021 16:47:03 +0200
Subject: [PATCH 24/25] fix: check healthy operator status

---
 pkg/controller/status/controller.go | 18 ++++++++++--------
 pkg/controller/status/status.go     |  4 ++++
 2 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/pkg/controller/status/controller.go b/pkg/controller/status/controller.go
index dcc8b4d84..18e262039 100644
--- a/pkg/controller/status/controller.go
+++ b/pkg/controller/status/controller.go
@@ -147,15 +147,12 @@
 	// cluster operator conditions
 	cs := newConditions(&clusterOperator.Status, metav1.Time{Time: now})
 
-	updateDisabledAndFailingConditions(cs, c.ctrlStatus, isInitializing, lastTransition)
+	updateControllerConditions(cs, c.ctrlStatus, isInitializing, lastTransition)
 
 	// once the operator is running it is always considered available
 	cs.setCondition(configv1.OperatorAvailable, configv1.ConditionTrue, "AsExpected", "", metav1.Now())
 
-	updateProcessingConditionWithSummary(cs, c.ctrlStatus, isInitializing, lastTransition)
-
-	klog.V(4).Infof("The operator is healthy")
-	cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "AsExpected", "Monitoring the cluster", metav1.Now())
+	updateControllerConditionsByStatus(cs, c.ctrlStatus, isInitializing, lastTransition)
 
 	// copy all status conditions from conditions to cluster operator
 	clusterOperator.Status.Conditions = cs.entries()
@@ -316,7 +313,7 @@ func (c *Controller) updateStatus(ctx context.Context, initial bool) error {
 }
 
 // update the cluster controller status conditions
-func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerStatus,
+func updateControllerConditions(cs *conditions, ctrlStatus *controllerStatus,
 	isInitializing bool, lastTransition time.Time) {
 	if isInitializing {
 		// the disabled condition is optional, but set it now if we already know we're disabled
@@ -358,8 +355,8 @@ func updateDisabledAndFailingConditions(cs *conditions, ctrlStatus *controllerSt
 	}
 }
 
-// update the current controller state
-func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controllerStatus,
+// update the current controller state by its status
+func updateControllerConditionsByStatus(cs *conditions, ctrlStatus *controllerStatus,
 	isInitializing bool, lastTransition time.Time) {
 	if isInitializing {
 		klog.V(4).Infof("The operator is still being initialized")
@@ -379,6 +376,11 @@ func updateProcessingConditionWithSummary(cs *conditions, ctrlStatus *controller
 		klog.V(4).Infof("The operator is marked as disabled")
 		cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, ds.reason, ds.message, metav1.Time{Time: lastTransition})
 	}
+
+	if ctrlStatus.isHealthy() {
+		klog.V(4).Infof("The operator is healthy")
+		cs.setCondition(configv1.OperatorProgressing, configv1.ConditionFalse, "AsExpected", "Monitoring the cluster", metav1.Now())
+	}
 }
 
 // handle the controller status error by formatting the present errors messages when needed

diff --git a/pkg/controller/status/status.go b/pkg/controller/status/status.go
index 84d1f1aa7..7dd4a9696 100644
--- a/pkg/controller/status/status.go
+++ b/pkg/controller/status/status.go
@@ -56,3 +56,7 @@ func (c *controllerStatus) hasStatus(id string) bool {
 func (c *controllerStatus) reset() {
 	c.statusMap = make(map[string]statusMessage)
 }
+
+func (c *controllerStatus) isHealthy() bool {
+	return !(c.hasStatus(ErrorStatus) || c.hasStatus(DisabledStatus))
+}

From 69a4731830fc93eb2a4674de65b40153397c64e8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ricardo=20L=C3=BCders?=
Date: Wed, 6 Oct 2021 10:06:56 +0200
Subject: [PATCH 25/25] fix: improving code based on review

---
 pkg/controller/status/conditions.go  | 14 +---------
 pkg/controller/status/status.go      |  2 +-
 pkg/controller/status/status_test.go | 42 ++++++++++++++--------------
 3 files changed, 23 insertions(+), 35 deletions(-)

diff --git a/pkg/controller/status/conditions.go b/pkg/controller/status/conditions.go
index 2d9a5d6dd..dc3c8d47a 100644
--- a/pkg/controller/status/conditions.go
+++ b/pkg/controller/status/conditions.go
@@ -76,19 +76,7 @@ func (c *conditions) setCondition(condition configv1.ClusterStatusConditionType,
 }
 
 func (c *conditions) removeCondition(condition configv1.ClusterStatusConditionType) {
-	if !c.hasCondition(condition) {
-		return
-	}
-
-	entries := make(conditionsMap)
-	for k, v := range c.entryMap {
-		if k == condition {
-			continue
-		}
-		entries[k] = v
-	}
-
-	c.entryMap = entries
+	delete(c.entryMap, condition)
 }
 
 func (c *conditions) hasCondition(condition configv1.ClusterStatusConditionType) bool {

diff --git a/pkg/controller/status/status.go b/pkg/controller/status/status.go
index 7dd4a9696..5f58abb5e 100644
--- a/pkg/controller/status/status.go
+++ b/pkg/controller/status/status.go
@@ -58,5 +58,5 @@ func (c *controllerStatus) reset() {
 }
 
 func (c *controllerStatus) isHealthy() bool {
-	return !(c.hasStatus(ErrorStatus) || c.hasStatus(DisabledStatus))
+	return !c.hasStatus(ErrorStatus)
 }

diff --git a/pkg/controller/status/status_test.go b/pkg/controller/status/status_test.go
index 6281403fb..55a8a058b 100644
--- a/pkg/controller/status/status_test.go
+++ b/pkg/controller/status/status_test.go
@@ -28,10 +28,10 @@ func Test_controllerStatus_getStatus(t *testing.T) {
 			name: "Can get the status message",
 			fields: fields{
 				statusMap: map[string]statusMessage{
-					"disabled": {reason: "disabled reason", message: "disabled message"},
-					"upload":   {reason: "upload reason", message: "upload message"},
-					"download": {reason: "download reason", message: "download message"},
-					"error":    {reason: "error reason", message: "error message"},
+					DisabledStatus: {reason: "disabled reason", message: "disabled message"},
+					UploadStatus:   {reason: "upload reason", message: "upload message"},
+					DownloadStatus: {reason: "download reason", message: "download message"},
+					ErrorStatus:    {reason: "error reason", message: "error message"},
 				},
 			},
 			args: args{id: DownloadStatus},
 			want: &statusMessage{"download reason", "download message"},
@@ -73,10 +73,10 @@ func Test_controllerStatus_hasStatus(t *testing.T) {
 			name: "Must be true if status exists",
 			fields: fields{
 				statusMap: map[string]statusMessage{
-					"disabled": {reason: "disabled reason", message: "disabled message"},
-					"upload":   {reason: "upload reason", message: "upload message"},
-					"download": {reason: "download reason", message: "download message"},
-					"error":    {reason: "error reason", message: "error message"},
+					DisabledStatus: {reason: "disabled reason", message: "disabled message"},
+					UploadStatus:   {reason: "upload reason", message: "upload message"},
+					DownloadStatus: {reason: "download reason", message: "download message"},
+					ErrorStatus:    {reason: "error reason", message: "error message"},
 				},
 			},
 			args: args{id: DownloadStatus},
 			want: true,
@@ -116,43 +116,43 @@ func Test_controllerStatus_setStatus(t *testing.T) {
 			fields: fields{statusMap: map[string]statusMessage{}},
 			args:   args{id: DisabledStatus, reason: "disabled reason", message: "disabled message"},
 			want: &controllerStatus{statusMap: map[string]statusMessage{
-				"disabled": {reason: "disabled reason", message: "disabled message"},
+				DisabledStatus: {reason: "disabled reason", message: "disabled message"},
 			}},
 		},
 		{
 			name: "Update existing status with new reason",
 			fields: fields{statusMap: map[string]statusMessage{
-				"upload":   {reason: "upload reason", message: "upload message"},
-				"disabled": {reason: "disabled reason", message: "disabled message"},
+				UploadStatus:   {reason: "upload reason", message: "upload message"},
+				DisabledStatus: {reason: "disabled reason", message: "disabled message"},
 			}},
 			args: args{id: DisabledStatus, reason: "new disabled reason", message: "disabled message"},
 			want: &controllerStatus{statusMap: map[string]statusMessage{
-				"upload":   {reason: "upload reason", message: "upload message"},
-				"disabled": {reason: "new disabled reason", message: "disabled message"},
+				UploadStatus:   {reason: "upload reason", message: "upload message"},
+				DisabledStatus: {reason: "new disabled reason", message: "disabled message"},
 			}},
 		},
 		{
 			name: "Update existing status with new message",
 			fields: fields{statusMap: map[string]statusMessage{
-				"upload":   {reason: "upload reason", message: "upload message"},
-				"disabled": {reason: "disabled reason", message: "disabled message"},
+				UploadStatus:   {reason: "upload reason", message: "upload message"},
+				DisabledStatus: {reason: "disabled reason", message: "disabled message"},
 			}},
 			args: args{id: DisabledStatus, reason: "disabled reason", message: "new disabled message"},
 			want: &controllerStatus{statusMap: map[string]statusMessage{
-				"upload":   {reason: "upload reason", message: "upload message"},
-				"disabled": {reason: "disabled reason", message: "new disabled message"},
+				UploadStatus:   {reason: "upload reason", message: "upload message"},
+				DisabledStatus: {reason: "disabled reason", message: "new disabled message"},
 			}},
 		},
 		{
 			name: "Update existing status with same status message",
 			fields: fields{statusMap: map[string]statusMessage{
-				"upload":   {reason: "upload reason", message: "upload message"},
-				"disabled": {reason: "disabled reason", message: "disabled message"},
+				UploadStatus:   {reason: "upload reason", message: "upload message"},
+				DisabledStatus: {reason: "disabled reason", message: "disabled message"},
 			}},
 			args: args{id: DisabledStatus, reason: "disabled reason", message: "disabled message"},
 			want: &controllerStatus{statusMap: map[string]statusMessage{
-				"upload":   {reason: "upload reason", message: "upload message"},
-				"disabled": {reason: "disabled reason", message: "disabled message"},
+				UploadStatus:   {reason: "upload reason", message: "upload message"},
+				DisabledStatus: {reason: "disabled reason", message: "disabled message"},
 			}},
 		},
 	}
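
Taken together, the patches above leave the status controller built around a small map-backed controllerStatus type that merge resets on every pass, fills from the gatherer sources, and then projects onto cluster operator conditions. The following condensed sketch shows how those pieces fit; it is assembled from the hunks above, the literal string values of the status constants are an assumption inferred from the test fixtures (they are never shown in the diffs), and the setStatus body is the minimal behavior consistent with Test_controllerStatus_setStatus rather than a confirmed copy of the repository code:

package status

// statusMessage pairs the reason and message recorded for one status entry.
type statusMessage struct {
	reason  string
	message string
}

// Status identifiers used as statusMap keys; the values below are assumed
// from the test fixtures in patches 14 and 25.
const (
	DisabledStatus = "disabled"
	UploadStatus   = "upload"
	DownloadStatus = "download"
	ErrorStatus    = "error"
)

// controllerStatus accumulates per-source state between merges.
type controllerStatus struct {
	statusMap map[string]statusMessage
}

func newControllerStatus() *controllerStatus {
	return &controllerStatus{statusMap: make(map[string]statusMessage)}
}

// setStatus records (or overwrites) the entry for id.
func (c *controllerStatus) setStatus(id, reason, message string) {
	c.statusMap[id] = statusMessage{reason: reason, message: message}
}

// getStatus returns a copy of the entry for id, or nil if none was recorded.
func (c *controllerStatus) getStatus(id string) *statusMessage {
	s, ok := c.statusMap[id]
	if !ok {
		return nil
	}
	return &s
}

func (c *controllerStatus) hasStatus(id string) bool {
	_, ok := c.statusMap[id]
	return ok
}

// reset clears accumulated state so each merge starts clean (patch 23).
func (c *controllerStatus) reset() {
	c.statusMap = make(map[string]statusMessage)
}

// isHealthy reports whether no error was recorded; after patch 25 a merely
// disabled operator still counts as healthy.
func (c *controllerStatus) isHealthy() bool {
	return !c.hasStatus(ErrorStatus)
}

With this shape, merge calls c.ctrlStatus.reset(), lets currentControllerStatus populate the map from each source summary, and updateControllerConditionsByStatus sets Progressing to AsExpected only when isHealthy() is true, which is the gap patch 24 set out to close: before it, the healthy condition was written unconditionally after the status checks.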