diff --git a/controllers/operatorpolicy_controller.go b/controllers/operatorpolicy_controller.go index 45bbd2c7..9a09d07d 100644 --- a/controllers/operatorpolicy_controller.go +++ b/controllers/operatorpolicy_controller.go @@ -14,6 +14,7 @@ import ( operatorv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" depclient "github.com/stolostron/kubernetes-dependency-watches/client" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -64,6 +65,11 @@ var ( Version: "v1alpha1", Kind: "CatalogSource", } + installPlanGVK = schema.GroupVersionKind{ + Group: "operators.coreos.com", + Version: "v1alpha1", + Kind: "InstallPlan", + } ) // OperatorPolicyReconciler reconciles a OperatorPolicy object @@ -158,6 +164,12 @@ func (r *OperatorPolicyReconciler) Reconcile(ctx context.Context, req ctrl.Reque return reconcile.Result{}, err } + if err := r.handleInstallPlan(ctx, policy, subscription); err != nil { + OpLog.Error(err, "Error handling InstallPlan") + + return reconcile.Result{}, err + } + csv, err := r.handleCSV(ctx, policy, subscription) if err != nil { OpLog.Error(err, "Error handling CSVs") @@ -529,8 +541,28 @@ func (r *OperatorPolicyReconciler) handleSubscription( } if !updateNeeded { - // FUTURE: Check more details about the *status* of the Subscription - // For now, just mark it as compliant + subResFailed := mergedSub.Status.GetCondition(operatorv1alpha1.SubscriptionResolutionFailed) + + if subResFailed.Status == corev1.ConditionTrue { + cond := metav1.Condition{ + Type: subConditionType, + Status: metav1.ConditionFalse, + Reason: subResFailed.Reason, + Message: subResFailed.Message, + } + + if subResFailed.LastTransitionTime != nil { + cond.LastTransitionTime = *subResFailed.LastTransitionTime + } + + err := r.updateStatus(ctx, policy, cond, nonCompObj(foundSub, subResFailed.Reason)) + if err != nil { + 
return nil, fmt.Errorf("error updating the status for a Subscription in ResolutionFailed: %w", err) + } + + return mergedSub, nil + } + err := r.updateStatus(ctx, policy, matchesCond("Subscription"), matchedObj(foundSub)) if err != nil { return nil, fmt.Errorf("error updating the status for a Subscription that matches: %w", err) @@ -603,6 +635,12 @@ func buildSubscription( subscription.ObjectMeta.Namespace = ns subscription.Spec = spec + // If the policy is in `enforce` mode and the allowed CSVs are restricted, + // the InstallPlanApproval will be set to Manual so that upgrades can be controlled. + if policy.Spec.RemediationAction.IsEnforce() && len(policy.Spec.Versions) > 0 { + subscription.Spec.InstallPlanApproval = operatorv1alpha1.ApprovalManual + } + return subscription, nil } @@ -790,3 +828,211 @@ func (r *OperatorPolicyReconciler) mergeObjects( return updateNeeded, false, nil } + +func (r *OperatorPolicyReconciler) handleInstallPlan( + ctx context.Context, policy *policyv1beta1.OperatorPolicy, sub *operatorv1alpha1.Subscription, +) error { + if sub == nil { + err := r.updateStatus(ctx, policy, noInstallPlansCond, noInstallPlansObj("")) + if err != nil { + return fmt.Errorf("error updating status when the subscription is nil: %w", err) + } + + return nil + } + + watcher := opPolIdentifier(policy.Namespace, policy.Name) + + foundInstallPlans, err := r.DynamicWatcher.List( + watcher, installPlanGVK, sub.Namespace, labels.Everything()) + if err != nil { + return fmt.Errorf("error listing InstallPlans: %w", err) + } + + ownedInstallPlans := make([]unstructured.Unstructured, 0, len(foundInstallPlans)) + + for _, installPlan := range foundInstallPlans { + for _, owner := range installPlan.GetOwnerReferences() { + match := owner.Name == sub.Name && + owner.Kind == subscriptionGVK.Kind && + owner.APIVersion == subscriptionGVK.GroupVersion().String() + if match { + ownedInstallPlans = append(ownedInstallPlans, installPlan) + + break + } + } + } + + // InstallPlans are 
generally kept in order to provide a history of actions on the cluster, but + // they can be deleted without impacting the installed operator. So, not finding any should not + // be considered a reason for NonCompliance. + if len(ownedInstallPlans) == 0 { + err := r.updateStatus(ctx, policy, noInstallPlansCond, noInstallPlansObj(sub.Namespace)) + if err != nil { + return fmt.Errorf("error updating status when no relevant InstallPlans were found: %w", err) + } + + return nil + } + + OpLog := ctrl.LoggerFrom(ctx) + relatedInstallPlans := make([]policyv1.RelatedObject, len(ownedInstallPlans)) + ipsRequiringApproval := make([]unstructured.Unstructured, 0) + anyInstalling := false + currentPlanFailed := false + + // Construct the relevant relatedObjects, and collect any that might be considered for approval + for i, installPlan := range ownedInstallPlans { + phase, ok, err := unstructured.NestedString(installPlan.Object, "status", "phase") + if !ok && err == nil { + err = errors.New("the phase of the InstallPlan was not found") + } + + if err != nil { + OpLog.Error(err, "Unable to determine the phase of the related InstallPlan", + "InstallPlan.Name", installPlan.GetName()) + + // The InstallPlan will be added as unknown + phase = "" + } + + // consider some special phases + switch phase { + case string(operatorv1alpha1.InstallPlanPhaseRequiresApproval): + ipsRequiringApproval = append(ipsRequiringApproval, installPlan) + case string(operatorv1alpha1.InstallPlanPhaseInstalling): + anyInstalling = true + case string(operatorv1alpha1.InstallPlanFailed): + // Generally, a failed InstallPlan is not a reason for NonCompliance, because it could be from + // an old installation. But if the current InstallPlan is failed, we should alert the user. 
+			if sub.Status.InstallPlanRef != nil && sub.Status.InstallPlanRef.Name == installPlan.GetName() {
+				currentPlanFailed = true
+			}
+		}
+
+		relatedInstallPlans[i] = existingInstallPlanObj(&ownedInstallPlans[i], phase)
+	}
+
+	if currentPlanFailed {
+		err := r.updateStatus(ctx, policy, installPlanFailed, relatedInstallPlans...)
+		if err != nil {
+			return fmt.Errorf("error updating status when the current InstallPlan has failed: %w", err)
+		}
+
+		return nil
+	}
+
+	if anyInstalling {
+		err := r.updateStatus(ctx, policy, installPlanInstallingCond, relatedInstallPlans...)
+		if err != nil {
+			return fmt.Errorf("error updating status when an installing InstallPlan was found: %w", err)
+		}
+
+		return nil
+	}
+
+	if len(ipsRequiringApproval) == 0 {
+		err := r.updateStatus(ctx, policy, installPlansFineCond, relatedInstallPlans...)
+		if err != nil {
+			return fmt.Errorf("error updating status when InstallPlans were fine: %w", err)
+		}
+
+		return nil
+	}
+
+	allUpgradeVersions := make([]string, len(ipsRequiringApproval))
+
+	for i, installPlan := range ipsRequiringApproval {
+		csvNames, ok, err := unstructured.NestedStringSlice(installPlan.Object,
+			"spec", "clusterServiceVersionNames")
+		if !ok && err == nil {
+			err = errors.New("the clusterServiceVersionNames field of the InstallPlan was not found")
+		}
+
+		if err != nil {
+			OpLog.Error(err, "Unable to determine the csv names of the related InstallPlan",
+				"InstallPlan.Name", installPlan.GetName())
+
+			csvNames = []string{"???"}
+		}
+
+		allUpgradeVersions[i] = fmt.Sprintf("%v", csvNames)
+	}
+
+	// Only report this status in `inform` mode, because otherwise it could easily oscillate between this and
+	// another condition below when being enforced.
+	if policy.Spec.RemediationAction.IsInform() {
+		// FUTURE: check policy.spec.statusConfig.upgradesAvailable to determine `compliant`.
+		// For now this condition assumes it is set to 'NonCompliant'
+		err := r.updateStatus(ctx, policy, installPlanUpgradeCond(allUpgradeVersions, nil), relatedInstallPlans...)
+		if err != nil {
+			return fmt.Errorf("error updating status when an InstallPlan requiring approval was found: %w", err)
+		}
+
+		return nil
+	}
+
+	approvedVersion := "" // this will only be accurate when there is only one approvable InstallPlan
+	approvableInstallPlans := make([]unstructured.Unstructured, 0)
+
+	for _, installPlan := range ipsRequiringApproval {
+		ipCSVs, ok, err := unstructured.NestedStringSlice(installPlan.Object,
+			"spec", "clusterServiceVersionNames")
+		if !ok && err == nil {
+			err = errors.New("the clusterServiceVersionNames field of the InstallPlan was not found")
+		}
+
+		if err != nil {
+			OpLog.Error(err, "Unable to determine the csv names of the related InstallPlan",
+				"InstallPlan.Name", installPlan.GetName())
+
+			continue
+		}
+
+		if len(ipCSVs) != 1 {
+			continue // Don't automate approving any InstallPlans for multiple CSVs
+		}
+
+		matchingCSV := len(policy.Spec.Versions) == 0 // true if `spec.versions` is not specified
+
+		for _, acceptableCSV := range policy.Spec.Versions {
+			if string(acceptableCSV) == ipCSVs[0] {
+				matchingCSV = true
+
+				break
+			}
+		}
+
+		if matchingCSV {
+			approvedVersion = ipCSVs[0]
+
+			approvableInstallPlans = append(approvableInstallPlans, installPlan)
+		}
+	}
+
+	if len(approvableInstallPlans) != 1 {
+		err := r.updateStatus(ctx, policy,
+			installPlanUpgradeCond(allUpgradeVersions, approvableInstallPlans), relatedInstallPlans...)
+ if err != nil { + return fmt.Errorf("error updating status when an InstallPlan can't be automatically approved: %w", err) + } + + return nil + } + + if err := unstructured.SetNestedField(approvableInstallPlans[0].Object, true, "spec", "approved"); err != nil { + return fmt.Errorf("error approving InstallPlan: %w", err) + } + + if err := r.Update(ctx, &approvableInstallPlans[0]); err != nil { + return fmt.Errorf("error updating approved InstallPlan: %w", err) + } + + err = r.updateStatus(ctx, policy, installPlanApprovedCond(approvedVersion), relatedInstallPlans...) + if err != nil { + return fmt.Errorf("error updating status after approving an InstallPlan: %w", err) + } + + return nil +} diff --git a/controllers/operatorpolicy_status.go b/controllers/operatorpolicy_status.go index 77bef570..a631b69b 100644 --- a/controllers/operatorpolicy_status.go +++ b/controllers/operatorpolicy_status.go @@ -202,6 +202,18 @@ func calculateComplianceCondition(policy *policyv1beta1.OperatorPolicy) metav1.C } } + idx, cond = policy.Status.GetCondition(installPlanConditionType) + if idx == -1 { + messages = append(messages, "the status of the InstallPlan is unknown") + foundNonCompliant = true + } else { + messages = append(messages, cond.Message) + + if cond.Status != metav1.ConditionTrue { + foundNonCompliant = true + } + } + idx, cond = policy.Status.GetCondition(csvConditionType) if idx == -1 { messages = append(messages, "the status of the ClusterServiceVersion is unknown") @@ -327,12 +339,13 @@ func (r *OperatorPolicyReconciler) emitComplianceEvent( } const ( - compliantConditionType = "Compliant" - opGroupConditionType = "OperatorGroupCompliant" - subConditionType = "SubscriptionCompliant" - csvConditionType = "ClusterServiceVersionCompliant" - deploymentConditionType = "DeploymentCompliant" - catalogSrcConditionType = "CatalogSourcesUnhealthy" + compliantConditionType = "Compliant" + opGroupConditionType = "OperatorGroupCompliant" + subConditionType = 
"SubscriptionCompliant" + csvConditionType = "ClusterServiceVersionCompliant" + deploymentConditionType = "DeploymentCompliant" + catalogSrcConditionType = "CatalogSourcesUnhealthy" + installPlanConditionType = "InstallPlanCompliant" ) func condType(kind string) string { @@ -416,6 +429,9 @@ func updatedCond(kind string) metav1.Condition { } } +// opGroupPreexistingCond is a Compliant condition with Reason 'PreexistingOperatorGroupFound', +// and Message 'the policy does not specify an OperatorGroup but one already exists in the +// namespace - assuming that OperatorGroup is correct' var opGroupPreexistingCond = metav1.Condition{ Type: opGroupConditionType, Status: metav1.ConditionTrue, @@ -424,8 +440,8 @@ var opGroupPreexistingCond = metav1.Condition{ "assuming that OperatorGroup is correct", } -// opGroupTooManyCond is a NonCompliant condition with a Reason like 'TooManyOperatorGroups', -// and a Message like 'there is more than one OperatorGroup in the namespace' +// opGroupTooManyCond is a NonCompliant condition with Reason 'TooManyOperatorGroups', +// and Message 'there is more than one OperatorGroup in the namespace' var opGroupTooManyCond = metav1.Condition{ Type: opGroupConditionType, Status: metav1.ConditionFalse, @@ -578,6 +594,82 @@ func catalogSrcUnknownObj(catalogName string, catalogNS string) policyv1.Related } } +// noInstallPlansCond is a Compliant condition with Reason 'NoInstallPlansFound', +// and Message 'there are no relevant InstallPlans in the namespace' +var noInstallPlansCond = metav1.Condition{ + Type: installPlanConditionType, + Status: metav1.ConditionTrue, + Reason: "NoInstallPlansFound", + Message: "there are no relevant InstallPlans in the namespace", +} + +// installPlanFailed is a NonCompliant condition with Reason 'InstallPlanFailed' +// and message 'the current InstallPlan has failed' +var installPlanFailed = metav1.Condition{ + Type: installPlanConditionType, + Status: metav1.ConditionFalse, + Reason: "InstallPlanFailed", + 
Message: "the current InstallPlan has failed", +} + +// installPlanInstallingCond is a NonCompliant condition with Reason 'InstallPlansInstalling' +// and message 'a relevant InstallPlan is actively installing' +var installPlanInstallingCond = metav1.Condition{ + Type: installPlanConditionType, + Status: metav1.ConditionFalse, + Reason: "InstallPlansInstalling", + Message: "a relevant InstallPlan is actively installing", +} + +// installPlansFineCond is a Compliant condition with Reason 'InstallPlansFine' +// and message 'the relevant InstallPlans are fine' +var installPlansFineCond = metav1.Condition{ + Type: installPlanConditionType, + Status: metav1.ConditionTrue, + Reason: "InstallPlansFine", + Message: "the relevant InstallPlans are fine", +} + +// installPlanUpgradeCond is a NonCompliant condition with Reason 'InstallPlanRequiresApproval' +// and a message detailing which possible updates are available +func installPlanUpgradeCond(versions []string, approvableIPs []unstructured.Unstructured) metav1.Condition { + // FUTURE: check policy.spec.statusConfig.upgradesAvailable to determine `compliant`. 
+ // For now this condition assumes it is set to 'NonCompliant' + cond := metav1.Condition{ + Type: installPlanConditionType, + Status: metav1.ConditionFalse, + Reason: "InstallPlanRequiresApproval", + } + + if len(versions) == 1 { + cond.Message = fmt.Sprintf("an InstallPlan to update to %v is available for approval", versions[0]) + } else { + cond.Message = fmt.Sprintf("there are multiple InstallPlans available for approval (%v)", + strings.Join(versions, ", or ")) + } + + if approvableIPs != nil && len(approvableIPs) == 0 { + cond.Message += " but not allowed by the specified versions in the policy" + } + + if len(approvableIPs) > 1 { + cond.Message += " but multiple of those match the versions specified in the policy" + } + + return cond +} + +// installPlanApprovedCond is a Compliant condition with Reason 'InstallPlanApproved' +// and a message like 'the InstallPlan for _____ was approved' +func installPlanApprovedCond(version string) metav1.Condition { + return metav1.Condition{ + Type: installPlanConditionType, + Status: metav1.ConditionTrue, + Reason: "InstallPlanApproved", + Message: fmt.Sprintf("the InstallPlan for %v was approved", version), + } +} + // missingWantedObj returns a NonCompliant RelatedObject with reason = 'Resource not found but should exist' func missingWantedObj(obj client.Object) policyv1.RelatedObject { return policyv1.RelatedObject{ @@ -645,7 +737,7 @@ func opGroupTooManyObjs(opGroups []unstructured.Unstructured) []policyv1.Related for i, opGroup := range opGroups { objs[i] = policyv1.RelatedObject{ - Object: policyv1.ObjectResourceFromObj(&opGroup), + Object: policyv1.ObjectResourceFromObj(&opGroups[i]), Compliant: string(policyv1.NonCompliant), Reason: "There is more than one OperatorGroup in this namespace", Properties: &policyv1.ObjectProperties{ @@ -735,6 +827,17 @@ func existingDeploymentObj(dep *appsv1.Deployment) policyv1.RelatedObject { } } +func nonCompObj(obj client.Object, reason string) policyv1.RelatedObject { + return 
policyv1.RelatedObject{ + Object: policyv1.ObjectResourceFromObj(obj), + Compliant: string(policyv1.NonCompliant), + Reason: reason, + Properties: &policyv1.ObjectProperties{ + UID: string(obj.GetUID()), + }, + } +} + // represents a lack of relevant deployments var noExistingDeploymentObj = policyv1.RelatedObject{ Object: policyv1.ObjectResource{ @@ -747,3 +850,47 @@ var noExistingDeploymentObj = policyv1.RelatedObject{ Compliant: string(policyv1.UnknownCompliancy), Reason: "No relevant deployments found", } + +// noInstallPlansObj returns a compliant RelatedObject with +// reason = 'There are no relevant InstallPlans in this namespace' +func noInstallPlansObj(namespace string) policyv1.RelatedObject { + return policyv1.RelatedObject{ + Object: policyv1.ObjectResource{ + Kind: installPlanGVK.Kind, + APIVersion: installPlanGVK.GroupVersion().String(), + Metadata: policyv1.ObjectMetadata{ + Name: "*", + Namespace: namespace, + }, + }, + Compliant: string(policyv1.Compliant), + Reason: "There are no relevant InstallPlans in this namespace", + } +} + +func existingInstallPlanObj(ip client.Object, phase string) policyv1.RelatedObject { + relObj := policyv1.RelatedObject{ + Object: policyv1.ObjectResourceFromObj(ip), + Properties: &policyv1.ObjectProperties{ + UID: string(ip.GetUID()), + }, + } + + if phase != "" { + relObj.Reason = "The InstallPlan is " + phase + } else { + relObj.Reason = "The InstallPlan is Unknown" + } + + switch phase { + case string(operatorv1alpha1.InstallPlanPhaseRequiresApproval): + // FUTURE: check policy.spec.statusConfig.upgradesAvailable to determine `compliant`. + // For now, assume it is set to 'NonCompliant' + relObj.Compliant = string(policyv1.NonCompliant) + case string(operatorv1alpha1.InstallPlanPhaseInstalling): + // if it's still installing, then it shouldn't be considered compliant yet. 
+ relObj.Compliant = string(policyv1.NonCompliant) + } + + return relObj +} diff --git a/test/e2e/case38_install_operator_test.go b/test/e2e/case38_install_operator_test.go index 6f0e92af..1cd6762e 100644 --- a/test/e2e/case38_install_operator_test.go +++ b/test/e2e/case38_install_operator_test.go @@ -1,11 +1,15 @@ package e2e import ( + "context" "encoding/json" + "fmt" + "regexp" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" policyv1 "open-cluster-management.io/config-policy-controller/api/v1" policyv1beta1 "open-cluster-management.io/config-policy-controller/api/v1beta1" @@ -15,10 +19,12 @@ import ( var _ = Describe("Test installing an operator from OperatorPolicy", Ordered, func() { const ( - opPolTestNS = "operator-policy-testns" - parentPolicyYAML = "../resources/case38_operator_install/parent-policy.yaml" - parentPolicyName = "parent-policy" - opPolTimeout = 30 + opPolTestNS = "operator-policy-testns" + parentPolicyYAML = "../resources/case38_operator_install/parent-policy.yaml" + parentPolicyName = "parent-policy" + eventuallyTimeout = 10 + consistentlyDuration = 5 + olmWaitTimeout = 45 ) check := func( @@ -32,7 +38,7 @@ var _ = Describe("Test installing an operator from OperatorPolicy", Ordered, fun GinkgoHelper() unstructPolicy := utils.GetWithTimeout(clientManagedDynamic, gvrOperatorPolicy, polName, - opPolTestNS, true, opPolTimeout) + opPolTestNS, true, eventuallyTimeout) policyJSON, err := json.MarshalIndent(unstructPolicy.Object, "", " ") g.Expect(err).NotTo(HaveOccurred()) @@ -70,10 +76,11 @@ var _ = Describe("Test installing an operator from OperatorPolicy", Ordered, fun g.Expect(idx).NotTo(Equal(-1)) g.Expect(actualCondition.Status).To(Equal(expectedCondition.Status)) g.Expect(actualCondition.Reason).To(Equal(expectedCondition.Reason)) - g.Expect(actualCondition.Message).To(Equal(expectedCondition.Message)) + 
g.Expect(actualCondition.Message).To(MatchRegexp( + fmt.Sprintf(".*%v.*", regexp.QuoteMeta(expectedCondition.Message)))) events := utils.GetMatchingEvents( - clientManaged, opPolTestNS, parentPolicyName, "", expectedEventMsgSnippet, opPolTimeout, + clientManaged, opPolTestNS, parentPolicyName, "", expectedEventMsgSnippet, eventuallyTimeout, ) g.Expect(events).NotTo(BeEmpty()) @@ -87,8 +94,8 @@ var _ = Describe("Test installing an operator from OperatorPolicy", Ordered, fun } } - EventuallyWithOffset(1, checkFunc, opPolTimeout, 3).Should(Succeed()) - ConsistentlyWithOffset(1, checkFunc, 3, 1).Should(Succeed()) + EventuallyWithOffset(1, checkFunc, eventuallyTimeout, 3).Should(Succeed()) + ConsistentlyWithOffset(1, checkFunc, consistentlyDuration, 1).Should(Succeed()) } Describe("Testing OperatorGroup behavior when it is not specified in the policy", Ordered, func() { @@ -475,14 +482,15 @@ var _ = Describe("Test installing an operator from OperatorPolicy", Ordered, fun Namespace: opPolTestNS, }, }, - Compliant: "Compliant", - Reason: "Resource found as expected", + Compliant: "NonCompliant", + Reason: "ConstraintsNotSatisfiable", }}, metav1.Condition{ - Type: "SubscriptionCompliant", - Status: metav1.ConditionTrue, - Reason: "SubscriptionMatches", - Message: "the Subscription matches what is required by the policy", + Type: "SubscriptionCompliant", + Status: metav1.ConditionFalse, + Reason: "ConstraintsNotSatisfiable", + Message: "no operators found from catalog operatorhubio-catalog in namespace fake " + + "referenced by subscription project-quay", }, "the Subscription was updated to match the policy", ) @@ -576,6 +584,19 @@ var _ = Describe("Test installing an operator from OperatorPolicy", Ordered, fun }) It("Should generate conditions and relatedobjects of CSV", func() { + Eventually(func() string { + csv, _ := clientManagedDynamic.Resource(gvrClusterServiceVersion).Namespace(opPolTestNS). 
+ Get(context.TODO(), "quay-operator.v3.8.13", metav1.GetOptions{}) + + if csv == nil { + return "" + } + + reason, _, _ := unstructured.NestedString(csv.Object, "status", "reason") + + return reason + }, olmWaitTimeout, 5).Should(Equal("InstallSucceeded")) + check( opPolName, false, @@ -635,6 +656,13 @@ var _ = Describe("Test installing an operator from OperatorPolicy", Ordered, fun }) It("Should generate conditions and relatedobjects of CSV", func() { + Eventually(func() []unstructured.Unstructured { + csvList, _ := clientManagedDynamic.Resource(gvrClusterServiceVersion).Namespace(opPolTestNS). + List(context.TODO(), metav1.ListOptions{}) + + return csvList.Items + }, olmWaitTimeout, 5).ShouldNot(BeEmpty()) + check( opPolName, false, @@ -822,4 +850,252 @@ var _ = Describe("Test installing an operator from OperatorPolicy", Ordered, fun ) }) }) + Describe("Testing InstallPlan approval and status behavior", Ordered, func() { + const ( + opPolYAML = "../resources/case38_operator_install/operator-policy-manual-upgrades.yaml" + opPolName = "oppol-manual-upgrades" + subName = "strimzi-kafka-operator" + ) + + var ( + firstInstallPlanName string + secondInstallPlanName string + ) + + BeforeAll(func() { + utils.Kubectl("create", "ns", opPolTestNS) + DeferCleanup(func() { + utils.Kubectl("delete", "ns", opPolTestNS) + }) + + createObjWithParent(parentPolicyYAML, parentPolicyName, + opPolYAML, opPolTestNS, gvrPolicy, gvrOperatorPolicy) + }) + + It("Should initially report the ConstraintsNotSatisfiable Subscription", func() { + Eventually(func() interface{} { + sub, _ := clientManagedDynamic.Resource(gvrSubscription).Namespace(opPolTestNS). 
+ Get(context.TODO(), subName, metav1.GetOptions{}) + + if sub == nil { + return "" + } + + conditions, _, _ := unstructured.NestedSlice(sub.Object, "status", "conditions") + for _, cond := range conditions { + condMap, ok := cond.(map[string]interface{}) + if !ok { + continue + } + + condType, _, _ := unstructured.NestedString(condMap, "type") + if condType == "ResolutionFailed" { + return condMap["status"] + } + } + + return nil + }, olmWaitTimeout, 5).Should(Equal("True")) + check( + opPolName, + true, + []policyv1.RelatedObject{{ + Object: policyv1.ObjectResource{ + Kind: "Subscription", + APIVersion: "operators.coreos.com/v1alpha1", + Metadata: policyv1.ObjectMetadata{ + Namespace: opPolTestNS, + Name: subName, + }, + }, + Compliant: "NonCompliant", + Reason: "ConstraintsNotSatisfiable", + }}, + metav1.Condition{ + Type: "SubscriptionCompliant", + Status: metav1.ConditionFalse, + Reason: "ConstraintsNotSatisfiable", + Message: "no operators found with name strimzi-cluster-operator.v0.0.0.1337 in channel " + + "strimzi-0.36.x of package strimzi-kafka-operator in the catalog referenced by " + + "subscription strimzi-kafka-operator", + }, + "constraints not satisfiable", + ) + }) + It("Should initially report that no InstallPlans are found", func() { + check( + opPolName, + true, + []policyv1.RelatedObject{{ + Object: policyv1.ObjectResource{ + Kind: "InstallPlan", + APIVersion: "operators.coreos.com/v1alpha1", + Metadata: policyv1.ObjectMetadata{ + Namespace: opPolTestNS, + Name: "*", + }, + }, + Compliant: "Compliant", + Reason: "There are no relevant InstallPlans in this namespace", + }}, + metav1.Condition{ + Type: "InstallPlanCompliant", + Status: metav1.ConditionTrue, + Reason: "NoInstallPlansFound", + Message: "there are no relevant InstallPlans in the namespace", + }, + "there are no relevant InstallPlans in the namespace", + ) + }) + It("Should report an available upgrade", func() { + goodVersion := "strimzi-cluster-operator.v0.36.0" + 
utils.Kubectl("patch", "operatorpolicy", opPolName, "-n", opPolTestNS, "--type=json", "-p", + `[{"op": "replace", "path": "/spec/subscription/startingCSV", "value": "`+goodVersion+`"},`+ + `{"op": "replace", "path": "/spec/remediationAction", "value": "inform"}]`) + utils.Kubectl("patch", "subscription.operator", subName, "-n", opPolTestNS, "--type=json", "-p", + `[{"op": "replace", "path": "/spec/startingCSV", "value": "`+goodVersion+`"}]`) + Eventually(func() int { + ipList, _ := clientManagedDynamic.Resource(gvrInstallPlan).Namespace(opPolTestNS). + List(context.TODO(), metav1.ListOptions{}) + + return len(ipList.Items) + }, olmWaitTimeout, 5).Should(Equal(1)) + check( + opPolName, + true, + []policyv1.RelatedObject{{ + Object: policyv1.ObjectResource{ + Kind: "InstallPlan", + APIVersion: "operators.coreos.com/v1alpha1", + Metadata: policyv1.ObjectMetadata{ + Namespace: opPolTestNS, + }, + }, + Compliant: "NonCompliant", + Reason: "The InstallPlan is RequiresApproval", + }}, + metav1.Condition{ + Type: "InstallPlanCompliant", + Status: metav1.ConditionFalse, + Reason: "InstallPlanRequiresApproval", + Message: "an InstallPlan to update to [strimzi-cluster-operator.v0.36.0] is available for approval", + }, + "an InstallPlan to update .* is available for approval", + ) + }) + It("Should do the upgrade when enforced, and stop at the next version", func() { + ipList, err := clientManagedDynamic.Resource(gvrInstallPlan).Namespace(opPolTestNS). + List(context.TODO(), metav1.ListOptions{}) + Expect(err).NotTo(HaveOccurred()) + Expect(ipList.Items).To(HaveLen(1)) + + firstInstallPlanName = ipList.Items[0].GetName() + + utils.Kubectl("patch", "operatorpolicy", opPolName, "-n", opPolTestNS, "--type=json", "-p", + `[{"op": "replace", "path": "/spec/remediationAction", "value": "enforce"}]`) + + Eventually(func() int { + ipList, err = clientManagedDynamic.Resource(gvrInstallPlan).Namespace(opPolTestNS). 
+ List(context.TODO(), metav1.ListOptions{}) + + return len(ipList.Items) + }, olmWaitTimeout, 5).Should(Equal(2)) + + secondInstallPlanName = ipList.Items[1].GetName() + if firstInstallPlanName == secondInstallPlanName { + secondInstallPlanName = ipList.Items[0].GetName() + } + + Eventually(func() string { + ip, _ := clientManagedDynamic.Resource(gvrInstallPlan).Namespace(opPolTestNS). + Get(context.TODO(), firstInstallPlanName, metav1.GetOptions{}) + phase, _, _ := unstructured.NestedString(ip.Object, "status", "phase") + + return phase + }, olmWaitTimeout, 5).Should(Equal("Complete")) + + // This check covers several situations that occur quickly: the first InstallPlan eventually + // progresses to Complete after it is approved, and the next InstallPlan is created and + // recognized by the policy (but not yet approved). + check( + opPolName, + false, + []policyv1.RelatedObject{{ + Object: policyv1.ObjectResource{ + Kind: "InstallPlan", + APIVersion: "operators.coreos.com/v1alpha1", + Metadata: policyv1.ObjectMetadata{ + Namespace: opPolTestNS, + Name: firstInstallPlanName, + }, + }, + Reason: "The InstallPlan is Complete", + }, { + Object: policyv1.ObjectResource{ + Kind: "InstallPlan", + APIVersion: "operators.coreos.com/v1alpha1", + Metadata: policyv1.ObjectMetadata{ + Namespace: opPolTestNS, + Name: secondInstallPlanName, + }, + }, + Compliant: "NonCompliant", + Reason: "The InstallPlan is RequiresApproval", + }}, + metav1.Condition{ + Type: "InstallPlanCompliant", + Status: metav1.ConditionFalse, + Reason: "InstallPlanRequiresApproval", + Message: "an InstallPlan to update to [strimzi-cluster-operator.v0.36.1] is available for " + + "approval but not allowed by the specified versions in the policy", + }, + "the InstallPlan.*36.0.*was approved", + ) + }) + It("Should approve the next version when it's added to the spec", func() { + utils.Kubectl("patch", "operatorpolicy", opPolName, "-n", opPolTestNS, "--type=json", "-p", + `[{"op": "add", "path": 
"/spec/versions/-", "value": "strimzi-cluster-operator.v0.36.1"}]`) + + Eventually(func() string { + ip, _ := clientManagedDynamic.Resource(gvrInstallPlan).Namespace(opPolTestNS). + Get(context.TODO(), secondInstallPlanName, metav1.GetOptions{}) + phase, _, _ := unstructured.NestedString(ip.Object, "status", "phase") + + return phase + }, olmWaitTimeout, 5).Should(Equal("Complete")) + + check( + opPolName, + false, + []policyv1.RelatedObject{{ + Object: policyv1.ObjectResource{ + Kind: "InstallPlan", + APIVersion: "operators.coreos.com/v1alpha1", + Metadata: policyv1.ObjectMetadata{ + Namespace: opPolTestNS, + Name: firstInstallPlanName, + }, + }, + Reason: "The InstallPlan is Complete", + }, { + Object: policyv1.ObjectResource{ + Kind: "InstallPlan", + APIVersion: "operators.coreos.com/v1alpha1", + Metadata: policyv1.ObjectMetadata{ + Namespace: opPolTestNS, + Name: secondInstallPlanName, + }, + }, + Reason: "The InstallPlan is Complete", + }}, + metav1.Condition{ + Type: "InstallPlanCompliant", + Status: metav1.ConditionTrue, + Reason: "InstallPlansFine", + Message: "the relevant InstallPlans are fine", + }, + "the InstallPlan.*36.1.*was approved", + ) + }) + }) }) diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index a2ee84ec..a2cf6941 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -50,6 +50,8 @@ var ( gvrOperatorPolicy schema.GroupVersionResource gvrSubscription schema.GroupVersionResource gvrOperatorGroup schema.GroupVersionResource + gvrInstallPlan schema.GroupVersionResource + gvrClusterServiceVersion schema.GroupVersionResource defaultImageRegistry string ) @@ -93,6 +95,16 @@ var _ = BeforeSuite(func() { Version: "v1", Resource: "operatorgroups", } + gvrInstallPlan = schema.GroupVersionResource{ + Group: "operators.coreos.com", + Version: "v1alpha1", + Resource: "installplans", + } + gvrClusterServiceVersion = schema.GroupVersionResource{ + Group: "operators.coreos.com", + Version: "v1alpha1", + Resource: 
"clusterserviceversions", + } gvrAPIService = schema.GroupVersionResource{ Group: "apiregistration.k8s.io", Version: "v1", diff --git a/test/resources/case38_operator_install/operator-policy-manual-upgrades.yaml b/test/resources/case38_operator_install/operator-policy-manual-upgrades.yaml new file mode 100644 index 00000000..2e0f6eb0 --- /dev/null +++ b/test/resources/case38_operator_install/operator-policy-manual-upgrades.yaml @@ -0,0 +1,26 @@ +apiVersion: policy.open-cluster-management.io/v1beta1 +kind: OperatorPolicy +metadata: + name: oppol-manual-upgrades + annotations: + policy.open-cluster-management.io/parent-policy-compliance-db-id: "124" + policy.open-cluster-management.io/policy-compliance-db-id: "64" + ownerReferences: + - apiVersion: policy.open-cluster-management.io/v1 + kind: Policy + name: parent-policy + uid: 12345678-90ab-cdef-1234-567890abcdef # must be replaced before creation +spec: + remediationAction: enforce + severity: medium + complianceType: musthave + subscription: + channel: strimzi-0.36.x + installPlanApproval: Manual + name: strimzi-kafka-operator + namespace: operator-policy-testns + source: operatorhubio-catalog + sourceNamespace: olm + startingCSV: strimzi-cluster-operator.v0.0.0.1337 # shouldn't match a real version + versions: + - strimzi-cluster-operator.v0.36.0