diff --git a/Dockerfile b/Dockerfile index 961037796a..b28073b651 100644 --- a/Dockerfile +++ b/Dockerfile @@ -33,6 +33,7 @@ COPY --from=builder /build/bin/olm /bin/olm COPY --from=builder /build/bin/catalog /bin/catalog COPY --from=builder /build/bin/package-server /bin/package-server COPY --from=builder /build/bin/cpb /bin/cpb +USER 1001 EXPOSE 8080 EXPOSE 5443 CMD ["/bin/olm"] diff --git a/Dockerfile.goreleaser b/Dockerfile.goreleaser index 6a3238d015..a117f6623e 100644 --- a/Dockerfile.goreleaser +++ b/Dockerfile.goreleaser @@ -10,4 +10,5 @@ COPY package-server /bin/package-server COPY cpb /bin/cpb EXPOSE 8080 EXPOSE 5443 +USER 1001 ENTRYPOINT ["/bin/olm"] diff --git a/cmd/catalog/main.go b/cmd/catalog/main.go index ead86cd763..20134d71d2 100644 --- a/cmd/catalog/main.go +++ b/cmd/catalog/main.go @@ -30,6 +30,7 @@ const ( defaultOPMImage = "quay.io/operator-framework/upstream-opm-builder:latest" defaultUtilImage = "quay.io/operator-framework/olm:latest" defaultOperatorName = "" + defaultWorkLoadUserID = int64(1001) ) // config flags defined globally so that they appear on the test binary as well @@ -83,6 +84,10 @@ func (o *options) run(ctx context.Context, logger *logrus.Logger) error { return fmt.Errorf("error configuring client: %s", err.Error()) } + workloadUserID := int64(-1) + if o.setWorkloadUserID { + workloadUserID = defaultWorkLoadUserID + } // TODO(tflannag): Use options pattern for catalog operator // Create a new instance of the operator. 
op, err := catalog.NewOperator( @@ -98,6 +103,7 @@ func (o *options) run(ctx context.Context, logger *logrus.Logger) error { k8sscheme.Scheme, o.installPlanTimeout, o.bundleUnpackTimeout, + workloadUserID, ) if err != nil { return fmt.Errorf("error configuring catalog operator: %s", err.Error()) } diff --git a/cmd/catalog/start.go b/cmd/catalog/start.go index 45a0953c08..7161131880 100644 --- a/cmd/catalog/start.go +++ b/cmd/catalog/start.go @@ -25,6 +25,7 @@ type options struct { tlsKeyPath string tlsCertPath string clientCAPath string + setWorkloadUserID bool installPlanTimeout time.Duration bundleUnpackTimeout time.Duration @@ -66,6 +67,7 @@ func newRootCmd() *cobra.Command { cmd.Flags().StringVar(&o.opmImage, "opmImage", defaultOPMImage, "the image to use for unpacking bundle content with opm") cmd.Flags().StringVar(&o.utilImage, "util-image", defaultUtilImage, "an image containing custom olm utilities") cmd.Flags().StringVar(&o.writeStatusName, "writeStatusName", defaultOperatorName, "ClusterOperator name in which to write status, set to \"\" to disable.") + cmd.Flags().BoolVar(&o.setWorkloadUserID, "set-workload-user-id", false, "set user ID for all workloads (registry pods/bundle unpack jobs) to default 1001") cmd.Flags().BoolVar(&o.debug, "debug", false, "use debug log level") cmd.Flags().BoolVar(&o.version, "version", false, "displays the olm version") diff --git a/deploy/chart/templates/0000_50_olm_00-namespace.yaml b/deploy/chart/templates/0000_50_olm_00-namespace.yaml index d42268a59b..6575fb2ea4 100644 --- a/deploy/chart/templates/0000_50_olm_00-namespace.yaml +++ b/deploy/chart/templates/0000_50_olm_00-namespace.yaml @@ -2,9 +2,15 @@ apiVersion: v1 kind: Namespace metadata: name: {{ .Values.namespace }} + labels: + pod-security.kubernetes.io/enforce: restricted + pod-security.kubernetes.io/enforce-version: latest --- apiVersion: v1 kind: Namespace metadata: name: {{ .Values.operator_namespace }} + labels: + pod-security.kubernetes.io/enforce: baseline + 
pod-security.kubernetes.io/enforce-version: latest diff --git a/deploy/chart/templates/0000_50_olm_07-olm-operator.deployment.yaml b/deploy/chart/templates/0000_50_olm_07-olm-operator.deployment.yaml index b71fbebd0d..825d39f0df 100644 --- a/deploy/chart/templates/0000_50_olm_07-olm-operator.deployment.yaml +++ b/deploy/chart/templates/0000_50_olm_07-olm-operator.deployment.yaml @@ -17,6 +17,10 @@ spec: labels: app: olm-operator spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount {{- if or .Values.olm.tlsSecret .Values.olm.clientCASecret }} volumes: @@ -33,6 +37,10 @@ spec: {{- end }} containers: - name: olm-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] {{- if or .Values.olm.tlsSecret .Values.olm.clientCASecret }} volumeMounts: {{- end }} diff --git a/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml b/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml index e71ffb38b1..f3fbe99993 100644 --- a/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml +++ b/deploy/chart/templates/0000_50_olm_08-catalog-operator.deployment.yaml @@ -17,6 +17,10 @@ spec: labels: app: catalog-operator spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount {{- if or .Values.catalog.tlsSecret .Values.catalog.clientCASecret }} volumes: @@ -33,6 +37,10 @@ spec: {{- end }} containers: - name: catalog-operator + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] {{- if or .Values.catalog.tlsSecret .Values.catalog.clientCASecret }} volumeMounts: {{- end }} @@ -76,6 +84,8 @@ spec: - --client-ca - /profile-collector-cert/tls.crt {{- end }} + - --set-workload-user-id + - "true" image: {{ .Values.catalog.image.ref }} imagePullPolicy: {{ .Values.catalog.image.pullPolicy }} ports: diff --git 
a/deploy/chart/templates/_packageserver.deployment-spec.yaml b/deploy/chart/templates/_packageserver.deployment-spec.yaml index 6121a85bb5..75cdc5b508 100644 --- a/deploy/chart/templates/_packageserver.deployment-spec.yaml +++ b/deploy/chart/templates/_packageserver.deployment-spec.yaml @@ -14,6 +14,10 @@ spec: labels: app: packageserver spec: + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault serviceAccountName: olm-operator-serviceaccount {{- if .Values.package.nodeSelector }} nodeSelector: @@ -25,6 +29,10 @@ spec: {{- end }} containers: - name: packageserver + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: [ "ALL" ] command: - /bin/package-server - -v=4 @@ -61,10 +69,6 @@ spec: resources: {{ toYaml .Values.package.resources | indent 10 }} {{- end }} - {{- if .Values.package.securityContext }} - securityContext: - runAsUser: {{ .Values.package.securityContext.runAsUser }} - {{- end }} volumeMounts: - name: tmpfs mountPath: /tmp diff --git a/e2e.Dockerfile b/e2e.Dockerfile index 6428974668..4f450f2d9a 100644 --- a/e2e.Dockerfile +++ b/e2e.Dockerfile @@ -3,4 +3,5 @@ FROM busybox COPY olm catalog package-server wait cpb /bin/ EXPOSE 8080 EXPOSE 5443 +USER 1001 CMD ["/bin/olm"] diff --git a/pkg/controller/bundle/bundle_unpacker.go b/pkg/controller/bundle/bundle_unpacker.go index 55665aada6..bf797ba1fd 100644 --- a/pkg/controller/bundle/bundle_unpacker.go +++ b/pkg/controller/bundle/bundle_unpacker.go @@ -22,6 +22,7 @@ import ( listersbatchv1 "k8s.io/client-go/listers/batch/v1" listerscorev1 "k8s.io/client-go/listers/core/v1" listersrbacv1 "k8s.io/client-go/listers/rbac/v1" + "k8s.io/utils/pointer" "github.com/operator-framework/api/pkg/operators/reference" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" @@ -101,6 +102,11 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string // See: 
https://kubernetes.io/docs/concepts/workloads/controllers/job/#pod-backoff-failure-policy RestartPolicy: corev1.RestartPolicyNever, ImagePullSecrets: secrets, + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -129,6 +135,12 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -148,6 +160,12 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -170,6 +188,12 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -193,7 +217,10 @@ func (c *ConfigMapUnpacker) job(cmRef *corev1.ObjectReference, bundlePath string job.SetNamespace(cmRef.Namespace) job.SetName(cmRef.Name) job.SetOwnerReferences([]metav1.OwnerReference{ownerRef(cmRef)}) - + if c.runAsUser > 0 { + job.Spec.Template.Spec.SecurityContext.RunAsUser = &c.runAsUser + job.Spec.Template.Spec.SecurityContext.RunAsNonRoot = pointer.Bool(true) + } // By default the BackoffLimit is set to 6 which with exponential backoff 10s + 20s + 40s ... // translates to ~10m of waiting time. 
// We want to fail faster than that when we have repeated failures from the bundle unpack pod @@ -246,6 +273,7 @@ type ConfigMapUnpacker struct { loader *configmap.BundleLoader now func() metav1.Time unpackTimeout time.Duration + runAsUser int64 } type ConfigMapUnpackerOption func(*ConfigMapUnpacker) @@ -335,6 +363,12 @@ func WithNow(now func() metav1.Time) ConfigMapUnpackerOption { } } +func WithUserID(id int64) ConfigMapUnpackerOption { + return func(unpacker *ConfigMapUnpacker) { + unpacker.runAsUser = id + } +} + func (c *ConfigMapUnpacker) apply(options ...ConfigMapUnpackerOption) { for _, option := range options { option(c) diff --git a/pkg/controller/bundle/bundle_unpacker_test.go b/pkg/controller/bundle/bundle_unpacker_test.go index 4e580ffc36..85fe2bbf62 100644 --- a/pkg/controller/bundle/bundle_unpacker_test.go +++ b/pkg/controller/bundle/bundle_unpacker_test.go @@ -15,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/informers" k8sfake "k8s.io/client-go/kubernetes/fake" + "k8s.io/utils/pointer" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" crfake "github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned/fake" @@ -32,6 +33,7 @@ const ( opmImage = "opm-image" utilImage = "util-image" bundlePath = "bundle-path" + runAsUser = 1001 ) func TestConfigMapUnpacker(t *testing.T) { @@ -220,6 +222,13 @@ func TestConfigMapUnpacker(t *testing.T) { Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, ImagePullSecrets: []corev1.LocalObjectReference{{Name: "my-secret"}}, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -243,6 +252,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + 
SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -262,6 +277,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -284,6 +305,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -397,6 +424,13 @@ func TestConfigMapUnpacker(t *testing.T) { }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -420,6 +454,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -439,6 +479,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -461,6 +507,12 @@ func TestConfigMapUnpacker(t *testing.T) { 
corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -615,6 +667,13 @@ func TestConfigMapUnpacker(t *testing.T) { }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -638,6 +697,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -657,6 +722,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -679,6 +750,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -827,6 +904,13 @@ func TestConfigMapUnpacker(t *testing.T) { }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: 
corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -850,6 +934,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -869,6 +959,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -891,6 +987,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -1009,6 +1111,13 @@ func TestConfigMapUnpacker(t *testing.T) { }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -1032,6 +1141,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -1051,6 +1166,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: 
&corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -1073,6 +1194,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -1202,6 +1329,13 @@ func TestConfigMapUnpacker(t *testing.T) { }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, + SecurityContext: &corev1.PodSecurityContext{ + RunAsNonRoot: pointer.Bool(true), + RunAsUser: pointer.Int64(runAsUser), + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, Containers: []corev1.Container{ { Name: "extract", @@ -1225,6 +1359,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, InitContainers: []corev1.Container{ @@ -1244,6 +1384,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, { Name: "pull", @@ -1266,6 +1412,12 @@ func TestConfigMapUnpacker(t *testing.T) { corev1.ResourceMemory: resource.MustParse("50Mi"), }, }, + SecurityContext: &corev1.SecurityContext{ + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, + }, }, }, Volumes: []corev1.Volume{ @@ -1341,6 +1493,7 @@ func TestConfigMapUnpacker(t *testing.T) { WithUtilImage(utilImage), 
WithNow(now), WithUnpackTimeout(defaultUnpackDuration), + WithUserID(int64(runAsUser)), ) require.NoError(t, err) diff --git a/pkg/controller/operators/catalog/operator.go b/pkg/controller/operators/catalog/operator.go index 658cd93a34..536d51bece 100644 --- a/pkg/controller/operators/catalog/operator.go +++ b/pkg/controller/operators/catalog/operator.go @@ -125,7 +125,7 @@ type Operator struct { type CatalogSourceSyncFunc func(logger *logrus.Entry, in *v1alpha1.CatalogSource) (out *v1alpha1.CatalogSource, continueSync bool, syncError error) // NewOperator creates a new Catalog Operator. -func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clock, logger *logrus.Logger, resync time.Duration, configmapRegistryImage, opmImage, utilImage string, operatorNamespace string, scheme *runtime.Scheme, installPlanTimeout time.Duration, bundleUnpackTimeout time.Duration) (*Operator, error) { +func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clock, logger *logrus.Logger, resync time.Duration, configmapRegistryImage, opmImage, utilImage string, operatorNamespace string, scheme *runtime.Scheme, installPlanTimeout time.Duration, bundleUnpackTimeout time.Duration, workloadUserID int64) (*Operator, error) { resyncPeriod := queueinformer.ResyncWithJitter(resync, 0.2) config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) if err != nil { @@ -194,7 +194,7 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo op.sources = grpc.NewSourceStore(logger, 10*time.Second, 10*time.Minute, op.syncSourceState) op.sourceInvalidator = resolver.SourceProviderFromRegistryClientProvider(op.sources, logger) resolverSourceProvider := NewOperatorGroupToggleSourceProvider(op.sourceInvalidator, logger, op.lister.OperatorsV1().OperatorGroupLister()) - op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, opClient, configmapRegistryImage, op.now, ssaClient) + op.reconciler = 
reconciler.NewRegistryReconcilerFactory(lister, opClient, configmapRegistryImage, op.now, ssaClient, workloadUserID) res := resolver.NewOperatorStepResolver(lister, crClient, operatorNamespace, resolverSourceProvider, logger) op.resolver = resolver.NewInstrumentedResolver(res, metrics.RegisterDependencyResolutionSuccess, metrics.RegisterDependencyResolutionFailure) @@ -433,6 +433,7 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo bundle.WithUtilImage(utilImage), bundle.WithNow(op.now), bundle.WithUnpackTimeout(op.bundleUnpackTimeout), + bundle.WithUserID(workloadUserID), ) if err != nil { return nil, err @@ -1404,13 +1405,6 @@ type UnpackedBundleReference struct { Properties string `json:"properties"` } -/* unpackBundles makes one walk through the bundlelookups and attempts to progress them -Returns: - unpacked: bool - If the bundle was successfully unpacked - out: *v1alpha1.InstallPlan - the resulting installPlan - error: error -*/ - func (o *Operator) unpackBundles(plan *v1alpha1.InstallPlan) (bool, *v1alpha1.InstallPlan, error) { out := plan.DeepCopy() unpacked := true diff --git a/pkg/controller/operators/catalog/operator_test.go b/pkg/controller/operators/catalog/operator_test.go index b7e2431480..51b07250c7 100644 --- a/pkg/controller/operators/catalog/operator_test.go +++ b/pkg/controller/operators/catalog/operator_test.go @@ -1606,7 +1606,7 @@ func NewFakeOperator(ctx context.Context, namespace string, namespaces []string, } applier := controllerclient.NewFakeApplier(s, "testowner") - op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, op.opClient, "test:pod", op.now, applier) + op.reconciler = reconciler.NewRegistryReconcilerFactory(lister, op.opClient, "test:pod", op.now, applier, 1001) } op.RunInformers(ctx) @@ -1758,7 +1758,7 @@ func toManifest(t *testing.T, obj runtime.Object) string { } func pod(s v1alpha1.CatalogSource) *corev1.Pod { - pod := reconciler.Pod(&s, "registry-server", s.Spec.Image, 
s.GetName(), s.GetLabels(), s.GetAnnotations(), 5, 10) + pod := reconciler.Pod(&s, "registry-server", s.Spec.Image, s.GetName(), s.GetLabels(), s.GetAnnotations(), 5, 10, 1001) ownerutil.AddOwner(pod, &s, false, false) return pod } diff --git a/pkg/controller/registry/reconciler/configmap.go b/pkg/controller/registry/reconciler/configmap.go index 0f8518d4c5..f264d1bee3 100644 --- a/pkg/controller/registry/reconciler/configmap.go +++ b/pkg/controller/registry/reconciler/configmap.go @@ -23,6 +23,7 @@ import ( // configMapCatalogSourceDecorator wraps CatalogSource to add additional methods type configMapCatalogSourceDecorator struct { *v1alpha1.CatalogSource + runAsUser int64 } const ( @@ -101,7 +102,7 @@ func (s *configMapCatalogSourceDecorator) Service() *corev1.Service { } func (s *configMapCatalogSourceDecorator) Pod(image string) *corev1.Pod { - pod := Pod(s.CatalogSource, "configmap-registry-server", image, "", s.Labels(), s.Annotations(), 5, 5) + pod := Pod(s.CatalogSource, "configmap-registry-server", image, "", s.Labels(), s.Annotations(), 5, 5, s.runAsUser) pod.Spec.ServiceAccountName = s.GetName() + ConfigMapServerPostfix pod.Spec.Containers[0].Command = []string{"configmap-server", "-c", s.Spec.ConfigMap, "-n", s.GetNamespace()} ownerutil.AddOwner(pod, s.CatalogSource, false, false) @@ -162,10 +163,11 @@ func (s *configMapCatalogSourceDecorator) RoleBinding() *rbacv1.RoleBinding { } type ConfigMapRegistryReconciler struct { - now nowFunc - Lister operatorlister.OperatorLister - OpClient operatorclient.ClientInterface - Image string + now nowFunc + Lister operatorlister.OperatorLister + OpClient operatorclient.ClientInterface + Image string + createPodAsUser int64 } var _ RegistryEnsurer = &ConfigMapRegistryReconciler{} @@ -240,7 +242,7 @@ func (c *ConfigMapRegistryReconciler) currentPodsWithCorrectResourceVersion(sour // EnsureRegistryServer ensures that all components of registry server are up to date. 
func (c *ConfigMapRegistryReconciler) EnsureRegistryServer(catalogSource *v1alpha1.CatalogSource) error { - source := configMapCatalogSourceDecorator{catalogSource} + source := configMapCatalogSourceDecorator{catalogSource, c.createPodAsUser} image := c.Image if source.Spec.SourceType == "grpc" { @@ -389,7 +391,7 @@ func (c *ConfigMapRegistryReconciler) ensureService(source configMapCatalogSourc // CheckRegistryServer returns true if the given CatalogSource is considered healthy; false otherwise. func (c *ConfigMapRegistryReconciler) CheckRegistryServer(catalogSource *v1alpha1.CatalogSource) (healthy bool, err error) { - source := configMapCatalogSourceDecorator{catalogSource} + source := configMapCatalogSourceDecorator{catalogSource, c.createPodAsUser} image := c.Image if source.Spec.SourceType == "grpc" { diff --git a/pkg/controller/registry/reconciler/configmap_test.go b/pkg/controller/registry/reconciler/configmap_test.go index ddd0218661..281217a1bf 100644 --- a/pkg/controller/registry/reconciler/configmap_test.go +++ b/pkg/controller/registry/reconciler/configmap_test.go @@ -30,6 +30,7 @@ import ( const ( registryImageName = "test:image" + runAsUser = 1001 testNamespace = "testns" ) @@ -104,6 +105,7 @@ func fakeReconcilerFactory(t *testing.T, stopc <-chan struct{}, options ...fakeR OpClient: opClientFake, Lister: lister, ConfigMapServerImage: config.configMapServerImage, + createPodAsUser: runAsUser, } var hasSyncedCheckFns []cache.InformerSynced @@ -186,7 +188,7 @@ func objectsForCatalogSource(catsrc *v1alpha1.CatalogSource) []runtime.Object { var objs []runtime.Object switch catsrc.Spec.SourceType { case v1alpha1.SourceTypeInternal, v1alpha1.SourceTypeConfigmap: - decorated := configMapCatalogSourceDecorator{catsrc} + decorated := configMapCatalogSourceDecorator{catsrc, runAsUser} objs = clientfake.AddSimpleGeneratedNames( clientfake.AddSimpleGeneratedName(decorated.Pod(registryImageName)), decorated.Service(), @@ -196,7 +198,7 @@ func 
objectsForCatalogSource(catsrc *v1alpha1.CatalogSource) []runtime.Object { ) case v1alpha1.SourceTypeGrpc: if catsrc.Spec.Image != "" { - decorated := grpcCatalogSourceDecorator{catsrc} + decorated := grpcCatalogSourceDecorator{catsrc, runAsUser} objs = clientfake.AddSimpleGeneratedNames( decorated.Pod(catsrc.GetName()), decorated.Service(), @@ -451,7 +453,7 @@ func TestConfigMapRegistryReconciler(t *testing.T) { } // if no error, the reconciler should create the same set of kube objects every time - decorated := configMapCatalogSourceDecorator{tt.in.catsrc} + decorated := configMapCatalogSourceDecorator{tt.in.catsrc, runAsUser} pod := decorated.Pod(registryImageName) listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{CatalogSourceLabelKey: tt.in.catsrc.GetName()}).String()} diff --git a/pkg/controller/registry/reconciler/grpc.go b/pkg/controller/registry/reconciler/grpc.go index 4f63cc87aa..93e28e6286 100644 --- a/pkg/controller/registry/reconciler/grpc.go +++ b/pkg/controller/registry/reconciler/grpc.go @@ -32,6 +32,7 @@ const ( // grpcCatalogSourceDecorator wraps CatalogSource to add additional methods type grpcCatalogSourceDecorator struct { *v1alpha1.CatalogSource + createPodAsUser int64 } type UpdateNotReadyErr struct { @@ -122,16 +123,17 @@ func (s *grpcCatalogSourceDecorator) ServiceAccount() *corev1.ServiceAccount { } func (s *grpcCatalogSourceDecorator) Pod(saName string) *corev1.Pod { - pod := Pod(s.CatalogSource, "registry-server", s.Spec.Image, saName, s.Labels(), s.Annotations(), 5, 10) + pod := Pod(s.CatalogSource, "registry-server", s.Spec.Image, saName, s.Labels(), s.Annotations(), 5, 10, s.createPodAsUser) ownerutil.AddOwner(pod, s.CatalogSource, false, false) return pod } type GrpcRegistryReconciler struct { - now nowFunc - Lister operatorlister.OperatorLister - OpClient operatorclient.ClientInterface - SSAClient *controllerclient.ServerSideApplier + now nowFunc + Lister operatorlister.OperatorLister + OpClient 
operatorclient.ClientInterface + SSAClient *controllerclient.ServerSideApplier + createPodAsUser int64 } var _ RegistryReconciler = &GrpcRegistryReconciler{} @@ -198,7 +200,7 @@ func (c *GrpcRegistryReconciler) currentPodsWithCorrectImageAndSpec(source grpcC // EnsureRegistryServer ensures that all components of registry server are up to date. func (c *GrpcRegistryReconciler) EnsureRegistryServer(catalogSource *v1alpha1.CatalogSource) error { - source := grpcCatalogSourceDecorator{catalogSource} + source := grpcCatalogSourceDecorator{catalogSource, c.createPodAsUser} // if service status is nil, we force create every object to ensure they're created the first time overwrite := source.Status.RegistryServiceStatus == nil || !isRegistryServiceStatusValid(&source) @@ -447,7 +449,7 @@ func (c *GrpcRegistryReconciler) removePods(pods []*corev1.Pod, namespace string // CheckRegistryServer returns true if the given CatalogSource is considered healthy; false otherwise. func (c *GrpcRegistryReconciler) CheckRegistryServer(catalogSource *v1alpha1.CatalogSource) (healthy bool, err error) { - source := grpcCatalogSourceDecorator{catalogSource} + source := grpcCatalogSourceDecorator{catalogSource, c.createPodAsUser} // Check on registry resources // TODO: add gRPC health check if len(c.currentPodsWithCorrectImageAndSpec(source, source.ServiceAccount().GetName())) < 1 || diff --git a/pkg/controller/registry/reconciler/grpc_test.go b/pkg/controller/registry/reconciler/grpc_test.go index 42a2bde3ad..c219329b3d 100644 --- a/pkg/controller/registry/reconciler/grpc_test.go +++ b/pkg/controller/registry/reconciler/grpc_test.go @@ -331,7 +331,7 @@ func TestGrpcRegistryReconciler(t *testing.T) { } // Check for resource existence - decorated := grpcCatalogSourceDecorator{tt.in.catsrc} + decorated := grpcCatalogSourceDecorator{tt.in.catsrc, runAsUser} pod := decorated.Pod(tt.in.catsrc.GetName()) service := decorated.Service() sa := decorated.ServiceAccount() @@ -421,7 +421,7 @@ func 
TestRegistryPodPriorityClass(t *testing.T) { require.NoError(t, err) // Check for resource existence - decorated := grpcCatalogSourceDecorator{tt.in.catsrc} + decorated := grpcCatalogSourceDecorator{tt.in.catsrc, runAsUser} pod := decorated.Pod(tt.in.catsrc.GetName()) listOptions := metav1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set{CatalogSourceLabelKey: tt.in.catsrc.GetName()}).String()} outPods, podErr := client.KubernetesInterface().CoreV1().Pods(pod.GetNamespace()).List(context.TODO(), listOptions) diff --git a/pkg/controller/registry/reconciler/reconciler.go b/pkg/controller/registry/reconciler/reconciler.go index 6089d2e777..45bf8dcba4 100644 --- a/pkg/controller/registry/reconciler/reconciler.go +++ b/pkg/controller/registry/reconciler/reconciler.go @@ -10,6 +10,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/utils/pointer" operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" controllerclient "github.com/operator-framework/operator-lifecycle-manager/pkg/lib/controller-runtime/client" @@ -61,6 +62,7 @@ type registryReconcilerFactory struct { OpClient operatorclient.ClientInterface ConfigMapServerImage string SSAClient *controllerclient.ServerSideApplier + createPodAsUser int64 } // ReconcilerForSource returns a RegistryReconciler based on the configuration of the given CatalogSource. 
@@ -69,18 +71,20 @@ func (r *registryReconcilerFactory) ReconcilerForSource(source *operatorsv1alpha switch source.Spec.SourceType { case operatorsv1alpha1.SourceTypeInternal, operatorsv1alpha1.SourceTypeConfigmap: return &ConfigMapRegistryReconciler{ - now: r.now, - Lister: r.Lister, - OpClient: r.OpClient, - Image: r.ConfigMapServerImage, + now: r.now, + Lister: r.Lister, + OpClient: r.OpClient, + Image: r.ConfigMapServerImage, + createPodAsUser: r.createPodAsUser, } case operatorsv1alpha1.SourceTypeGrpc: if source.Spec.Image != "" { return &GrpcRegistryReconciler{ - now: r.now, - Lister: r.Lister, - OpClient: r.OpClient, - SSAClient: r.SSAClient, + now: r.now, + Lister: r.Lister, + OpClient: r.OpClient, + SSAClient: r.SSAClient, + createPodAsUser: r.createPodAsUser, } } else if source.Spec.Address != "" { return &GrpcAddressRegistryReconciler{ @@ -92,17 +96,18 @@ func (r *registryReconcilerFactory) ReconcilerForSource(source *operatorsv1alpha } // NewRegistryReconcilerFactory returns an initialized RegistryReconcilerFactory. 
-func NewRegistryReconcilerFactory(lister operatorlister.OperatorLister, opClient operatorclient.ClientInterface, configMapServerImage string, now nowFunc, ssaClient *controllerclient.ServerSideApplier) RegistryReconcilerFactory { +func NewRegistryReconcilerFactory(lister operatorlister.OperatorLister, opClient operatorclient.ClientInterface, configMapServerImage string, now nowFunc, ssaClient *controllerclient.ServerSideApplier, createPodAsUser int64) RegistryReconcilerFactory { return &registryReconcilerFactory{ now: now, Lister: lister, OpClient: opClient, ConfigMapServerImage: configMapServerImage, SSAClient: ssaClient, + createPodAsUser: createPodAsUser, } } -func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saName string, labels map[string]string, annotations map[string]string, readinessDelay int32, livenessDelay int32) *corev1.Pod { +func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saName string, labels map[string]string, annotations map[string]string, readinessDelay int32, livenessDelay int32, runAsUser int64) *corev1.Pod { // Ensure the catalog image is always pulled if the image is not based on a digest, measured by whether an "@" is included. // See https://github.com/docker/distribution/blob/master/reference/reference.go for more info. // This means recreating non-digest based catalog pods will result in the latest version of the catalog content being delivered on-cluster.
@@ -125,8 +130,6 @@ func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saN podAnnotations[key] = value } - readOnlyRootFilesystem := false - pod := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ GenerateName: source.GetName() + "-", @@ -179,7 +182,11 @@ func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saN }, }, SecurityContext: &corev1.SecurityContext{ - ReadOnlyRootFilesystem: &readOnlyRootFilesystem, + ReadOnlyRootFilesystem: pointer.Bool(false), + AllowPrivilegeEscalation: pointer.Bool(false), + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, }, ImagePullPolicy: pullPolicy, TerminationMessagePolicy: corev1.TerminationMessageFallbackToLogsOnError, @@ -188,10 +195,19 @@ func Pod(source *operatorsv1alpha1.CatalogSource, name string, image string, saN NodeSelector: map[string]string{ "kubernetes.io/os": "linux", }, + SecurityContext: &corev1.PodSecurityContext{ + SeccompProfile: &corev1.SeccompProfile{ + Type: corev1.SeccompProfileTypeRuntimeDefault, + }, + }, ServiceAccountName: saName, }, } + if runAsUser > 0 { + pod.Spec.SecurityContext.RunAsUser = &runAsUser + pod.Spec.SecurityContext.RunAsNonRoot = pointer.Bool(true) + } // Override scheduling options if specified if source.Spec.GrpcPodConfig != nil { grpcPodConfig := source.Spec.GrpcPodConfig diff --git a/pkg/controller/registry/reconciler/reconciler_test.go b/pkg/controller/registry/reconciler/reconciler_test.go index ad9c82c6de..1ed7eed2fa 100644 --- a/pkg/controller/registry/reconciler/reconciler_test.go +++ b/pkg/controller/registry/reconciler/reconciler_test.go @@ -10,6 +10,8 @@ import ( "github.com/operator-framework/api/pkg/operators/v1alpha1" ) +const workloadUserID = 1001 + func TestPodNodeSelector(t *testing.T) { catsrc := &v1alpha1.CatalogSource{ ObjectMeta: metav1.ObjectMeta{ @@ -21,7 +23,7 @@ func TestPodNodeSelector(t *testing.T) { key := "kubernetes.io/os" value := "linux" - gotCatSrcPod := Pod(catsrc, "hello", 
"busybox", "", map[string]string{}, map[string]string{}, int32(0), int32(0)) + gotCatSrcPod := Pod(catsrc, "hello", "busybox", "", map[string]string{}, map[string]string{}, int32(0), int32(0), int64(workloadUserID)) gotCatSrcPodSelector := gotCatSrcPod.Spec.NodeSelector if gotCatSrcPodSelector[key] != value { @@ -69,7 +71,7 @@ func TestPullPolicy(t *testing.T) { } for _, tt := range table { - p := Pod(source, "catalog", tt.image, "", nil, nil, int32(0), int32(0)) + p := Pod(source, "catalog", tt.image, "", nil, nil, int32(0), int32(0), int64(workloadUserID)) policy := p.Spec.Containers[0].ImagePullPolicy if policy != tt.policy { t.Fatalf("expected pull policy %s for image %s", tt.policy, tt.image) @@ -79,8 +81,13 @@ func TestPullPolicy(t *testing.T) { func TestPodContainerSecurityContext(t *testing.T) { expectedReadOnlyRootFilesystem := false + allowPrivilegeEscalation := false expectedContainerSecCtx := &corev1.SecurityContext{ - ReadOnlyRootFilesystem: &expectedReadOnlyRootFilesystem, + ReadOnlyRootFilesystem: &expectedReadOnlyRootFilesystem, + AllowPrivilegeEscalation: &allowPrivilegeEscalation, + Capabilities: &corev1.Capabilities{ + Drop: []corev1.Capability{"ALL"}, + }, } catsrc := &v1alpha1.CatalogSource{ @@ -90,7 +97,7 @@ func TestPodContainerSecurityContext(t *testing.T) { }, } - gotPod := Pod(catsrc, "hello", "busybox", "", map[string]string{}, map[string]string{}, int32(0), int32(0)) + gotPod := Pod(catsrc, "hello", "busybox", "", map[string]string{}, map[string]string{}, int32(0), int32(0), int64(workloadUserID)) gotContainerSecCtx := gotPod.Spec.Containers[0].SecurityContext require.Equal(t, expectedContainerSecCtx, gotContainerSecCtx) } @@ -115,7 +122,7 @@ func TestPodAvoidsConcurrentWrite(t *testing.T) { "annotation": "somethingelse", } - gotPod := Pod(catsrc, "hello", "busybox", "", labels, annotations, int32(0), int32(0)) + gotPod := Pod(catsrc, "hello", "busybox", "", labels, annotations, int32(0), int32(0), int64(workloadUserID)) // check labels 
and annotations point to different addresses between parameters and what's in the pod require.NotEqual(t, &labels, &gotPod.Labels) @@ -295,7 +302,7 @@ func TestPodSchedulingOverrides(t *testing.T) { } for _, testCase := range testCases { - pod := Pod(testCase.catalogSource, "hello", "busybox", "", map[string]string{}, testCase.annotations, int32(0), int32(0)) + pod := Pod(testCase.catalogSource, "hello", "busybox", "", map[string]string{}, testCase.annotations, int32(0), int32(0), int64(workloadUserID)) require.Equal(t, testCase.expectedNodeSelectors, pod.Spec.NodeSelector) require.Equal(t, testCase.expectedPriorityClassName, pod.Spec.PriorityClassName) require.Equal(t, testCase.expectedTolerations, pod.Spec.Tolerations) diff --git a/test/e2e/operator_groups_e2e_test.go b/test/e2e/operator_groups_e2e_test.go index 413374eb8f..553db457ea 100644 --- a/test/e2e/operator_groups_e2e_test.go +++ b/test/e2e/operator_groups_e2e_test.go @@ -1799,8 +1799,7 @@ var _ = Describe("Operator Group", func() { // Versions of OLM at 0.14.1 and older had a bug that would place the wrong namespace annotation on copied CSVs, // preventing them from being GCd. This ensures that any leftover CSVs in that state are properly cleared up. 
- // issue: https://github.com/operator-framework/operator-lifecycle-manager/issues/2644 - It("[FLAKE] cleanup csvs with bad owner operator groups", func() { + It("cleanup csvs with bad namespace annotation", func() { csvName := genName("another-csv-") // must be lowercase for DNS-1123 validation @@ -2013,18 +2012,28 @@ var _ = Describe("Operator Group", func() { return false, nil }) require.NoError(GinkgoT(), err) - - // Give copied CSV a bad operatorgroup annotation - updateCSV := func() error { - fetchedCSV, err := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{}) - require.NoError(GinkgoT(), err) + GinkgoT().Log("Copied CSV showed up in other namespace, giving copied CSV a bad OperatorGroup annotation") + err = wait.Poll(pollInterval, pollDuration, func() (bool, error) { + fetchedCSV, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{}) + if fetchErr != nil { + return false, fetchErr + } fetchedCSV.Annotations[v1.OperatorGroupNamespaceAnnotationKey] = fetchedCSV.GetNamespace() - _, err = crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Update(context.TODO(), fetchedCSV, metav1.UpdateOptions{}) - return err - } - require.NoError(GinkgoT(), retry.RetryOnConflict(retry.DefaultBackoff, updateCSV)) - - // wait for CSV to be gc'd + _, updateErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Update(context.TODO(), fetchedCSV, metav1.UpdateOptions{}) + if updateErr != nil { + return false, updateErr + } + updatedCSV, updatedfetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{}) + if updatedfetchErr != nil { + return false, updatedfetchErr + } + if updatedCSV.Annotations[v1.OperatorGroupNamespaceAnnotationKey] == fetchedCSV.GetNamespace() { + return true, nil + } + return false, nil + }) + require.NoError(GinkgoT(),
err) + GinkgoT().Log("Done updating copied CSV with bad OperatorGroup annotation, waiting for CSV to be gc'd") err = wait.Poll(pollInterval, 2*pollDuration, func() (bool, error) { csv, fetchErr := crc.OperatorsV1alpha1().ClusterServiceVersions(otherNamespaceName).Get(context.TODO(), csvName, metav1.GetOptions{}) if fetchErr != nil { @@ -2034,8 +2043,12 @@ var _ = Describe("Operator Group", func() { GinkgoT().Logf("Error (in %v): %v", opGroupNamespace, fetchErr.Error()) return false, fetchErr } - GinkgoT().Logf("%#v", csv.Annotations) - GinkgoT().Logf(csv.GetNamespace()) + // The CSV with the wrong annotation could have been replaced with a new copied CSV by this time + // If we find a CSV in the namespace, and it contains the correct annotation, it means the CSV + // with the wrong annotation was GCed + if csv.Annotations[v1.OperatorGroupNamespaceAnnotationKey] != csv.GetNamespace() { + return true, nil + } return false, nil }) require.NoError(GinkgoT(), err)