Skip to content

Commit

Permalink
pkg/controller: label RBAC with content hash (#3034)
Browse files Browse the repository at this point in the history
When a CSV is processed, it is assumed that the InstallPlan has already
run, or that a user that's creating a CSV as their entrypoint into the
system has otherwise met all the preconditions for the CSV to exist.

As part of validating these preconditions, the CSV logic today uses
cluster-scoped listers for all RBAC resources, sets up an in-memory
authorizer for these, and queries the CSV install strategy's
permissions against those.

We would like to restrict the amount of memory OLM uses, and part of
that is not caching the world. For the above approach to work, all RBAC
objects fulfilling CSV permission preconditions would need to be
labelled. In the case that a user is creating a CSV manually, this will
not be the case.

We can use the SubjectAccessReview API to check for the presence of
permissions without caching the world, but since a PolicyRule has slices
of verbs, resources, subjects, etc and the SAR endpoint accepts but one
of each, there will be (in the general case) a combinatorial explosion
of calls to issue enough SARs to cover the full set of permissions.

Therefore, we need to limit the amount of times we take that action. A
simple optimization is to check for permissions created directly by OLM,
as that's by far the most common entrypoint into the system (a user
creates a Subscription, that triggers an InstallPlan, which creates the
RBAC).

As OLM chose to name the RBAC objects with random strings of characters,
it's not possible to look at a list of permissions in a CSV and know
which resources OLM would have created. Therefore, this PR adds a label
to all relevant RBAC resources with the hash of their content. We
already have the name of the CSV, but since CSV content is ostensibly
mutable, this is not enough.

Signed-off-by: Steve Kuznetsov <skuznets@redhat.com>
  • Loading branch information
stevekuznetsov committed Sep 18, 2023
1 parent 46f4f89 commit 8eb4f3e
Show file tree
Hide file tree
Showing 6 changed files with 183 additions and 8 deletions.
29 changes: 26 additions & 3 deletions pkg/controller/operators/catalog/operator.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,6 @@ type Operator struct {
client versioned.Interface
dynamicClient dynamic.Interface
lister operatorlister.OperatorLister
k8sLabelQueueSets map[schema.GroupVersionResource]workqueue.RateLimitingInterface
catsrcQueueSet *queueinformer.ResourceQueueSet
subQueueSet *queueinformer.ResourceQueueSet
ipQueueSet *queueinformer.ResourceQueueSet
Expand Down Expand Up @@ -202,7 +201,6 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo
lister: lister,
namespace: operatorNamespace,
recorder: eventRecorder,
k8sLabelQueueSets: map[schema.GroupVersionResource]workqueue.RateLimitingInterface{},
catsrcQueueSet: queueinformer.NewEmptyResourceQueueSet(),
subQueueSet: queueinformer.NewEmptyResourceQueueSet(),
ipQueueSet: queueinformer.NewEmptyResourceQueueSet(),
Expand Down Expand Up @@ -386,11 +384,12 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo
if canFilter {
return nil
}
op.k8sLabelQueueSets[gvr] = workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{
queue := workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{
Name: gvr.String(),
})
queueInformer, err := queueinformer.NewQueueInformer(
ctx,
queueinformer.WithQueue(queue),
queueinformer.WithLogger(op.logger),
queueinformer.WithInformer(informer),
queueinformer.WithSyncer(sync.ToSyncer()),
Expand All @@ -416,6 +415,18 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo
)); err != nil {
return nil, err
}
if err := labelObjects(rolesgvk, roleInformer.Informer(), labeller.ContentHashLabeler[*rbacv1.Role, *rbacv1applyconfigurations.RoleApplyConfiguration](
ctx, op.logger, labeller.ContentHashFilter,
func(role *rbacv1.Role) (string, error) {
return resolver.PolicyRuleHashLabelValue(role.Rules)
},
rbacv1applyconfigurations.Role,
func(namespace string, ctx context.Context, cfg *rbacv1applyconfigurations.RoleApplyConfiguration, opts metav1.ApplyOptions) (*rbacv1.Role, error) {
return op.opClient.KubernetesInterface().RbacV1().Roles(namespace).Apply(ctx, cfg, opts)
},
)); err != nil {
return nil, err
}

// Wire RoleBindings
roleBindingInformer := k8sInformerFactory.Rbac().V1().RoleBindings()
Expand All @@ -432,6 +443,18 @@ func NewOperator(ctx context.Context, kubeconfigPath string, clock utilclock.Clo
)); err != nil {
return nil, err
}
if err := labelObjects(rolebindingsgvk, roleBindingInformer.Informer(), labeller.ContentHashLabeler[*rbacv1.RoleBinding, *rbacv1applyconfigurations.RoleBindingApplyConfiguration](
ctx, op.logger, labeller.ContentHashFilter,
func(roleBinding *rbacv1.RoleBinding) (string, error) {
return resolver.RoleReferenceAndSubjectHashLabelValue(roleBinding.RoleRef, roleBinding.Subjects)
},
rbacv1applyconfigurations.RoleBinding,
func(namespace string, ctx context.Context, cfg *rbacv1applyconfigurations.RoleBindingApplyConfiguration, opts metav1.ApplyOptions) (*rbacv1.RoleBinding, error) {
return op.opClient.KubernetesInterface().RbacV1().RoleBindings(namespace).Apply(ctx, cfg, opts)
},
)); err != nil {
return nil, err
}

// Wire ServiceAccounts
serviceAccountInformer := k8sInformerFactory.Core().V1().ServiceAccounts()
Expand Down
16 changes: 16 additions & 0 deletions pkg/controller/operators/labeller/filters.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,10 @@ import (
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/internal/alongside"
)

// ContentHashFilter reports whether an object still needs a content-hash
// label: it must look OLM-owned (per HasOLMOwnerRef — presumably an OLM
// owner reference check; defined elsewhere in this package) and must not
// already carry the hash label.
func ContentHashFilter(object metav1.Object) bool {
	return HasOLMOwnerRef(object) && !hasHashLabel(object)
}

func Filter(gvr schema.GroupVersionResource) func(metav1.Object) bool {
if f, ok := filters[gvr]; ok {
return f
Expand Down Expand Up @@ -80,6 +84,18 @@ func Validate(ctx context.Context, logger *logrus.Logger, metadataClient metadat
allFilters[batchv1.SchemeGroupVersion.WithResource("jobs")] = JobFilter(func(namespace, name string) (metav1.Object, error) {
return metadataClient.Resource(corev1.SchemeGroupVersion.WithResource("configmaps")).Namespace(namespace).Get(ctx, name, metav1.GetOptions{})
})

for _, gvr := range []schema.GroupVersionResource{
rbacv1.SchemeGroupVersion.WithResource("roles"),
rbacv1.SchemeGroupVersion.WithResource("rolebindings"),
rbacv1.SchemeGroupVersion.WithResource("clusterroles"),
rbacv1.SchemeGroupVersion.WithResource("clusterrolebindings"),
} {
previous := allFilters[gvr]
allFilters[gvr] = func(object metav1.Object) bool {
return previous != nil && previous(object) && ContentHashFilter(object)
}
}
for gvr, filter := range allFilters {
gvr, filter := gvr, filter
g.Go(func() error {
Expand Down
56 changes: 56 additions & 0 deletions pkg/controller/operators/labeller/rbac.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
package labeller

import (
"context"
"fmt"

"github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry/resolver"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/queueinformer"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// hasHashLabel reports whether the object already carries the
// content-hash label applied by OLM.
func hasHashLabel(obj metav1.Object) bool {
	labels := obj.GetLabels()
	_, found := labels[resolver.ContentHashLabelKey]
	return found
}

// ContentHashLabeler returns a sync handler that stamps RBAC objects with a
// label holding the hash of their content, so that later CSV processing can
// find OLM-created permissions by hash instead of caching all RBAC objects.
//
// Type parameters:
//   - T: the concrete object type handled (e.g. *rbacv1.Role).
//   - A: the matching server-side-apply configuration type.
//
// Arguments:
//   - ctx: context propagated into each apply call.
//   - logger: used to report type-assertion failures.
//   - check: additional predicate an object must pass before being labelled
//     (e.g. ContentHashFilter).
//   - hasher: computes the content-hash label value for the object.
//   - applyConfigFor: builds an apply configuration for the object's
//     name/namespace.
//   - apply: performs the server-side apply in the given namespace.
func ContentHashLabeler[T metav1.Object, A ApplyConfig[A]](
	ctx context.Context,
	logger *logrus.Logger,
	check func(metav1.Object) bool,
	hasher func(object T) (string, error),
	applyConfigFor func(name, namespace string) A,
	apply func(namespace string, ctx context.Context, cfg A, opts metav1.ApplyOptions) (T, error),
) queueinformer.LegacySyncHandler {
	return func(obj interface{}) error {
		cast, ok := obj.(T)
		if !ok {
			err := fmt.Errorf("wrong type %T, expected %T: %#v", obj, new(T), obj)
			logger.WithError(err).Error("casting failed")
			return fmt.Errorf("casting failed: %w", err)
		}

		// Only label objects attributed to a CSV via the owner-kind label;
		// everything else is out of scope for this labeller.
		if _, _, ok := ownerutil.GetOwnerByKindLabel(cast, v1alpha1.ClusterServiceVersionKind); !ok {
			return nil
		}

		// Skip objects that fail the caller's filter or are already labelled
		// (the hash-label check makes the handler idempotent).
		if !check(cast) || hasHashLabel(cast) {
			return nil
		}

		hash, err := hasher(cast)
		if err != nil {
			return fmt.Errorf("failed to calculate hash: %w", err)
		}

		// Server-side apply only the hash label; other fields are untouched.
		cfg := applyConfigFor(cast.GetName(), cast.GetNamespace())
		cfg.WithLabels(map[string]string{
			resolver.ContentHashLabelKey: hash,
		})
		_, err = apply(cast.GetNamespace(), ctx, cfg, metav1.ApplyOptions{})
		return err
	}
}
33 changes: 30 additions & 3 deletions pkg/controller/operators/olm/operator.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,7 +78,6 @@ type Operator struct {
opClient operatorclient.ClientInterface
client versioned.Interface
lister operatorlister.OperatorLister
k8sLabelQueueSets map[schema.GroupVersionResource]workqueue.RateLimitingInterface
protectedCopiedCSVNamespaces map[string]struct{}
copiedCSVLister metadatalister.Lister
ogQueueSet *queueinformer.ResourceQueueSet
Expand Down Expand Up @@ -162,7 +161,6 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat
resolver: config.strategyResolver,
apiReconciler: config.apiReconciler,
lister: lister,
k8sLabelQueueSets: map[schema.GroupVersionResource]workqueue.RateLimitingInterface{},
recorder: eventRecorder,
apiLabeler: config.apiLabeler,
csvIndexers: map[string]cache.Indexer{},
Expand Down Expand Up @@ -453,11 +451,12 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat
if canFilter {
return nil
}
op.k8sLabelQueueSets[gvr] = workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{
queue := workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{
Name: gvr.String(),
})
queueInformer, err := queueinformer.NewQueueInformer(
ctx,
queueinformer.WithQueue(queue),
queueinformer.WithLogger(op.logger),
queueinformer.WithInformer(informer),
queueinformer.WithSyncer(sync.ToSyncer()),
Expand Down Expand Up @@ -557,6 +556,20 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat
)); err != nil {
return nil, err
}
if err := labelObjects(clusterrolesgvk, clusterRoleInformer.Informer(), labeller.ContentHashLabeler[*rbacv1.ClusterRole, *rbacv1applyconfigurations.ClusterRoleApplyConfiguration](
ctx, op.logger, labeller.ContentHashFilter,
func(clusterRole *rbacv1.ClusterRole) (string, error) {
return resolver.PolicyRuleHashLabelValue(clusterRole.Rules)
},
func(name, _ string) *rbacv1applyconfigurations.ClusterRoleApplyConfiguration {
return rbacv1applyconfigurations.ClusterRole(name)
},
func(_ string, ctx context.Context, cfg *rbacv1applyconfigurations.ClusterRoleApplyConfiguration, opts metav1.ApplyOptions) (*rbacv1.ClusterRole, error) {
return op.opClient.KubernetesInterface().RbacV1().ClusterRoles().Apply(ctx, cfg, opts)
},
)); err != nil {
return nil, err
}

clusterRoleBindingInformer := k8sInformerFactory.Rbac().V1().ClusterRoleBindings()
informersByNamespace[metav1.NamespaceAll].ClusterRoleBindingInformer = clusterRoleBindingInformer
Expand Down Expand Up @@ -586,6 +599,20 @@ func newOperatorWithConfig(ctx context.Context, config *operatorConfig) (*Operat
)); err != nil {
return nil, err
}
if err := labelObjects(clusterrolebindingssgvk, clusterRoleBindingInformer.Informer(), labeller.ContentHashLabeler[*rbacv1.ClusterRoleBinding, *rbacv1applyconfigurations.ClusterRoleBindingApplyConfiguration](
ctx, op.logger, labeller.ContentHashFilter,
func(clusterRoleBinding *rbacv1.ClusterRoleBinding) (string, error) {
return resolver.RoleReferenceAndSubjectHashLabelValue(clusterRoleBinding.RoleRef, clusterRoleBinding.Subjects)
},
func(name, _ string) *rbacv1applyconfigurations.ClusterRoleBindingApplyConfiguration {
return rbacv1applyconfigurations.ClusterRoleBinding(name)
},
func(_ string, ctx context.Context, cfg *rbacv1applyconfigurations.ClusterRoleBindingApplyConfiguration, opts metav1.ApplyOptions) (*rbacv1.ClusterRoleBinding, error) {
return op.opClient.KubernetesInterface().RbacV1().ClusterRoleBindings().Apply(ctx, cfg, opts)
},
)); err != nil {
return nil, err
}

// register namespace queueinformer
namespaceInformer := k8sInformerFactory.Core().V1().Namespaces()
Expand Down
54 changes: 54 additions & 0 deletions pkg/controller/registry/resolver/rbac.go
Original file line number Diff line number Diff line change
@@ -1,8 +1,11 @@
package resolver

import (
"crypto/sha256"
"encoding/json"
"fmt"
"hash/fnv"
"math/big"

corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
Expand Down Expand Up @@ -62,6 +65,37 @@ func (o *OperatorPermissions) AddClusterRoleBinding(clusterRoleBinding *rbacv1.C
o.ClusterRoleBindings = append(o.ClusterRoleBindings, clusterRoleBinding)
}

const ContentHashLabelKey = "olm.permissions.hash"

// PolicyRuleHashLabelValue serializes the policy rules to JSON and returns
// a base62-encoded SHA-224 digest of that serialization, suitable for use
// as a Kubernetes label value.
func PolicyRuleHashLabelValue(rules []rbacv1.PolicyRule) (string, error) {
	encoded, err := json.Marshal(rules)
	if err != nil {
		return "", err
	}
	digest := sha256.Sum224(encoded)
	return toBase62(digest), nil
}

// RoleReferenceAndSubjectHashLabelValue hashes a binding's role reference
// together with its subjects, returning a base62-encoded SHA-224 digest of
// the pair's JSON serialization for use as a label value.
func RoleReferenceAndSubjectHashLabelValue(roleRef rbacv1.RoleRef, subjects []rbacv1.Subject) (string, error) {
	container := struct {
		RoleRef  rbacv1.RoleRef
		Subjects []rbacv1.Subject
	}{
		RoleRef:  roleRef,
		Subjects: subjects,
	}
	encoded, err := json.Marshal(&container)
	if err != nil {
		return "", err
	}
	digest := sha256.Sum224(encoded)
	return toBase62(digest), nil
}

// toBase62 renders a 28-byte (SHA-224) digest as a base62 string using
// math/big's digit alphabet (0-9, a-z, A-Z).
func toBase62(hash [28]byte) string {
	return new(big.Int).SetBytes(hash[:]).Text(62)
}

func RBACForClusterServiceVersion(csv *v1alpha1.ClusterServiceVersion) (map[string]*OperatorPermissions, error) {
permissions := map[string]*OperatorPermissions{}

Expand Down Expand Up @@ -100,6 +134,11 @@ func RBACForClusterServiceVersion(csv *v1alpha1.ClusterServiceVersion) (map[stri
},
Rules: permission.Rules,
}
hash, err := PolicyRuleHashLabelValue(permission.Rules)
if err != nil {
return nil, fmt.Errorf("failed to hash permission rules: %w", err)
}
role.Labels[ContentHashLabelKey] = hash
permissions[permission.ServiceAccountName].AddRole(role)

// Create RoleBinding
Expand All @@ -120,6 +159,11 @@ func RBACForClusterServiceVersion(csv *v1alpha1.ClusterServiceVersion) (map[stri
Namespace: csv.GetNamespace(),
}},
}
hash, err = RoleReferenceAndSubjectHashLabelValue(roleBinding.RoleRef, roleBinding.Subjects)
if err != nil {
return nil, fmt.Errorf("failed to hash binding content: %w", err)
}
roleBinding.Labels[ContentHashLabelKey] = hash
permissions[permission.ServiceAccountName].AddRoleBinding(roleBinding)
}

Expand All @@ -142,6 +186,11 @@ func RBACForClusterServiceVersion(csv *v1alpha1.ClusterServiceVersion) (map[stri
},
Rules: permission.Rules,
}
hash, err := PolicyRuleHashLabelValue(permission.Rules)
if err != nil {
return nil, fmt.Errorf("failed to hash permission rules: %w", err)
}
role.Labels[ContentHashLabelKey] = hash
permissions[permission.ServiceAccountName].AddClusterRole(role)

// Create ClusterRoleBinding
Expand All @@ -162,6 +211,11 @@ func RBACForClusterServiceVersion(csv *v1alpha1.ClusterServiceVersion) (map[stri
Namespace: csv.GetNamespace(),
}},
}
hash, err = RoleReferenceAndSubjectHashLabelValue(roleBinding.RoleRef, roleBinding.Subjects)
if err != nil {
return nil, fmt.Errorf("failed to hash binding content: %w", err)
}
roleBinding.Labels[ContentHashLabelKey] = hash
permissions[permission.ServiceAccountName].AddClusterRoleBinding(roleBinding)
}
return permissions, nil
Expand Down
3 changes: 1 addition & 2 deletions pkg/package-server/provider/registry_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -787,7 +787,7 @@ func TestRegistryProviderList(t *testing.T) {
globalNS: "ns",
requestNamespace: "wisconsin",
expectedErr: "",
expected: &operators.PackageManifestList{Items: []operators.PackageManifest{}},
expected: &operators.PackageManifestList{},
},
{
name: "PackagesFound",
Expand Down Expand Up @@ -1230,7 +1230,6 @@ func TestRegistryProviderList(t *testing.T) {
} else {
require.Nil(t, err)
}

require.Equal(t, len(test.expected.Items), len(packageManifestList.Items))
require.ElementsMatch(t, test.expected.Items, packageManifestList.Items)
})
Expand Down

0 comments on commit 8eb4f3e

Please sign in to comment.