Commit cf329cd

Merge pull request #695 from securesign/tturek/rekor-sharding-config
fix(rekor): delete unassigned sharding configmaps
openshift-merge-bot[bot] authored Oct 23, 2024
2 parents 7320b17 + a3212a7 commit cf329cd
Showing 2 changed files with 126 additions and 42 deletions.
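Summary of the change (diff below): previously the handler tried to discover and re-adopt an existing ConfigMap whose data matched the computed sharding content, and discarded errors from deleting invalid ConfigMaps. It now always creates a fresh immutable ConfigMap, points Status.ServerConfigRef at it, and then deletes every other ConfigMap carrying the sharding labels, logging failures instead of ignoring them. The sketch below restates that cleanup step against the plain controller-runtime client; it is illustrative only — the operator's own helpers (kubernetes.ListConfigMaps) and its action framework are elided, and the function name is made up here.

package sharding

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// deleteUnassignedShardingConfigMaps removes every ConfigMap in the namespace
// that carries the sharding labels but is not the freshly created one (keep).
// Hypothetical helper for illustration; errors are collected for the caller to
// log, mirroring the controller's log-and-continue behaviour.
func deleteUnassignedShardingConfigMaps(ctx context.Context, c client.Client, namespace, keep string, shardingLabels map[string]string) []error {
	var list corev1.ConfigMapList
	if err := c.List(ctx, &list, client.InNamespace(namespace), client.MatchingLabels(shardingLabels)); err != nil {
		return []error{err}
	}
	var errs []error
	for i := range list.Items {
		if list.Items[i].Name == keep {
			continue // the ConfigMap now referenced by Status.ServerConfigRef stays
		}
		// Delete via a name/namespace stub, as the controller code below does.
		if err := c.Delete(ctx, &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{
			Name:      list.Items[i].Name,
			Namespace: list.Items[i].Namespace,
		}}); err != nil {
			errs = append(errs, err)
		}
	}
	return errs
}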
58 changes: 29 additions & 29 deletions internal/controller/rekor/actions/server/sharding_config.go
@@ -69,41 +69,16 @@ func (i shardingConfig) Handle(ctx context.Context, instance *rhtasv1alpha1.Reko
return i.Continue()
} else {
i.Logger.Info("Remove invalid ConfigMap with rekor-server configuration", "Name", cfg.Name)
_ = i.Client.Delete(ctx, cfg)
err = i.Client.Delete(ctx, cfg)
if err != nil {
i.Logger.Error(err, "Failed to remove ConfigMap", "name", cfg.Name)
}
}
}
}
// invalidate
instance.Status.ServerConfigRef = nil

// try to discover existing config
partialConfigs, err := kubernetes.ListConfigMaps(ctx, i.Client, instance.Namespace, labels2.SelectorFromSet(labels).String())
if err != nil {
i.Logger.Error(err, "problem with finding configmap", "namespace", instance.Namespace)
}
for _, partialSecret := range partialConfigs.Items {
cm, err := kubernetes.GetConfigMap(ctx, i.Client, partialSecret.Namespace, partialSecret.Name)
if err != nil {
return i.Failed(fmt.Errorf("can't load configMap data %w", err))
}
if reflect.DeepEqual(cm.Data, content) && instance.Status.ServerConfigRef == nil {
i.Recorder.Eventf(instance, v1.EventTypeNormal, "ShardingConfigDiscovered", "Existing ConfigMap with sharding configuration discovered: %s", cm.Name)
instance.Status.ServerConfigRef = &rhtasv1alpha1.LocalObjectReference{Name: cm.Name}
meta.SetStatusCondition(&instance.Status.Conditions, metav1.Condition{
Type: actions.ServerCondition,
Status: metav1.ConditionFalse,
Reason: constants.Creating,
Message: "Sharding config discovered",
})
} else {
i.Logger.Info("Remove invalid ConfigMap with rekor-server configuration", "Name", cm.Name)
_ = i.Client.Delete(ctx, cm)
}
}
if instance.Status.ServerConfigRef != nil {
return i.StatusUpdate(ctx, instance)
}

// create new config
newConfig := kubernetes.CreateImmutableConfigmap(cmName, instance.Namespace, labels, content)
if err = controllerutil.SetControllerReference(instance, newConfig, i.Client.Scheme()); err != nil {
@@ -120,6 +95,31 @@ func (i shardingConfig) Handle(ctx context.Context, instance *rhtasv1alpha1.Reko
})
return i.FailedWithStatusUpdate(ctx, err, instance)
}

// remove old server configmaps
partialConfigs, err := kubernetes.ListConfigMaps(ctx, i.Client, instance.Namespace, labels2.SelectorFromSet(labels).String())
if err != nil {
i.Logger.Error(err, "problem with finding configmap")
}
for _, partialConfig := range partialConfigs.Items {
if partialConfig.Name == newConfig.Name {
continue
}

err = i.Client.Delete(ctx, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: partialConfig.Name,
Namespace: partialConfig.Namespace,
},
})
if err != nil {
i.Logger.Error(err, "problem with deleting configmap", "name", partialConfig.Name)
} else {
i.Logger.Info("Remove invalid ConfigMap with rekor-sharding configuration", "name", partialConfig.Name)
i.Recorder.Eventf(instance, v1.EventTypeNormal, "ShardingConfigDeleted", "ConfigMap with sharding configuration deleted: %s", partialConfig.Name)
}
}

i.Recorder.Eventf(instance, v1.EventTypeNormal, "ShardingConfigCreated", "ConfigMap with sharding configuration created: %s", newConfig.Name)
instance.Status.ServerConfigRef = &rhtasv1alpha1.LocalObjectReference{Name: newConfig.Name}

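Two details worth noting in the rewritten handler above: the replacement ConfigMap is created before the leftover label-matched ConfigMaps are removed, so the Rekor server is never left without a sharding configuration, and failures in the cleanup loop (both list and delete) are logged rather than aborting the reconcile.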
110 changes: 97 additions & 13 deletions internal/controller/rekor/actions/server/sharding_config_test.go
@@ -17,6 +17,7 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/watch"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
)
@@ -99,7 +100,7 @@ func TestShardingConfig_Handle(t *testing.T) {
}
type want struct {
result *action.Result
verify func(Gomega, client.WithWatch)
verify func(Gomega, client.WithWatch, <-chan watch.Event)
}
tests := []struct {
name string
@@ -115,7 +116,7 @@
},
want: want{
result: testAction.StatusUpdate(),
verify: func(g Gomega, c client.WithWatch) {
verify: func(g Gomega, c client.WithWatch, events <-chan watch.Event) {
r := rhtasv1alpha1.Rekor{}
g.Expect(c.Get(context.TODO(), rekorNN, &r)).To(Succeed())
g.Expect(r.Status.ServerConfigRef).ShouldNot(BeNil())
@@ -124,6 +125,13 @@
cm := v1.ConfigMap{}
g.Expect(c.Get(context.TODO(), types.NamespacedName{Name: r.Status.ServerConfigRef.Name, Namespace: rekorNN.Namespace}, &cm)).To(Succeed())
g.Expect(cm.Data).Should(HaveKeyWithValue(shardingConfigName, ""))

g.Expect(events).To(HaveLen(1))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Added)),
WithTransform(getEventObjectName, Equal(cm.Name)),
)))
},
},
},
@@ -147,7 +155,7 @@
},
want: want{
result: testAction.StatusUpdate(),
verify: func(g Gomega, c client.WithWatch) {
verify: func(g Gomega, c client.WithWatch, events <-chan watch.Event) {
r := rhtasv1alpha1.Rekor{}
g.Expect(c.Get(context.TODO(), rekorNN, &r)).To(Succeed())
g.Expect(r.Status.ServerConfigRef).ShouldNot(BeNil())
@@ -160,6 +168,13 @@
rlr := make([]rhtasv1alpha1.RekorLogRange, 0)
g.Expect(yaml.Unmarshal([]byte(cm.Data[shardingConfigName]), &rlr)).To(Succeed())
g.Expect(rlr).Should(Equal(r.Spec.Sharding))

g.Expect(events).To(HaveLen(1))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Added)),
WithTransform(getEventObjectName, Equal(cm.Name)),
)))
},
},
},
@@ -196,7 +211,7 @@
},
want: want{
result: testAction.StatusUpdate(),
verify: func(g Gomega, c client.WithWatch) {
verify: func(g Gomega, c client.WithWatch, events <-chan watch.Event) {
r := rhtasv1alpha1.Rekor{}
g.Expect(c.Get(context.TODO(), rekorNN, &r)).To(Succeed())
g.Expect(r.Status.ServerConfigRef).ShouldNot(BeNil())
@@ -210,6 +225,18 @@
rlr := make([]rhtasv1alpha1.RekorLogRange, 0)
g.Expect(yaml.Unmarshal([]byte(cm.Data[shardingConfigName]), &rlr)).To(Succeed())
g.Expect(rlr).Should(Equal(r.Spec.Sharding))

g.Expect(events).To(HaveLen(2))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Deleted)),
WithTransform(getEventObjectName, Equal(cmName+"old")),
)))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Added)),
WithTransform(getEventObjectName, Equal(cm.Name)),
)))
},
},
},
@@ -237,7 +264,7 @@
},
want: want{
result: testAction.StatusUpdate(),
verify: func(g Gomega, c client.WithWatch) {
verify: func(g Gomega, c client.WithWatch, events <-chan watch.Event) {
r := rhtasv1alpha1.Rekor{}
g.Expect(c.Get(context.TODO(), rekorNN, &r)).To(Succeed())
g.Expect(r.Status.ServerConfigRef).ShouldNot(BeNil())
@@ -251,6 +278,18 @@
rlr := make([]rhtasv1alpha1.RekorLogRange, 0)
g.Expect(yaml.Unmarshal([]byte(cm.Data[shardingConfigName]), &rlr)).To(Succeed())
g.Expect(rlr).Should(Equal(r.Spec.Sharding))

g.Expect(events).To(HaveLen(2))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Deleted)),
WithTransform(getEventObjectName, Equal(cmName+"old")),
)))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Added)),
WithTransform(getEventObjectName, Equal(cm.Name)),
)))
},
},
},
Expand All @@ -271,7 +310,7 @@ func TestShardingConfig_Handle(t *testing.T) {
},
want: want{
result: testAction.Continue(),
verify: func(g Gomega, c client.WithWatch) {
verify: func(g Gomega, c client.WithWatch, events <-chan watch.Event) {
r := rhtasv1alpha1.Rekor{}
g.Expect(c.Get(context.TODO(), rekorNN, &r)).To(Succeed())
g.Expect(r.Status.ServerConfigRef).ShouldNot(BeNil())
@@ -316,7 +355,7 @@
},
want: want{
result: testAction.Continue(),
verify: func(g Gomega, c client.WithWatch) {
verify: func(g Gomega, c client.WithWatch, events <-chan watch.Event) {
r := rhtasv1alpha1.Rekor{}
g.Expect(c.Get(context.TODO(), rekorNN, &r)).To(Succeed())
g.Expect(r.Status.ServerConfigRef).ShouldNot(BeNil())
Expand All @@ -329,6 +368,8 @@ func TestShardingConfig_Handle(t *testing.T) {
rlr := make([]rhtasv1alpha1.RekorLogRange, 0)
g.Expect(yaml.Unmarshal([]byte(cm.Data[shardingConfigName]), &rlr)).To(Succeed())
g.Expect(rlr).Should(Equal(r.Spec.Sharding))

g.Expect(events).To(BeEmpty())
},
},
},
Expand All @@ -342,7 +383,7 @@ func TestShardingConfig_Handle(t *testing.T) {
},
want: want{
result: testAction.StatusUpdate(),
verify: func(g Gomega, c client.WithWatch) {
verify: func(g Gomega, c client.WithWatch, events <-chan watch.Event) {
r := rhtasv1alpha1.Rekor{}
g.Expect(c.Get(context.TODO(), rekorNN, &r)).To(Succeed())
g.Expect(r.Status.ServerConfigRef).ShouldNot(BeNil())
Expand All @@ -352,11 +393,18 @@ func TestShardingConfig_Handle(t *testing.T) {
cm := v1.ConfigMap{}
g.Expect(c.Get(context.TODO(), types.NamespacedName{Name: r.Status.ServerConfigRef.Name, Namespace: rekorNN.Namespace}, &cm)).To(Succeed())
g.Expect(cm.Data).Should(HaveKeyWithValue(shardingConfigName, ""))

g.Expect(events).To(HaveLen(1))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Added)),
WithTransform(getEventObjectName, Equal(cm.Name)),
)))
},
},
},
{
name: "use existing config",
name: "delete unassigned sharding configmap",
env: env{
spec: rhtasv1alpha1.RekorSpec{},
status: rhtasv1alpha1.RekorStatus{},
@@ -370,11 +418,23 @@
},
want: want{
result: testAction.StatusUpdate(),
verify: func(g Gomega, c client.WithWatch) {
verify: func(g Gomega, c client.WithWatch, events <-chan watch.Event) {
r := rhtasv1alpha1.Rekor{}
g.Expect(c.Get(context.TODO(), rekorNN, &r)).To(Succeed())
g.Expect(r.Status.ServerConfigRef).ShouldNot(BeNil())
g.Expect(r.Status.ServerConfigRef.Name).Should(Equal(cmName + "old"))
g.Expect(r.Status.ServerConfigRef.Name).ShouldNot(Equal(cmName + "old"))

g.Expect(events).To(HaveLen(2))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Added)),
WithTransform(getEventObjectName, Equal(r.Status.ServerConfigRef.Name)),
)))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Deleted)),
WithTransform(getEventObjectName, Equal(cmName+"old")),
)))
},
},
},
Expand All @@ -398,14 +458,26 @@ func TestShardingConfig_Handle(t *testing.T) {
},
want: want{
result: testAction.StatusUpdate(),
verify: func(g Gomega, c client.WithWatch) {
verify: func(g Gomega, c client.WithWatch, events <-chan watch.Event) {
r := rhtasv1alpha1.Rekor{}
g.Expect(c.Get(context.TODO(), rekorNN, &r)).To(Succeed())
g.Expect(r.Status.ServerConfigRef).ShouldNot(BeNil())
g.Expect(r.Status.ServerConfigRef.Name).Should(Not(Equal(cmName + "old")))

g.Expect(c.Get(context.TODO(), types.NamespacedName{Name: cmName + "old", Namespace: rekorNN.Namespace}, &v1.ConfigMap{})).To(HaveOccurred())
g.Expect(c.Get(context.TODO(), types.NamespacedName{Name: "keep", Namespace: rekorNN.Namespace}, &v1.ConfigMap{})).To(Succeed())

g.Expect(events).To(HaveLen(2))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Added)),
WithTransform(getEventObjectName, Equal(r.Status.ServerConfigRef.Name)),
)))
g.Expect(events).To(Receive(
And(
WithTransform(getEventType, Equal(watch.Deleted)),
WithTransform(getEventObjectName, Equal(cmName+"old")),
)))
},
},
},
@@ -433,14 +505,26 @@
WithObjects(tt.env.objects...).
Build()

watchCm, err := c.Watch(ctx, &v1.ConfigMapList{}, client.InNamespace("default"))
g.Expect(err).To(Not(HaveOccurred()))

a := testAction.PrepareAction(c, NewShardingConfigAction())

if got := a.Handle(ctx, instance); !reflect.DeepEqual(got, tt.want.result) {
t.Errorf("CanHandle() = %v, want %v", got, tt.want.result)
}
watchCm.Stop()
if tt.want.verify != nil {
tt.want.verify(g, c)
tt.want.verify(g, c, watchCm.ResultChan())
}
})
}
}

func getEventType(e watch.Event) watch.EventType {
return e.Type
}

func getEventObjectName(e watch.Event) string {
return e.Object.(client.Object).GetName()
}
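
The two helpers above are the transforms used by the Gomega assertions throughout this file. They work because the fake client built in the test implements client.WithWatch: the test opens a watch on ConfigMaps before running the action, stops it afterwards, and asserts on the buffered events. A minimal self-contained sketch of that pattern (independent of this repository's test harness; assumes only controller-runtime's fake package):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func main() {
	// fake.NewClientBuilder().Build() returns a client.WithWatch.
	c := fake.NewClientBuilder().Build()

	// Open the watch before mutating so no ConfigMap event is missed.
	w, err := c.Watch(context.TODO(), &corev1.ConfigMapList{}, client.InNamespace("default"))
	if err != nil {
		panic(err)
	}

	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
	_ = c.Create(context.TODO(), cm)
	_ = c.Delete(context.TODO(), cm)

	// Stop closes the result channel; already-buffered events stay readable,
	// which is why the test can assert HaveLen/Receive after watchCm.Stop().
	w.Stop()
	for e := range w.ResultChan() {
		fmt.Println(e.Type, e.Object.(client.Object).GetName()) // ADDED demo, then DELETED demo
	}
}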
