resource controller: fix placement computation
Signed-off-by: David Festal <dfestal@redhat.com>
davidfestal committed Oct 5, 2022
1 parent fce723b commit 8075087
Showing 3 changed files with 30 additions and 4 deletions.
14 changes: 11 additions & 3 deletions pkg/reconciler/workload/resource/resource_reconcile.go
@@ -176,8 +176,10 @@ func computePlacement(ns *corev1.Namespace, obj metav1.Object) (annotationPatch
 	// create merge patch
 	annotationPatch = map[string]interface{}{}
 	labelPatch = map[string]interface{}{}
+	// unschedule objects on locations where the namespace is not scheduled
 	for _, loc := range objLocations.Difference(nsLocations).List() {
-		// location was removed from namespace, but is still on the object
+		// Location was removed from namespace, but is still on the object
+		// That's an inconsistent state, probably due to the namespace deletion reaching its grace period => let's repair it
 		var hasSyncerFinalizer, hasClusterFinalizer bool
 		// Check if there's still the syncer or the cluster finalizer.
 		for _, finalizer := range obj.GetFinalizers() {
@@ -199,16 +201,22 @@ func computePlacement(ns *corev1.Namespace, obj metav1.Object) (annotationPatch
 			}
 		}
 	}
-	for _, loc := range nsLocations.Intersection(nsLocations).List() {
+	// sync deletion timestamps if both namespace and object are scheduled
+	for _, loc := range nsLocations.Intersection(objLocations).List() {
 		if nsTimestamp, found := ns.Annotations[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+loc]; found && validRFC3339(nsTimestamp) {
 			objTimestamp, found := obj.GetAnnotations()[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+loc]
 			if !found || !validRFC3339(objTimestamp) {
 				annotationPatch[workloadv1alpha1.InternalClusterDeletionTimestampAnnotationPrefix+loc] = nsTimestamp
 			}
 		}
 	}
+	// set label on unscheduled objects if namespace is scheduled and not deleting
 	for _, loc := range nsLocations.Difference(objLocations).List() {
-		// location was missing on the object
+		if nsDeleting.Has(loc) {
+			// if the namespace is removing resources from this location
+			// then do not add the location label on the object
+			continue
+		}
 		// TODO(sttts): add way to go into pending state first, maybe with a namespace annotation
 		labelPatch[workloadv1alpha1.ClusterResourceStateLabelPrefix+loc] = string(workloadv1alpha1.ResourceStateSync)
 	}
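A quick illustration of the bug fixed above: `nsLocations.Intersection(nsLocations)` is always just `nsLocations`, so deletion-timestamp annotations were synced onto objects even for locations the object was never scheduled to; intersecting with `objLocations` restricts the sync to locations present on both. The standalone sketch below (illustrative values, not code from this commit) shows the difference using the same `k8s.io/apimachinery` string sets:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Hypothetical location sets, standing in for the namespace's and the
	// object's scheduled locations in computePlacement.
	nsLocations := sets.NewString("cluster-1", "cluster-2")
	objLocations := sets.NewString("cluster-1")

	// Old code: intersecting nsLocations with itself yields nsLocations,
	// so cluster-2 is treated as if the object were scheduled there.
	fmt.Println(nsLocations.Intersection(nsLocations).List()) // [cluster-1 cluster-2]

	// Fixed code: only locations present on both the namespace and the
	// object are considered when syncing deletion timestamps.
	fmt.Println(nsLocations.Intersection(objLocations).List()) // [cluster-1]
}

The second and third hunks additionally guard the label patch so an object is not newly scheduled to a location the namespace is already draining (nsDeleting.Has(loc)).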
8 changes: 8 additions & 0 deletions pkg/reconciler/workload/resource/resource_reconcile_test.go
@@ -80,6 +80,14 @@ func TestComputePlacement(t *testing.T) {
 				"state.workload.kcp.dev/cluster-1": "Sync",
 			},
 		},
+		{name: "syncing but deleting namespace, unscheduled object, don't schedule the object at all",
+			ns: namespace(map[string]string{
+				"deletion.internal.workload.kcp.dev/cluster-1": "2002-10-02T10:00:00-05:00",
+			}, map[string]string{
+				"state.workload.kcp.dev/cluster-1": "Sync",
+			}),
+			obj: object(nil, nil, nil, nil),
+		},
 		{name: "new location on namespace",
 			ns: namespace(nil, map[string]string{
 				"state.workload.kcp.dev/cluster-1": "Sync",
12 changes: 11 additions & 1 deletion test/e2e/reconciler/scheduling/placement_scheduler_test.go
@@ -30,6 +30,7 @@ import (
 	apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
@@ -136,13 +137,18 @@ func TestPlacementUpdate(t *testing.T) {
 		return true
 	}, wait.ForeverTestTimeout, time.Millisecond*100)
 
+	firstSyncTargetKey := workloadv1alpha1.ToSyncTargetKey(syncerFixture.SyncerConfig.SyncTargetWorkspace, firstSyncTargetName)
+
 	t.Logf("Create a service in the user workspace")
 	_, err = kubeClusterClient.CoreV1().Services("default").Create(logicalcluster.WithCluster(ctx, userClusterName), &corev1.Service{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "first",
 			Labels: map[string]string{
 				"test.workload.kcp.dev": firstSyncTargetName,
 			},
+			Annotations: map[string]string{
+				"finalizers.workload.kcp.dev/" + firstSyncTargetKey: "wait-a-bit",
+			},
 		},
 		Spec: corev1.ServiceSpec{
 			Ports: []corev1.ServicePort{
@@ -154,7 +160,6 @@
 		},
 	}, metav1.CreateOptions{})
 	require.NoError(t, err)
-	firstSyncTargetKey := workloadv1alpha1.ToSyncTargetKey(syncerFixture.SyncerConfig.SyncTargetWorkspace, firstSyncTargetName)
 
 	t.Logf("Wait for the service to have the sync label")
 	framework.Eventually(t, func() (bool, string) {
@@ -233,6 +238,11 @@ func TestPlacementUpdate(t *testing.T) {
 		return true, ""
 	}, wait.ForeverTestTimeout, time.Millisecond*100)
 
+	t.Logf("Remove the soft finalizer on the service")
+	_, err = kubeClusterClient.CoreV1().Services("default").Patch(logicalcluster.WithCluster(ctx, userClusterName), "first", types.MergePatchType,
+		[]byte("{\"metadata\":{\"annotations\":{\"deletion.internal.workload.kcp.dev/"+firstSyncTargetKey+"\":\"\"}}}"), metav1.PatchOptions{})
+	require.NoError(t, err)
+
 	t.Logf("Wait for the service to be removed in the downstream cluster")
 	require.Eventually(t, func() bool {
 		downstreamServices, err = syncerFixture.DownstreamKubeClient.CoreV1().Services("").List(ctx, metav1.ListOptions{
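The last hunk above patches the service with a JSON merge patch that sets the `deletion.internal.workload.kcp.dev/<syncTargetKey>` annotation to an empty value, using the newly imported `types` package. As a sketch only (not part of the commit), the same patch body could be built with `encoding/json` instead of a hand-written string:

	// Equivalent construction of the merge patch body used in the hunk above
	// (sketch only; the committed test builds the JSON string by hand).
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"annotations": map[string]string{
				"deletion.internal.workload.kcp.dev/" + firstSyncTargetKey: "",
			},
		},
	}
	body, err := json.Marshal(patch) // requires "encoding/json" in the imports
	require.NoError(t, err)
	_, err = kubeClusterClient.CoreV1().Services("default").Patch(logicalcluster.WithCluster(ctx, userClusterName),
		"first", types.MergePatchType, body, metav1.PatchOptions{})
	require.NoError(t, err)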
