From 2a68f306ff55ad91dbe7f1b877d0d6096f7a67b7 Mon Sep 17 00:00:00 2001 From: Frederic Giloux Date: Wed, 8 Feb 2023 22:16:38 +0100 Subject: [PATCH 1/4] Fix issue when GenerateName size limit is reached Signed-off-by: Frederic Giloux --- .../partitionset/partitioning_test.go | 10 ++++++ .../topology/partitionset/resources.go | 31 +++++++++++++------ 2 files changed, 32 insertions(+), 9 deletions(-) diff --git a/pkg/reconciler/topology/partitionset/partitioning_test.go b/pkg/reconciler/topology/partitionset/partitioning_test.go index 4bb0c7aca52..87b411e7445 100644 --- a/pkg/reconciler/topology/partitionset/partitioning_test.go +++ b/pkg/reconciler/topology/partitionset/partitioning_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation" corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1" ) @@ -134,3 +135,12 @@ func TestPartition(t *testing.T) { require.Equal(t, "prod", v["environment"], "Expected that all partitions have a label selector for environment = prod") } } + +func TestGeneratePartitionName(t *testing.T) { + name := generatePartitionName( + "partitionset", + map[string]string{"region": "europe", "cloud": "EKS", "verylong": "123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"}, + []string{"region", "verylong"}, + ) + require.Equal(t, "partitionset-europe-123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890"[:validation.DNS1123SubdomainMaxLength-1], name) +} diff --git a/pkg/reconciler/topology/partitionset/resources.go b/pkg/reconciler/topology/partitionset/resources.go index 9054663f328..14f118a4365 100644 --- a/pkg/reconciler/topology/partitionset/resources.go +++ b/pkg/reconciler/topology/partitionset/resources.go @@ -21,6 +21,7 @@ import ( "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation" topologyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/topology/v1alpha1" ) @@ -28,17 +29,10 @@ import ( // generatePartition generates the Partition specifications based on // the provided matchExpressions and matchLabels. 
func generatePartition(name string, matchExpressions []metav1.LabelSelectorRequirement, matchLabels map[string]string, dimensions []string) *topologyv1alpha1.Partition { - pname := name - labels := make([]string, len(dimensions)) - copy(labels, dimensions) - sort.Strings(labels) - for _, label := range labels { - pname = pname + "-" + strings.ToLower(matchLabels[label]) - } - + name = generatePartitionName(name, matchLabels, dimensions) return &topologyv1alpha1.Partition{ ObjectMeta: metav1.ObjectMeta{ - GenerateName: pname + "-", + GenerateName: name + "-", }, Spec: topologyv1alpha1.PartitionSpec{ Selector: &metav1.LabelSelector{ @@ -48,3 +42,22 @@ func generatePartition(name string, matchExpressions []metav1.LabelSelectorRequi }, } } + +// generatePartitionName creates a name based on the dimension values. +func generatePartitionName(name string, matchLabels map[string]string, dimensions []string) string { + labels := make([]string, len(dimensions)) + copy(labels, dimensions) + sort.Strings(labels) + for _, label := range labels { + name = name + "-" + strings.ToLower(matchLabels[label]) + } + name = name[:min(validation.DNS1123SubdomainMaxLength-1, len(name))] + return name +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} From ea2a0acc3bfb7f7184b37d802913a863fe4f11d2 Mon Sep 17 00:00:00 2001 From: Frederic Giloux Date: Wed, 8 Feb 2023 22:17:53 +0100 Subject: [PATCH 2/4] Fix issue when duplicate dimensions are provided Signed-off-by: Frederic Giloux --- .../topology/partitionset/partitionset_reconcile.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pkg/reconciler/topology/partitionset/partitionset_reconcile.go b/pkg/reconciler/topology/partitionset/partitionset_reconcile.go index 375abef122d..f99c2b02584 100644 --- a/pkg/reconciler/topology/partitionset/partitionset_reconcile.go +++ b/pkg/reconciler/topology/partitionset/partitionset_reconcile.go @@ -27,6 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/klog/v2" + "k8s.io/kube-openapi/pkg/util/sets" corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1" conditionsv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/third_party/conditions/apis/conditions/v1alpha1" @@ -89,10 +90,12 @@ func (c *controller) reconcile(ctx context.Context, partitionSet *topologyv1alph } var matchLabelsMap map[string]map[string]string + // remove duplicates + dimensions := sets.NewString(partitionSet.Spec.Dimensions...).List() if partitionSet.Spec.ShardSelector != nil { - matchLabelsMap = partition(shards, partitionSet.Spec.Dimensions, partitionSet.Spec.ShardSelector.MatchLabels) + matchLabelsMap = partition(shards, dimensions, partitionSet.Spec.ShardSelector.MatchLabels) } else { - matchLabelsMap = partition(shards, partitionSet.Spec.Dimensions, nil) + matchLabelsMap = partition(shards, dimensions, nil) } partitionSet.Status.Count = uint16(len(matchLabelsMap)) existingMatches := map[string]struct{}{} @@ -162,7 +165,7 @@ func (c *controller) reconcile(ctx context.Context, partitionSet *topologyv1alph // Create partitions when no existing partition for the set has the same selector. 
for key, matchLabels := range matchLabelsMap { if _, ok := existingMatches[key]; !ok { - partition := generatePartition(partitionSet.Name, newMatchExpressions, matchLabels, partitionSet.Spec.Dimensions) + partition := generatePartition(partitionSet.Name, newMatchExpressions, matchLabels, dimensions) partition.OwnerReferences = []metav1.OwnerReference{ *metav1.NewControllerRef(partitionSet, topologyv1alpha1.SchemeGroupVersion.WithKind("PartitionSet")), } From 46c00df3968f65731cd0ffcfa046c692c9a2852c Mon Sep 17 00:00:00 2001 From: Frederic Giloux Date: Thu, 23 Feb 2023 11:39:16 +0100 Subject: [PATCH 3/4] Prevent APIBinding test to fail when non schedulable shards are added by other e2e test Signed-off-by: Frederic Giloux --- test/e2e/apibinding/apibinding_test.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/test/e2e/apibinding/apibinding_test.go b/test/e2e/apibinding/apibinding_test.go index 338f9fc438d..d3e80558998 100644 --- a/test/e2e/apibinding/apibinding_test.go +++ b/test/e2e/apibinding/apibinding_test.go @@ -47,6 +47,7 @@ import ( "github.com/kcp-dev/kcp/config/helpers" apisv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/apis/v1alpha1" "github.com/kcp-dev/kcp/pkg/apis/core" + corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1" "github.com/kcp-dev/kcp/pkg/apis/third_party/conditions/util/conditions" kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster" "github.com/kcp-dev/kcp/test/e2e/fixtures/wildwest/apis/wildwest" @@ -167,13 +168,22 @@ func TestAPIBinding(t *testing.T) { t.Logf("Getting a list of VirtualWorkspaceURLs assigned to Shards") shards, err := kcpClusterClient.Cluster(core.RootCluster.Path()).CoreV1alpha1().Shards().List(ctx, metav1.ListOptions{}) require.NoError(t, err) + // Filtering out shards that are not schedulable + var shardItems []corev1alpha1.Shard + for _, s := range shards.Items { + if _, ok := s.Annotations["experimental.core.kcp.io/unschedulable"]; !ok { + shardItems = append(shardItems, s) + } + } require.Eventually(t, func() bool { - for _, s := range shards.Items { - if len(s.Spec.VirtualWorkspaceURL) == 0 { - t.Logf("%q shard hasn't had assigned a virtual workspace URL", s.Name) - return false + for _, s := range shardItems { + if _, ok := s.Annotations["experimental.core.kcp.io/unschedulable"]; !ok { + if len(s.Spec.VirtualWorkspaceURL) == 0 { + t.Logf("%q shard hasn't had assigned a virtual workspace URL", s.Name) + return false + } + shardVirtualWorkspaceURLs.Insert(s.Spec.VirtualWorkspaceURL) } - shardVirtualWorkspaceURLs.Insert(s.Spec.VirtualWorkspaceURL) } return true }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected all Shards to have a VirtualWorkspaceURL assigned") @@ -356,7 +366,7 @@ func TestAPIBinding(t *testing.T) { gvrWithIdentity := wildwestv1alpha1.SchemeGroupVersion.WithResource("cowboys:" + identity) var names []string - for _, shard := range shards.Items { + for _, shard := range shardItems { t.Logf("Doing a wildcard identity list for %v against %s workspace on shard %s", gvrWithIdentity, consumerWorkspace, shard.Name) shardDynamicClusterClients, err := kcpdynamic.NewForConfig(server.ShardSystemMasterBaseConfig(t, shard.Name)) require.NoError(t, err) From d6bdad470ffbf2cf12c63618d951d814a543e064 Mon Sep 17 00:00:00 2001 From: Frederic Giloux Date: Wed, 18 Jan 2023 11:23:30 +0100 Subject: [PATCH 4/4] Add end-to-end tests for PartitionSets Signed-off-by: Frederic Giloux --- .../partitionset/partitionset_test.go | 397 ++++++++++++++++++ 1 file changed, 397 
insertions(+) create mode 100644 test/e2e/reconciler/partitionset/partitionset_test.go diff --git a/test/e2e/reconciler/partitionset/partitionset_test.go b/test/e2e/reconciler/partitionset/partitionset_test.go new file mode 100644 index 00000000000..9f40629d59a --- /dev/null +++ b/test/e2e/reconciler/partitionset/partitionset_test.go @@ -0,0 +1,397 @@ +/* +Copyright 2023 The KCP Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package partitionset + +import ( + "context" + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/stretchr/testify/require" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/wait" + + "github.com/kcp-dev/kcp/pkg/apis/core" + corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1" + "github.com/kcp-dev/kcp/pkg/apis/third_party/conditions/util/conditions" + topologyv1alpha1 "github.com/kcp-dev/kcp/pkg/apis/topology/v1alpha1" + kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster" + "github.com/kcp-dev/kcp/test/e2e/framework" +) + +func TestPartitionSet(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + server := framework.SharedKcpServer(t) + + // Create organization and workspace. + // Organizations help with multiple runs. 
+ orgPath, _ := framework.NewOrganizationFixture(t, server) + partitionClusterPath, _ := framework.NewWorkspaceFixture(t, server, orgPath, framework.WithName("partitionset")) + + cfg := server.BaseConfig(t) + kcpClusterClient, err := kcpclientset.NewForConfig(cfg) + require.NoError(t, err, "failed to construct kcp cluster client for server") + partitionSetClient := kcpClusterClient.TopologyV1alpha1().PartitionSets() + partitionClient := kcpClusterClient.TopologyV1alpha1().Partitions() + var partitions *topologyv1alpha1.PartitionList + + t.Logf("Creating a partitionSet not matching any shard") + partitionSet := &topologyv1alpha1.PartitionSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-partitionset", + }, + Spec: topologyv1alpha1.PartitionSetSpec{ + Dimensions: []string{"partition-test-region"}, + ShardSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "excluded", + Operator: metav1.LabelSelectorOpDoesNotExist, + }, + }, + }, + }, + } + partitionSet, err = partitionSetClient.Cluster(partitionClusterPath).Create(ctx, partitionSet, metav1.CreateOptions{}) + require.NoError(t, err, "error creating partitionSet") + framework.Eventually(t, func() (bool, string) { + partitionSet, err = partitionSetClient.Cluster(partitionClusterPath).Get(ctx, partitionSet.Name, metav1.GetOptions{}) + require.NoError(t, err, "error retrieving partitionSet") + if conditions.IsTrue(partitionSet, topologyv1alpha1.PartitionSetValid) && conditions.IsTrue(partitionSet, topologyv1alpha1.PartitionsReady) { + return true, "" + } + return false, spew.Sdump(partitionSet.Status.Conditions) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected valid partitionSet") + partitions, err = partitionClient.Cluster(partitionClusterPath).List(ctx, metav1.ListOptions{}) + require.NoError(t, err, "error retrieving partitions") + require.Equal(t, 0, len(partitions.Items), "no partition expected, got: %d", len(partitions.Items)) + + // Newly added shards are annotated to avoid side effects on other e2e tests. + t.Logf("Creating a shard matching the partitionSet") + shard1a := &corev1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "partition-shard-1a", + Labels: map[string]string{ + "partition-test-region": "partition-test-region-1", + }, + Annotations: map[string]string{ + "experimental.core.kcp.io/unschedulable": "true", + }, + }, + Spec: corev1alpha1.ShardSpec{ + BaseURL: "https://base.kcp.test.dev", + }, + } + shardClient := kcpClusterClient.CoreV1alpha1().Shards() + shard1a, err = shardClient.Cluster(core.RootCluster.Path()).Create(ctx, shard1a, metav1.CreateOptions{}) + require.NoError(t, err, "error creating shard") + // Necessary for multiple runs. 
+ defer func() { + err = shardClient.Cluster(core.RootCluster.Path()).Delete(ctx, shard1a.Name, metav1.DeleteOptions{}) + require.NoError(t, err, "error deleting shard") + }() + framework.Eventually(t, func() (bool, string) { + partitionSet, err = partitionSetClient.Cluster(partitionClusterPath).Get(ctx, partitionSet.Name, metav1.GetOptions{}) + require.NoError(t, err, "error retrieving partitionSet") + if conditions.IsTrue(partitionSet, topologyv1alpha1.PartitionsReady) && partitionSet.Status.Count == uint16(1) { + return true, "" + } + return false, fmt.Sprintf("expected 1 partition, but got %d", partitionSet.Status.Count) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected the partition count to be 1") + framework.Eventually(t, func() (bool, string) { + partitions, err = partitionClient.Cluster(partitionClusterPath).List(ctx, metav1.ListOptions{}) + require.NoError(t, err, "error retrieving partitions") + if len(partitions.Items) == 1 { + return true, "" + } + return false, fmt.Sprintf("expected 1 partition, but got %d", len(partitions.Items)) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected 1 partition") + require.Equal(t, map[string]string{"partition-test-region": "partition-test-region-1"}, partitions.Items[0].Spec.Selector.MatchLabels, "selector not as expected") + + t.Logf("Creating a shard in a second region") + shard2 := &corev1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "partition-shard-2", + Labels: map[string]string{ + "partition-test-region": "partition-test-region-2", + }, + Annotations: map[string]string{ + "experimental.core.kcp.io/unschedulable": "true", + }, + }, + Spec: corev1alpha1.ShardSpec{ + BaseURL: "https://base.kcp.test.dev", + }, + } + shard2, err = shardClient.Cluster(core.RootCluster.Path()).Create(ctx, shard2, metav1.CreateOptions{}) + // Necessary for multiple runs. 
+ require.NoError(t, err, "error creating shard") + defer func() { + err = shardClient.Cluster(core.RootCluster.Path()).Delete(ctx, shard2.Name, metav1.DeleteOptions{}) + require.NoError(t, err, "error deleting shard") + }() + framework.Eventually(t, func() (bool, string) { + partitionSet, err = partitionSetClient.Cluster(partitionClusterPath).Get(ctx, partitionSet.Name, metav1.GetOptions{}) + require.NoError(t, err, "error retrieving partitionSet") + if conditions.IsTrue(partitionSet, topologyv1alpha1.PartitionsReady) && partitionSet.Status.Count == uint16(2) { + return true, "" + } + return false, fmt.Sprintf("expected 2 partitions, but got %d", partitionSet.Status.Count) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected the partitions to be ready and their count to be 2") + framework.Eventually(t, func() (bool, string) { + partitions, err = partitionClient.Cluster(partitionClusterPath).List(ctx, metav1.ListOptions{}) + require.NoError(t, err, "error retrieving partitions") + if len(partitions.Items) == 2 { + return true, "" + } + return false, fmt.Sprintf("expected 2 partitions, but got %d", len(partitions.Items)) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected 2 partitions") + require.True(t, (reflect.DeepEqual(partitions.Items[0].Spec.Selector.MatchLabels, map[string]string{"partition-test-region": "partition-test-region-1"}) && + reflect.DeepEqual(partitions.Items[1].Spec.Selector.MatchLabels, map[string]string{"partition-test-region": "partition-test-region-2"})) || + (reflect.DeepEqual(partitions.Items[0].Spec.Selector.MatchLabels, map[string]string{"partition-test-region": "partition-test-region-2"}) && + reflect.DeepEqual(partitions.Items[1].Spec.Selector.MatchLabels, map[string]string{"partition-test-region": "partition-test-region-1"})), "selectors not as expected") + + t.Logf("Moving the second shard to the same region as the first one") + shard2.Labels = map[string]string{ + "partition-test-region": "partition-test-region-1", + } + _, err = shardClient.Cluster(core.RootCluster.Path()).Update(ctx, shard2, metav1.UpdateOptions{}) + require.NoError(t, err, "error updating shard") + framework.Eventually(t, func() (bool, string) { + partitionSet, err = partitionSetClient.Cluster(partitionClusterPath).Get(ctx, partitionSet.Name, metav1.GetOptions{}) + require.NoError(t, err, "error retrieving partitionSet") + if conditions.IsTrue(partitionSet, topologyv1alpha1.PartitionsReady) && partitionSet.Status.Count == uint16(1) { + return true, "" + } + return false, fmt.Sprintf("expected 1 partition, but got %d", partitionSet.Status.Count) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected the partition count to become 1") + framework.Eventually(t, func() (bool, string) { + partitions, err = partitionClient.Cluster(partitionClusterPath).List(ctx, metav1.ListOptions{}) + require.NoError(t, err, "error retrieving partitions") + if len(partitions.Items) == 1 { + return true, "" + } + return false, fmt.Sprintf("expected 1 partition, but got %d", len(partitions.Items)) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected 1 partition") + + t.Logf("Creating a shard part of a third region") + shard3 := &corev1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "partition-shard-3", + Labels: map[string]string{ + "partition-test-region": "partition-test-region-3", + }, + Annotations: map[string]string{ + "experimental.core.kcp.io/unschedulable": "true", + }, + }, + Spec: corev1alpha1.ShardSpec{ + BaseURL: "https://base.kcp.test.dev", + }, + } + shard3, 
err = shardClient.Cluster(core.RootCluster.Path()).Create(ctx, shard3, metav1.CreateOptions{}) + require.NoError(t, err, "error creating shard") + defer func() { + err = shardClient.Cluster(core.RootCluster.Path()).Delete(ctx, shard3.Name, metav1.DeleteOptions{}) + require.NoError(t, err, "error deleting shard") + }() + framework.Eventually(t, func() (bool, string) { + partitionSet, err = partitionSetClient.Cluster(partitionClusterPath).Get(ctx, partitionSet.Name, metav1.GetOptions{}) + require.NoError(t, err, "error retrieving partitionSet") + if conditions.IsTrue(partitionSet, topologyv1alpha1.PartitionsReady) && partitionSet.Status.Count == uint16(2) { + return true, "" + } + return false, fmt.Sprintf("expected 2 partitions, but got %d", partitionSet.Status.Count) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected the partition count to become 2") + framework.Eventually(t, func() (bool, string) { + partitions, err = partitionClient.Cluster(partitionClusterPath).List(ctx, metav1.ListOptions{}) + require.NoError(t, err, "error retrieving partitions") + if len(partitions.Items) == 2 { + return true, "" + } + return false, fmt.Sprintf("expected 2 partitions, but got %d", len(partitions.Items)) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected 2 partitions") + + t.Logf("Excluding the shard of the third region") + shard3.Labels = map[string]string{ + "partition-test-region": "partition-test-region-3", + "excluded": "true", + } + _, err = shardClient.Cluster(core.RootCluster.Path()).Update(ctx, shard3, metav1.UpdateOptions{}) + require.NoError(t, err, "error updating shard") + framework.Eventually(t, func() (bool, string) { + partitionSet, err = partitionSetClient.Cluster(partitionClusterPath).Get(ctx, partitionSet.Name, metav1.GetOptions{}) + require.NoError(t, err, "error retrieving partitionSet") + if conditions.IsTrue(partitionSet, topologyv1alpha1.PartitionsReady) && partitionSet.Status.Count == uint16(1) { + return true, "" + } + return false, fmt.Sprintf("expected 1 partition, but got %d", partitionSet.Status.Count) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected the partition count to become 1") + framework.Eventually(t, func() (bool, string) { + partitions, err = partitionClient.Cluster(partitionClusterPath).List(ctx, metav1.ListOptions{}) + require.NoError(t, err, "error retrieving partitions") + if len(partitions.Items) == 1 { + return true, "" + } + return false, fmt.Sprintf("expected 1 partition, but got %d", len(partitions.Items)) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected 1 partition") +} + +func TestPartitionSetAdmission(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + server := framework.SharedKcpServer(t) + + // Create organization and workspace. + // Organizations help with multiple runs. 
+ orgPath, _ := framework.NewOrganizationFixture(t, server) + partitionClusterPath, _ := framework.NewWorkspaceFixture(t, server, orgPath, framework.WithName("partitionset-admission")) + + cfg := server.BaseConfig(t) + kcpClusterClient, err := kcpclientset.NewForConfig(cfg) + require.NoError(t, err, "failed to construct kcp cluster client for server") + partitionSetClient := kcpClusterClient.TopologyV1alpha1().PartitionSets() + partitionClient := kcpClusterClient.TopologyV1alpha1().Partitions() + shardClient := kcpClusterClient.CoreV1alpha1().Shards() + + errorPartitionSet := &topologyv1alpha1.PartitionSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "admission-partitionset", + }, + Spec: topologyv1alpha1.PartitionSetSpec{ + Dimensions: []string{"region"}, + }, + } + + t.Logf("Key too long in matchExpressions") + errorPartitionSet.Spec.ShardSelector = &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "region/1234567890123456789012345678901234567890123456789012345678901234567890", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"antartica", "greenland"}, + }, + }, + } + _, err = partitionSetClient.Cluster(partitionClusterPath).Create(ctx, errorPartitionSet, metav1.CreateOptions{}) + require.Error(t, err, "error creating partitionSet expected") + + t.Logf("Character not allowed at first place in matchExpressions values") + errorPartitionSet.Spec.ShardSelector = &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "region", + Operator: metav1.LabelSelectorOpNotIn, + Values: []string{"antartica", "_A.123456789012345678901234567890123456789012345678901234567890"}, + }, + }, + } + _, err = partitionSetClient.Cluster(partitionClusterPath).Create(ctx, errorPartitionSet, metav1.CreateOptions{}) + require.Error(t, err, "error creating partitionSet expected") + + t.Logf("Invalid value in matchExpressions operator") + errorPartitionSet.Spec.ShardSelector = &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "region", + Operator: "DoesNotExist", + Values: []string{"antartica", "greenland"}, + }, + }, + } + _, err = partitionSetClient.Cluster(partitionClusterPath).Create(ctx, errorPartitionSet, metav1.CreateOptions{}) + require.Error(t, err, "error creating partitionSet expected") + + t.Logf("Invalid key in matchLabels") + errorPartitionSet.Spec.ShardSelector = &metav1.LabelSelector{ + MatchLabels: map[string]string{"1234567890123456789_01234567890123456789/aaa": "keynotvalid"}, + } + _, err = partitionSetClient.Cluster(partitionClusterPath).Create(ctx, errorPartitionSet, metav1.CreateOptions{}) + require.Error(t, err, "error creating partitionSet expected") + + t.Logf("Invalid value in matchLabels") + errorPartitionSet.Spec.ShardSelector = &metav1.LabelSelector{ + MatchLabels: map[string]string{"valuenotvalid": "1234567890123456789%%%01234567890123456789"}, + } + _, err = partitionSetClient.Cluster(partitionClusterPath).Create(ctx, errorPartitionSet, metav1.CreateOptions{}) + require.Error(t, err, "error creating partitionSet expected") + + t.Logf("Partition name cut when the label values sum up") + partitionSet := &topologyv1alpha1.PartitionSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-partitionset", + }, + Spec: topologyv1alpha1.PartitionSetSpec{ + Dimensions: []string{"partition-test-label1", "partition-test-label2", "partition-test-label3", "partition-test-label4", "partition-test-label5"}, + }, + } + _, err = partitionSetClient.Cluster(partitionClusterPath).Create(ctx, 
partitionSet, metav1.CreateOptions{}) + require.NoError(t, err, "error updating partitionSet") + labelValues := []string{ + "label1-12345678901234567890123456789012345678901234567890", + "label2-12345678901234567890123456789012345678901234567890", + "label3-12345678901234567890123456789012345678901234567890", + "label4-12345678901234567890123456789012345678901234567890", + "label5-12345678901234567890123456789012345678901234567890", + } + admissionShard := &corev1alpha1.Shard{ + ObjectMeta: metav1.ObjectMeta{ + Name: "partition-shard-adm", + Labels: map[string]string{ + "partition-test-label1": labelValues[0], + "partition-test-label2": labelValues[1], + "partition-test-label3": labelValues[2], + "partition-test-label4": labelValues[3], + "partition-test-label5": labelValues[4], + }, + Annotations: map[string]string{ + "experimental.core.kcp.io/unschedulable": "true", + }, + }, + Spec: corev1alpha1.ShardSpec{ + BaseURL: "https://base.kcp.test.dev", + }, + } + shard, err := shardClient.Cluster(core.RootCluster.Path()).Create(ctx, admissionShard, metav1.CreateOptions{}) + require.NoError(t, err, "error creating shard") + defer func() { + err = shardClient.Cluster(core.RootCluster.Path()).Delete(ctx, shard.Name, metav1.DeleteOptions{}) + require.NoError(t, err, "error deleting shard") + }() + var partitions *topologyv1alpha1.PartitionList + framework.Eventually(t, func() (bool, string) { + partitions, err = partitionClient.Cluster(partitionClusterPath).List(ctx, metav1.ListOptions{}) + require.NoError(t, err, "error retrieving partitions") + if len(partitions.Items) == 1 { + return true, "" + } + return false, fmt.Sprintf("expected 1 partition, but got %d", len(partitions.Items)) + }, wait.ForeverTestTimeout, 100*time.Millisecond, "expected 1 partition") + expectedName := partitionSet.Name + "-" + strings.Join(labelValues, "-") + expectedName = expectedName[:validation.DNS1123LabelMaxLength-5] + require.EqualValues(t, expectedName, partitions.Items[0].Name[:len(partitions.Items[0].Name)-5], + "partition name not as expected, got: %s", partitions.Items[0].Name) +}
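
Reviewer note (illustration only, not part of the patch series): the sketch below is a minimal, self-contained rendering of the truncation rule introduced in PATCH 1/4. It assumes the same behaviour as the generatePartitionName helper added in resources.go and mirrors validation.DNS1123SubdomainMaxLength as a local constant so it runs without kcp or apimachinery on the module path; the helper and main function here are illustrative copies, not the patched code itself.

    package main

    import (
    	"fmt"
    	"sort"
    	"strings"
    )

    // dns1123SubdomainMaxLength mirrors
    // k8s.io/apimachinery/pkg/util/validation.DNS1123SubdomainMaxLength (253).
    const dns1123SubdomainMaxLength = 253

    // generatePartitionName builds "<name>-<value1>-<value2>-..." from the sorted
    // dimension labels and clamps the result to 252 characters so that the
    // GenerateName prefix ("<result>-") still passes the 253-character object
    // metadata validation; the API server's name generator then shortens the
    // generated name further on its own before appending the random suffix.
    func generatePartitionName(name string, matchLabels map[string]string, dimensions []string) string {
    	labels := make([]string, len(dimensions))
    	copy(labels, dimensions)
    	sort.Strings(labels)
    	for _, label := range labels {
    		name = name + "-" + strings.ToLower(matchLabels[label])
    	}
    	if len(name) > dns1123SubdomainMaxLength-1 {
    		name = name[:dns1123SubdomainMaxLength-1]
    	}
    	return name
    }

    func main() {
    	long := strings.Repeat("0123456789", 60) // 600 characters, far beyond the limit
    	name := generatePartitionName(
    		"partitionset",
    		map[string]string{"region": "europe", "verylong": long},
    		[]string{"region", "verylong"},
    	)
    	fmt.Println(len(name)) // 252
    }

The clamp stops at 252 rather than 253 because generatePartition appends a trailing "-" before handing the value to GenerateName, so the full prefix stays within the DNS-1123 subdomain limit that object metadata validation enforces.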