Skip to content

Bug 1860035: Fix SubscriptionConfig NodeSelector field #1716

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 25 additions & 5 deletions doc/design/subscription-config.md
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ The `env` field defines a list of [Environment Variables](https://kubernetes.io/

Increase log verbosity on an Operator's container that utilizes the `ARGS` variable:

```
```yaml
kind: Subscription
metadata:
name: prometheus
Expand All @@ -39,7 +39,7 @@ The `envFrom` field defines a [list of sources to populate Environment Variables

Inject a license key residing in a Secret to unlock Operator features:

```
```yaml
kind: Subscription
metadata:
name: my-operator
Expand Down Expand Up @@ -68,7 +68,7 @@ The `volumeMounts` field defines a list of [VolumeMounts](https://kubernetes.io/

Mount a ConfigMap as a Volume that contains configuration information that can change default Operator behavior. Modifications to the content of the ConfigMap should appear within the container's VolumeMount.

```
```yaml
kind: Subscription
metadata:
name: my-operator
Expand All @@ -95,7 +95,7 @@ The `tolerations` field defines a list of [Tolerations](https://kubernetes.io/do

Inject toleration to tolerate all taints.

```
```yaml
kind: Subscription
metadata:
name: my-operator
Expand All @@ -117,7 +117,7 @@ The `resources` field defines [Resource Constraints](https://kubernetes.io/docs/

Inject a request of 0.25 cpu and 64 MiB of memory, and a limit of 0.5 cpu and 128MiB of memory in each container.

```
```yaml
kind: Subscription
metadata:
name: my-operator
Expand All @@ -133,3 +133,23 @@ spec:
memory: "128Mi"
cpu: "500m"
```

### NodeSelector

The `nodeSelector` field defines a [NodeSelector](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/) for the Pods of the Deployments created by OLM.
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: this is a bit vague. Could we say "for the pods associated with the operator deployment" instead?

This also future-proofs it a bit where in the future OLM supports more than just deploying operators, something we discussed recently

Copy link
Member Author

@awgreene awgreene Aug 7, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: this is a bit vague. Could we say "for the pods associated with the operator deployment" instead?

Technically this config is applied to all deployments created by OLM.

This also future-proofs it a bit where in the future OLM supports more than just deploying operators, something we discussed recently

OLM also creates deployments for webhooks, which could be stored in the same deployment as the operator but isn't guaranteed, so I don't know how OLM could only create deployments for operators.


#### Example

Inject `nodeSelector` key-value pairs.

```yaml
kind: Subscription
metadata:
name: my-operator
spec:
package: etcd
channel: alpha
config:
nodeSelector:
foo: bar
```
3 changes: 2 additions & 1 deletion pkg/controller/operators/olm/overrides/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ type operatorConfig struct {
logger *logrus.Logger
}

func (o *operatorConfig) GetConfigOverrides(ownerCSV ownerutil.Owner) (envVarOverrides []corev1.EnvVar, volumeOverrides []corev1.Volume, volumeMountOverrides []corev1.VolumeMount, tolerationOverrides []corev1.Toleration, resourcesOverride corev1.ResourceRequirements, err error) {
func (o *operatorConfig) GetConfigOverrides(ownerCSV ownerutil.Owner) (envVarOverrides []corev1.EnvVar, volumeOverrides []corev1.Volume, volumeMountOverrides []corev1.VolumeMount, tolerationOverrides []corev1.Toleration, resourcesOverride corev1.ResourceRequirements, nodeSelectorOverride map[string]string, err error) {
list, listErr := o.lister.OperatorsV1alpha1().SubscriptionLister().Subscriptions(ownerCSV.GetNamespace()).List(labels.Everything())
if listErr != nil {
err = fmt.Errorf("failed to list subscription namespace=%s - %v", ownerCSV.GetNamespace(), listErr)
Expand All @@ -34,6 +34,7 @@ func (o *operatorConfig) GetConfigOverrides(ownerCSV ownerutil.Owner) (envVarOve
volumeMountOverrides = owner.Spec.Config.VolumeMounts
tolerationOverrides = owner.Spec.Config.Tolerations
resourcesOverride = owner.Spec.Config.Resources
nodeSelectorOverride = owner.Spec.Config.NodeSelector

return
}
Expand Down
6 changes: 5 additions & 1 deletion pkg/controller/operators/olm/overrides/initializer.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ func (d *DeploymentInitializer) initialize(ownerCSV ownerutil.Owner, deployment
var envVarOverrides, proxyEnvVar, merged []corev1.EnvVar
var err error

envVarOverrides, volumeOverrides, volumeMountOverrides, tolerationOverrides, resourcesOverride, err := d.config.GetConfigOverrides(ownerCSV)
envVarOverrides, volumeOverrides, volumeMountOverrides, tolerationOverrides, resourcesOverride, nodeSelectorOverride, err := d.config.GetConfigOverrides(ownerCSV)
if err != nil {
err = fmt.Errorf("failed to get subscription pod configuration - %v", err)
return err
Expand Down Expand Up @@ -87,6 +87,10 @@ func (d *DeploymentInitializer) initialize(ownerCSV ownerutil.Owner, deployment
return fmt.Errorf("failed to inject resources into deployment spec name=%s - %v", deployment.Name, err)
}

if err = InjectNodeSelectorIntoDeployment(podSpec, nodeSelectorOverride); err != nil {
return fmt.Errorf("failed to inject nodeSelector into deployment spec name=%s - %v", deployment.Name, err)
}

return nil
}

Expand Down
14 changes: 14 additions & 0 deletions pkg/controller/operators/olm/overrides/inject.go
Original file line number Diff line number Diff line change
Expand Up @@ -206,3 +206,17 @@ func InjectResourcesIntoDeployment(podSpec *corev1.PodSpec, resources corev1.Res

return nil
}

// InjectNodeSelectorIntoDeployment injects the provided NodeSelector
// into the container(s) of the given PodSpec.
//
// If any Container in PodSpec already defines a NodeSelector it will
// be overwritten.
func InjectNodeSelectorIntoDeployment(podSpec *corev1.PodSpec, nodeSelector map[string]string) error {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

nit: this function is called InjectNodeSelectorIntoDeployment but injects the nodeselector into a PodSpec

maybe calling it InjectNodeSelectorIntoPodSpec makes it a little clearer?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This makes sense, but none of the other methods in this file distinguish injecting into the deployment or podSpec. I suspect this was either an oversight when the code was written or assuming its fine given that the podspec is part of the deployment. Should we change the distinction in all places or follow convention.

if podSpec == nil {
return errors.New("no pod spec provided")
}

podSpec.NodeSelector = nodeSelector
return nil
}
58 changes: 58 additions & 0 deletions pkg/controller/operators/olm/overrides/inject_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,13 @@ var (
corev1.ResourceMemory: resource.MustParse("128Mi"),
},
}

defaultNodeSelector = map[string]string{
"all": "your",
"base": "are",
"belong": "to",
"us": "",
}
)

func TestInjectVolumeMountIntoDeployment(t *testing.T) {
Expand Down Expand Up @@ -701,3 +708,54 @@ func TestInjectResourcesIntoDeployment(t *testing.T) {
})
}
}

// TestInjectNodeSelectorIntoDeployment verifies that the injected
// nodeSelector replaces any existing pod-level NodeSelector, and that a
// nil PodSpec is rejected with an error instead of panicking.
func TestInjectNodeSelectorIntoDeployment(t *testing.T) {
	tests := []struct {
		name         string
		podSpec      *corev1.PodSpec
		nodeSelector map[string]string
		expected     *corev1.PodSpec
		wantErr      bool
	}{
		{
			// A nil PodSpec cannot be injected into.
			// Expected: an error is returned and the PodSpec stays nil.
			name:         "WithNilPodSpec",
			podSpec:      nil,
			nodeSelector: map[string]string{"foo": "bar"},
			expected:     nil,
			wantErr:      true,
		},
		{
			// PodSpec with no NodeSelector is injected with a nodeSelector.
			// Expected: NodeSelector is set to the injected value.
			name:         "WithEmptyNodeSelector",
			podSpec:      &corev1.PodSpec{},
			nodeSelector: map[string]string{"foo": "bar"},
			expected: &corev1.PodSpec{
				NodeSelector: map[string]string{"foo": "bar"},
			},
		},
		{
			// PodSpec with an existing NodeSelector is injected with a nodeSelector.
			// Expected: the existing NodeSelector is overwritten.
			name: "WithExistingNodeSelector",
			podSpec: &corev1.PodSpec{
				NodeSelector: defaultNodeSelector,
			},
			nodeSelector: map[string]string{"foo": "bar"},
			expected: &corev1.PodSpec{
				NodeSelector: map[string]string{"foo": "bar"},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// The original version of this test discarded the returned
			// error, leaving the nil-PodSpec contract unasserted.
			err := overrides.InjectNodeSelectorIntoDeployment(tt.podSpec, tt.nodeSelector)
			if tt.wantErr {
				assert.Error(t, err)
			} else {
				assert.NoError(t, err)
			}

			assert.Equal(t, tt.expected, tt.podSpec)
		})
	}
}
84 changes: 84 additions & 0 deletions test/e2e/subscription_e2e_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,11 @@ func Step(level int, text string, callbacks ...func()) {
By(strings.Repeat(" ", level*2)+text, callbacks...)
}

const (
	// timeout and interval bound the Eventually polling loops used by
	// the specs in this file.
	timeout = time.Second * 20
	interval = time.Millisecond * 100
)

var _ = By

var _ = Describe("Subscription", func() {
Expand Down Expand Up @@ -1187,6 +1192,60 @@ var _ = Describe("Subscription", func() {
checkDeploymentWithPodConfiguration(GinkgoT(), kubeClient, csv, podConfig.Env, podConfig.Volumes, podConfig.VolumeMounts, podConfig.Tolerations, podConfig.Resources)
})

// Verifies end-to-end that a Subscription's spec.config.nodeSelector is
// propagated by OLM onto the pod template of the operator deployment(s)
// installed for the resolved CSV.
It("creation with nodeSelector config", func() {
	kubeClient := newKubeClient()
	crClient := newCRClient()

	// NOTE(review): this ConfigMap is created and cleaned up but never
	// referenced by the subscription config below — it looks like a
	// leftover from the volume-mount test; confirm whether it is needed.
	testConfigMapName := genName("test-configmap-")
	testConfigMap := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: testConfigMapName,
		},
	}

	_, err := kubeClient.KubernetesInterface().CoreV1().ConfigMaps(testNamespace).Create(context.Background(), testConfigMap, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	defer func() {
		// Best-effort cleanup; fail the spec if the ConfigMap cannot be removed.
		err := kubeClient.KubernetesInterface().CoreV1().ConfigMaps(testNamespace).Delete(context.Background(), testConfigMap.Name, metav1.DeleteOptions{})
		require.NoError(GinkgoT(), err)
	}()

	// Configure the Subscription with the nodeSelector under test.
	podNodeSelector := map[string]string{
		"foo": "bar",
	}

	podConfig := v1alpha1.SubscriptionConfig{
		NodeSelector: podNodeSelector,
	}

	permissions := deploymentPermissions()
	catsrc, subSpec, catsrcCleanup := newCatalogSource(GinkgoT(), kubeClient, crClient, "podconfig", testNamespace, permissions)
	defer catsrcCleanup()

	// Ensure that the catalog source is resolved before we create a subscription.
	_, err = fetchCatalogSourceOnStatus(crClient, catsrc.GetName(), testNamespace, catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)

	subscriptionName := genName("podconfig-sub-")
	subSpec.Config = podConfig
	cleanupSubscription := createSubscriptionForCatalogWithSpec(GinkgoT(), crClient, testNamespace, subscriptionName, subSpec)
	defer cleanupSubscription()

	subscription, err := fetchSubscription(crClient, testNamespace, subscriptionName, subscriptionStateAtLatestChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)

	csv, err := fetchCSV(crClient, subscription.Status.CurrentCSV, testNamespace, buildCSVConditionChecker(v1alpha1.CSVPhaseInstalling))
	require.NoError(GinkgoT(), err)

	// Poll until every deployment from the CSV's install strategy carries
	// the configured nodeSelector on its pod template.
	Eventually(func() error {
		return checkDeploymentHasPodConfigNodeSelector(GinkgoT(), kubeClient, csv, podNodeSelector)
	}, timeout, interval).Should(Succeed())

})

It("creation with dependencies", func() {

kubeClient := newKubeClient()
Expand Down Expand Up @@ -2338,6 +2397,31 @@ func createSubscriptionForCatalogWithSpec(t GinkgoTInterface, crc versioned.Inte
return buildSubscriptionCleanupFunc(crc, subscription)
}

// checkDeploymentHasPodConfigNodeSelector verifies that every deployment
// declared in the CSV's install strategy exists on-cluster with the given
// nodeSelector on its pod template. It returns an error for the first
// lookup failure or mismatch it finds, and nil when all deployments match.
func checkDeploymentHasPodConfigNodeSelector(t GinkgoTInterface, client operatorclient.ClientInterface, csv *v1alpha1.ClusterServiceVersion, nodeSelector map[string]string) error {
	resolver := install.StrategyResolver{}

	strategy, err := resolver.UnmarshalStrategy(csv.Spec.InstallStrategy)
	if err != nil {
		return err
	}

	strategyDetailsDeployment, ok := strategy.(*v1alpha1.StrategyDetailsDeployment)
	require.Truef(t, ok, "could not cast install strategy as type %T", strategyDetailsDeployment)

	for _, deploymentSpec := range strategyDetailsDeployment.DeploymentSpecs {
		deployment, err := client.KubernetesInterface().AppsV1().Deployments(csv.GetNamespace()).Get(context.Background(), deploymentSpec.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}

		// Return immediately on mismatch: the original assigned err but
		// never returned it, so this check could never fail. Report the
		// on-cluster deployment's selector as the actual value (the old
		// message printed the expected CSV spec's selector instead).
		if !reflect.DeepEqual(nodeSelector, deployment.Spec.Template.Spec.NodeSelector) {
			return fmt.Errorf("actual nodeSelector=%v does not match expected nodeSelector=%v", deployment.Spec.Template.Spec.NodeSelector, nodeSelector)
		}
	}
	return nil
}

func checkDeploymentWithPodConfiguration(t GinkgoTInterface, client operatorclient.ClientInterface, csv *v1alpha1.ClusterServiceVersion, envVar []corev1.EnvVar, volumes []corev1.Volume, volumeMounts []corev1.VolumeMount, tolerations []corev1.Toleration, resources corev1.ResourceRequirements) {
resolver := install.StrategyResolver{}

Expand Down