diff --git a/config/crd/bases/policies.kubewarden.io_admissionpolicies.yaml b/config/crd/bases/policies.kubewarden.io_admissionpolicies.yaml
index 6590619e9..feb70f644 100644
--- a/config/crd/bases/policies.kubewarden.io_admissionpolicies.yaml
+++ b/config/crd/bases/policies.kubewarden.io_admissionpolicies.yaml
@@ -87,20 +87,20 @@ spec:
the API request to be rejected. The default behaviour is "Fail"
type: string
matchPolicy:
- description: "matchPolicy defines how the \"rules\" list is used to
- match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".
- \n - Exact: match a request only if it exactly matches a specified
+ description: 'matchPolicy defines how the "rules" list is used to
+ match incoming requests. Allowed values are "Exact" or "Equivalent".
+
- Exact: match a request only if it exactly matches a specified
rule. For example, if deployments can be modified via apps/v1, apps/v1beta1,
- and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"],
- apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to
- apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.
- \n - Equivalent: match a request if modifies a resource listed in
- rules, even via another API group or version. For example, if deployments
+ and extensions/v1beta1, but "rules" only included `apiGroups:["apps"],
+ apiVersions:["v1"], resources: ["deployments"]`, a request to apps/v1beta1
+ or extensions/v1beta1 would not be sent to the webhook.
-
+ Equivalent: match a request if modifies a resource listed in rules,
+ even via another API group or version. For example, if deployments
can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
- and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"],
- resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1
- would be converted to apps/v1 and sent to the webhook. \n Defaults
- to \"Equivalent\""
+ and "rules" only included `apiGroups:["apps"], apiVersions:["v1"],
+ resources: ["deployments"]`, a request to apps/v1beta1 or extensions/v1beta1
+ would be converted to apps/v1 and sent to the webhook.
+ Defaults to "Equivalent"'
type: string
mode:
default: protect
@@ -425,20 +425,20 @@ spec:
the API request to be rejected. The default behaviour is "Fail"
type: string
matchPolicy:
- description: "matchPolicy defines how the \"rules\" list is used to
- match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".
- \n - Exact: match a request only if it exactly matches a specified
+ description: 'matchPolicy defines how the "rules" list is used to
+ match incoming requests. Allowed values are "Exact" or "Equivalent".
+ - Exact: match a request only if it exactly matches a specified
rule. For example, if deployments can be modified via apps/v1, apps/v1beta1,
- and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"],
- apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to
- apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.
- \n - Equivalent: match a request if modifies a resource listed in
- rules, even via another API group or version. For example, if deployments
+ and extensions/v1beta1, but "rules" only included `apiGroups:["apps"],
+ apiVersions:["v1"], resources: ["deployments"]`, a request to apps/v1beta1
+ or extensions/v1beta1 would not be sent to the webhook.
-
+ Equivalent: match a request if modifies a resource listed in rules,
+ even via another API group or version. For example, if deployments
can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
- and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"],
- resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1
- would be converted to apps/v1 and sent to the webhook. \n Defaults
- to \"Equivalent\""
+ and "rules" only included `apiGroups:["apps"], apiVersions:["v1"],
+ resources: ["deployments"]`, a request to apps/v1beta1 or extensions/v1beta1
+ would be converted to apps/v1 and sent to the webhook.
+ Defaults to "Equivalent"'
type: string
mode:
default: protect
diff --git a/config/crd/bases/policies.kubewarden.io_clusteradmissionpolicies.yaml b/config/crd/bases/policies.kubewarden.io_clusteradmissionpolicies.yaml
index 82097282d..15d732dd2 100644
--- a/config/crd/bases/policies.kubewarden.io_clusteradmissionpolicies.yaml
+++ b/config/crd/bases/policies.kubewarden.io_clusteradmissionpolicies.yaml
@@ -108,20 +108,20 @@ spec:
the API request to be rejected. The default behaviour is "Fail"
type: string
matchPolicy:
- description: "matchPolicy defines how the \"rules\" list is used to
- match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".
- \n - Exact: match a request only if it exactly matches a specified
+ description: 'matchPolicy defines how the "rules" list is used to
+ match incoming requests. Allowed values are "Exact" or "Equivalent".
+ - Exact: match a request only if it exactly matches a specified
rule. For example, if deployments can be modified via apps/v1, apps/v1beta1,
- and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"],
- apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to
- apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.
- \n - Equivalent: match a request if modifies a resource listed in
- rules, even via another API group or version. For example, if deployments
+ and extensions/v1beta1, but "rules" only included `apiGroups:["apps"],
+ apiVersions:["v1"], resources: ["deployments"]`, a request to apps/v1beta1
+ or extensions/v1beta1 would not be sent to the webhook.
-
+ Equivalent: match a request if modifies a resource listed in rules,
+ even via another API group or version. For example, if deployments
can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
- and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"],
- resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1
- would be converted to apps/v1 and sent to the webhook. \n Defaults
- to \"Equivalent\""
+ and "rules" only included `apiGroups:["apps"], apiVersions:["v1"],
+ resources: ["deployments"]`, a request to apps/v1beta1 or extensions/v1beta1
+ would be converted to apps/v1 and sent to the webhook.
+ Defaults to "Equivalent"'
type: string
mode:
default: protect
@@ -147,22 +147,31 @@ spec:
mutate incoming requests or not.
type: boolean
namespaceSelector:
- description: "NamespaceSelector decides whether to run the webhook
+ description: 'NamespaceSelector decides whether to run the webhook
on an object based on whether the namespace for that object matches
the selector. If the object itself is a namespace, the matching
is performed on object.metadata.labels. If the object is another
- cluster scoped resource, it never skips the webhook. \n For example,
- to run the webhook on any objects whose namespace is not associated
- with \"runlevel\" of \"0\" or \"1\"; you will set the selector
- as follows: \"namespaceSelector\": { \"matchExpressions\": [ { \"key\":
- \"runlevel\", \"operator\": \"NotIn\", \"values\": [ \"0\", \"1\"
- ] } ] } \n If instead you want to only run the webhook on any objects
- whose namespace is associated with the \"environment\" of \"prod\"
- or \"staging\"; you will set the selector as follows: \"namespaceSelector\":
- { \"matchExpressions\": [ { \"key\": \"environment\", \"operator\":
- \"In\", \"values\": [ \"prod\", \"staging\" ] } ] } \n See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
- for more examples of label selectors. \n Default to the empty LabelSelector,
- which matches everything."
+ cluster scoped resource, it never skips the webhook.
+ For example, to run the webhook on any objects whose namespace is
+ not associated with "runlevel" of "0" or "1"; you will set the
+ selector as follows: "namespaceSelector": \{
"matchExpressions":
+ [
\{
"key":
+ "runlevel",
"operator":
+ "NotIn",
"values": [
+ "0",
"1"
+ ]
\}
+ ]
\}
If instead you want to only run the
+ webhook on any objects whose namespace is associated with the "environment"
+ of "prod" or "staging"; you will set the selector as follows:
+ "namespaceSelector": \{
"matchExpressions": [
+ \{
"key":
+ "environment",
"operator":
+ "In",
"values": [
+ "prod",
"staging"
+ ]
\}
+ ]
\}
See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
+ for more examples of label selectors.
Default to the
+ empty LabelSelector, which matches everything.'
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
@@ -506,20 +515,20 @@ spec:
the API request to be rejected. The default behaviour is "Fail"
type: string
matchPolicy:
- description: "matchPolicy defines how the \"rules\" list is used to
- match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".
- \n - Exact: match a request only if it exactly matches a specified
+ description: 'matchPolicy defines how the "rules" list is used to
+ match incoming requests. Allowed values are "Exact" or "Equivalent".
+ - Exact: match a request only if it exactly matches a specified
rule. For example, if deployments can be modified via apps/v1, apps/v1beta1,
- and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"],
- apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to
- apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.
- \n - Equivalent: match a request if modifies a resource listed in
- rules, even via another API group or version. For example, if deployments
+ and extensions/v1beta1, but "rules" only included `apiGroups:["apps"],
+ apiVersions:["v1"], resources: ["deployments"]`, a request to apps/v1beta1
+ or extensions/v1beta1 would not be sent to the webhook.
-
+ Equivalent: match a request if modifies a resource listed in rules,
+ even via another API group or version. For example, if deployments
can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
- and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"],
- resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1
- would be converted to apps/v1 and sent to the webhook. \n Defaults
- to \"Equivalent\""
+ and "rules" only included `apiGroups:["apps"], apiVersions:["v1"],
+ resources: ["deployments"]`, a request to apps/v1beta1 or extensions/v1beta1
+ would be converted to apps/v1 and sent to the webhook.
+ Defaults to "Equivalent"'
type: string
mode:
default: protect
@@ -544,22 +553,31 @@ spec:
mutate incoming requests or not.
type: boolean
namespaceSelector:
- description: "NamespaceSelector decides whether to run the webhook
+ description: 'NamespaceSelector decides whether to run the webhook
on an object based on whether the namespace for that object matches
the selector. If the object itself is a namespace, the matching
is performed on object.metadata.labels. If the object is another
- cluster scoped resource, it never skips the webhook. \n For example,
- to run the webhook on any objects whose namespace is not associated
- with \"runlevel\" of \"0\" or \"1\"; you will set the selector
- as follows: \"namespaceSelector\": { \"matchExpressions\": [ { \"key\":
- \"runlevel\", \"operator\": \"NotIn\", \"values\": [ \"0\", \"1\"
- ] } ] } \n If instead you want to only run the webhook on any objects
- whose namespace is associated with the \"environment\" of \"prod\"
- or \"staging\"; you will set the selector as follows: \"namespaceSelector\":
- { \"matchExpressions\": [ { \"key\": \"environment\", \"operator\":
- \"In\", \"values\": [ \"prod\", \"staging\" ] } ] } \n See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
- for more examples of label selectors. \n Default to the empty LabelSelector,
- which matches everything."
+ cluster scoped resource, it never skips the webhook.
+ For example, to run the webhook on any objects whose namespace is
+ not associated with "runlevel" of "0" or "1"; you will set the
+ selector as follows: "namespaceSelector": \{
"matchExpressions":
+ [
\{
"key":
+ "runlevel",
"operator":
+ "NotIn",
"values": [
+ "0",
"1"
+ ]
\}
+ ]
\}
If instead you want to only run the
+ webhook on any objects whose namespace is associated with the "environment"
+ of "prod" or "staging"; you will set the selector as follows:
+ "namespaceSelector": \{
"matchExpressions": [
+ \{
"key":
+ "environment",
"operator":
+ "In",
"values": [
+ "prod",
"staging"
+ ]
\}
+ ]
\}
See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels
+ for more examples of label selectors.
Default to the
+ empty LabelSelector, which matches everything.'
properties:
matchExpressions:
description: matchExpressions is a list of label selector requirements.
diff --git a/config/crd/bases/policies.kubewarden.io_policyservers.yaml b/config/crd/bases/policies.kubewarden.io_policyservers.yaml
index 9dac51a66..2db1c93fb 100644
--- a/config/crd/bases/policies.kubewarden.io_policyservers.yaml
+++ b/config/crd/bases/policies.kubewarden.io_policyservers.yaml
@@ -1136,10 +1136,27 @@ spec:
used for pulling policies from repositories.
type: string
insecureSources:
- description: List of insecure URIs to policy repositories.
+ description: List of insecure URIs to policy repositories. The `insecureSources`
+ content format corresponds with the contents of the `insecure_sources`
+ key in `sources.yaml`. Reference for `sources.yaml` is found in
+ the Kubewarden documentation in the reference section.
items:
type: string
type: array
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number of policy server replicas that can be unavailable
+ after the eviction
+ x-kubernetes-int-or-string: true
+ minAvailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Number of policy server replicas that must be still available
+ after the eviction
+ x-kubernetes-int-or-string: true
replicas:
description: Replicas is the number of desired replicas.
format: int32
@@ -1503,7 +1520,10 @@ spec:
type: array
description: Key value map of registry URIs endpoints to a list of
their associated PEM encoded certificate authorities that have to
- be used to verify the certificate used by the endpoint.
+ be used to verify the certificate used by the endpoint. The `sourceAuthorities`
+ content format corresponds with the contents of the `source_authorities`
+ key in `sources.yaml`. Reference for `sources.yaml` is found in
+ the Kubewarden documentation in the reference section.
type: object
verificationConfig:
description: Name of VerificationConfig configmap in the same namespace,
@@ -1749,7 +1769,10 @@ spec:
used for pulling policies from repositories.
type: string
insecureSources:
- description: List of insecure URIs to policy repositories.
+ description: List of insecure URIs to policy repositories. The `insecureSources`
+ content format corresponds with the contents of the `insecure_sources`
+ key in `sources.yaml`. Reference for `sources.yaml` is found in
+ the Kubewarden documentation in the reference section.
items:
type: string
type: array
@@ -1768,7 +1791,10 @@ spec:
type: array
description: Key value map of registry URIs endpoints to a list of
their associated PEM encoded certificate authorities that have to
- be used to verify the certificate used by the endpoint.
+ be used to verify the certificate used by the endpoint. The `sourceAuthorities`
+ content format corresponds with the contents of the `source_authorities`
+ key in `sources.yaml`. Reference for `sources.yaml` is found in
+ the Kubewarden documentation in the reference section.
type: object
verificationConfig:
description: Name of VerificationConfig configmap in the same namespace,
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index 08fc17a0d..a0dfa514c 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -155,3 +155,15 @@ rules:
- get
- patch
- update
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
diff --git a/controllers/policyserver_controller.go b/controllers/policyserver_controller.go
index 1a76783b4..f49f68c0b 100644
--- a/controllers/policyserver_controller.go
+++ b/controllers/policyserver_controller.go
@@ -57,6 +57,7 @@ type PolicyServerReconciler struct {
//+kubebuilder:rbac:namespace=kubewarden,groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:namespace=kubewarden,groups=apps,resources=replicasets,verbs=get;list;watch
//+kubebuilder:rbac:namespace=kubewarden,groups=core,resources=pods,verbs=get;list;watch
+//+kubebuilder:rbac:namespace=kubewarden,groups=policy,resources=poddisruptionbudgets,verbs=get;list;watch;create;update;patch;delete
func (r *PolicyServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
var policyServer policiesv1.PolicyServer
diff --git a/controllers/policyserver_controller_test.go b/controllers/policyserver_controller_test.go
index 72db663c2..e99cf8dbe 100644
--- a/controllers/policyserver_controller_test.go
+++ b/controllers/policyserver_controller_test.go
@@ -18,130 +18,269 @@ package controllers
import (
"fmt"
+ "time"
. "github.com/onsi/ginkgo/v2" //nolint:revive
. "github.com/onsi/gomega" //nolint:revive
+ "k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"github.com/kubewarden/kubewarden-controller/internal/pkg/constants"
policiesv1 "github.com/kubewarden/kubewarden-controller/pkg/apis/policies/v1"
+ k8spoliciesv1 "k8s.io/api/policy/v1"
)
var _ = Describe("PolicyServer controller", func() {
- policyServerName := newName("policy-server")
-
- BeforeEach(func() {
- Expect(
- k8sClient.Create(ctx, policyServerFactory(policyServerName)),
- ).To(haveSucceededOrAlreadyExisted())
- // Wait for the Service associated with the PolicyServer to be created
- Eventually(func(g Gomega) error {
- _, err := getTestPolicyServerService(policyServerName)
- return err
- }, timeout, pollInterval).Should(Succeed())
- })
+ Context("when starting with a new PolicyServer", func() {
+ policyServerName := newName("policy-server")
- Context("with no assigned policies", func() {
- It("should get its finalizer removed", func() {
- By("deleting the policy server")
+ BeforeEach(func() {
Expect(
- k8sClient.Delete(ctx, policyServerFactory(policyServerName)),
- ).To(Succeed())
-
- Eventually(func(g Gomega) (*policiesv1.PolicyServer, error) {
- return getTestPolicyServer(policyServerName)
- }, timeout, pollInterval).ShouldNot(
- HaveField("Finalizers", ContainElement(constants.KubewardenFinalizer)),
- )
+ k8sClient.Create(ctx, policyServerFactory(policyServerName)),
+ ).To(haveSucceededOrAlreadyExisted())
+ // Wait for the Service associated with the PolicyServer to be created
+ Eventually(func(g Gomega) error {
+ _, err := getTestPolicyServerService(policyServerName)
+ return err
+ }, timeout, pollInterval).Should(Succeed())
})
- AfterEach(func() {
+ Context("with no assigned policies", func() {
+ It("should get its finalizer removed", func() {
+ By("deleting the policy server")
+ Expect(
+ k8sClient.Delete(ctx, policyServerFactory(policyServerName)),
+ ).To(Succeed())
+
+ Eventually(func(g Gomega) (*policiesv1.PolicyServer, error) {
+ return getTestPolicyServer(policyServerName)
+ }, timeout, pollInterval).ShouldNot(
+ HaveField("Finalizers", ContainElement(constants.KubewardenFinalizer)),
+ )
+ })
+
+ AfterEach(func() {
+				// It's necessary to remove the test finalizer to make the
+				// BeforeEach work as expected. Otherwise, the policy service
+				// creation will not work as expected.
+ policyServer, err := getTestPolicyServer(policyServerName)
+ Expect(err).Should(Succeed())
+ controllerutil.RemoveFinalizer(policyServer, IntegrationTestsFinalizer)
+ err = reconciler.Client.Update(ctx, policyServer)
+ Expect(err).ToNot(HaveOccurred())
+ Eventually(func(g Gomega) error {
+ _, err := getTestPolicyServer(policyServerName)
+ return err
+ }, timeout, pollInterval).ShouldNot(Succeed())
+ })
+
+ })
+
+ Context("with assigned policies", Serial, func() {
+ policyName := newName("policy")
+
+ It("should delete assigned policies", func() {
+ By("creating a policy and assigning it to the policy server")
+ Expect(
+ k8sClient.Create(ctx, clusterAdmissionPolicyFactory(policyName, policyServerName, false)),
+ ).To(haveSucceededOrAlreadyExisted())
+
+ Expect(
+ getTestPolicyServerService(policyServerName),
+ ).To(
+ HaveField("DeletionTimestamp", BeNil()),
+ )
+
+ By("deleting the policy server")
+ Expect(
+ k8sClient.Delete(ctx, policyServerFactory(policyServerName)),
+ ).To(Succeed())
+
+ Eventually(func(g Gomega) (*policiesv1.ClusterAdmissionPolicy, error) {
+ return getTestClusterAdmissionPolicy(policyName)
+ }, timeout, pollInterval).ShouldNot(
+ HaveField("DeletionTimestamp", BeNil()),
+ )
+ })
+
+ It("should not delete its managed resources until all the scheduled policies are gone", func() {
+ By("having still policies pending deletion")
+ Expect(
+ getTestClusterAdmissionPolicy(policyName),
+ ).To(
+ And(
+ HaveField("DeletionTimestamp", Not(BeNil())),
+ HaveField("Finalizers", Not(ContainElement(constants.KubewardenFinalizer))),
+ HaveField("Finalizers", ContainElement(IntegrationTestsFinalizer)),
+ ),
+ )
+
+ Eventually(func(g Gomega) error {
+ _, err := getTestPolicyServerService(policyServerName)
+ return err
+ }).Should(Succeed())
+ })
+
+ It(fmt.Sprintf("should get its %q finalizer removed", constants.KubewardenFinalizer), func() {
+ By("not having policies assigned")
+ policy, err := getTestClusterAdmissionPolicy(policyName)
+ Expect(err).ToNot(HaveOccurred())
+
+ controllerutil.RemoveFinalizer(policy, IntegrationTestsFinalizer)
+ err = reconciler.Client.Update(ctx, policy)
+ Expect(err).ToNot(HaveOccurred())
+
+ // wait for the reconciliation loop of the ClusterAdmissionPolicy to remove the resource
+ Eventually(func(g Gomega) error {
+ _, err := getTestClusterAdmissionPolicy(policyName)
+ return err
+ }, timeout, pollInterval).ShouldNot(Succeed())
+
+ Eventually(func(g Gomega) error {
+ _, err := getTestPolicyServerService(policyServerName)
+ return err
+ }, timeout, pollInterval).ShouldNot(Succeed())
+
+ Eventually(func(g Gomega) (*policiesv1.PolicyServer, error) {
+ return getTestPolicyServer(policyServerName)
+ }, timeout, pollInterval).ShouldNot(
+ HaveField("Finalizers", ContainElement(constants.KubewardenFinalizer)),
+ )
+ })
+ })
+ })
+
+ Context("when starting policy server", func() {
+ policyServerName := newName("policy-server")
+
+ It("with MinAvailable PodDisruptionBudget configuration should create PDB", func() {
+ minAvailable := intstr.FromInt(2)
+ policyServer := policyServerFactory(policyServerName)
+ policyServer.Spec.MinAvailable = &minAvailable
// It's necessary remove the test finalizer to make the
- // BeforeEach work as extected. Otherwise, the policy service
- // creation will not work as expected
- policyServer, err := getTestPolicyServer(policyServerName)
- Expect(err).Should(Succeed())
+			// policy service go away.
controllerutil.RemoveFinalizer(policyServer, IntegrationTestsFinalizer)
- err = reconciler.Client.Update(ctx, policyServer)
- Expect(err).ToNot(HaveOccurred())
+
+ Expect(
+ k8sClient.Create(ctx, policyServer),
+ ).To(haveSucceededOrAlreadyExisted())
+			// Wait for the PolicyServer resource to be created
Eventually(func(g Gomega) error {
_, err := getTestPolicyServer(policyServerName)
return err
- }, timeout, pollInterval).ShouldNot(Succeed())
- })
+ }, timeout, pollInterval).Should(Succeed())
+ Eventually(func(g Gomega) *k8spoliciesv1.PodDisruptionBudget {
+ pdb, _ := getPolicyServerPodDisruptionBudget(policyServerName)
+ return pdb
+ }, timeout, pollInterval).Should(policyServerPodDisruptionBudgetMatcher(policyServer, &minAvailable, nil))
- })
+ })
- Context("it has assigned policies", Serial, func() {
- policyName := newName("policy")
+ It("with MaxUnavailable PodDisruptionBudget configuration should create PDB", func() {
+ maxUnavailable := intstr.FromInt(2)
+ policyServer := policyServerFactory(policyServerName)
+ policyServer.Spec.MaxUnavailable = &maxUnavailable
+			// It's necessary to remove the test finalizer to make the
+			// policy service go away.
+ controllerutil.RemoveFinalizer(policyServer, IntegrationTestsFinalizer)
- It("should delete assigned policies", func() {
- By("creating a policy and assigning it to the policy server")
Expect(
- k8sClient.Create(ctx, clusterAdmissionPolicyFactory(policyName, policyServerName, false)),
+ k8sClient.Create(ctx, policyServer),
).To(haveSucceededOrAlreadyExisted())
+			// Wait for the PolicyServer resource to be created
+ Eventually(func(g Gomega) error {
+ _, err := getTestPolicyServer(policyServerName)
+ return err
+ }, timeout, pollInterval).Should(Succeed())
+ Eventually(func(g Gomega) *k8spoliciesv1.PodDisruptionBudget {
+ pdb, _ := getPolicyServerPodDisruptionBudget(policyServerName)
+ return pdb
+ }, timeout, pollInterval).Should(policyServerPodDisruptionBudgetMatcher(policyServer, nil, &maxUnavailable))
+ })
- Expect(
- getTestPolicyServerService(policyServerName),
- ).To(
- HaveField("DeletionTimestamp", BeNil()),
- )
+ It("with no PodDisruptionBudget configuration should not create PDB", func() {
+ policyServer := policyServerFactory(policyServerName)
+			// It's necessary to remove the test finalizer to make the
+			// policy service go away.
+ controllerutil.RemoveFinalizer(policyServer, IntegrationTestsFinalizer)
- By("deleting the policy server")
Expect(
- k8sClient.Delete(ctx, policyServerFactory(policyServerName)),
- ).To(Succeed())
-
- Eventually(func(g Gomega) (*policiesv1.ClusterAdmissionPolicy, error) {
- return getTestClusterAdmissionPolicy(policyName)
- }, timeout, pollInterval).ShouldNot(
- HaveField("DeletionTimestamp", BeNil()),
- )
+ k8sClient.Create(ctx, policyServer),
+ ).To(haveSucceededOrAlreadyExisted())
+			// Wait for the PolicyServer resource to be created
+ Eventually(func(g Gomega) error {
+ _, err := getTestPolicyServer(policyServerName)
+ return err
+ }, timeout, pollInterval).Should(Succeed())
+			// Wait for the Service associated with the PolicyServer to be created.
+			// The Service is reconciled after the PDB, so once the Service exists
+			// the PDB reconciliation has already run for this PolicyServer.
+ Eventually(func(g Gomega) error {
+ _, err := getTestPolicyServerService(policyServerName)
+ return err
+ }, timeout, pollInterval).Should(Succeed())
+ Consistently(func(g Gomega) error {
+ _, err := getPolicyServerPodDisruptionBudget(policyServerName)
+ return err
+ }, 10*time.Second, pollInterval).ShouldNot(Succeed())
})
- It("should not delete its managed resources until all the scheduled policies are gone", func() {
- By("having still policies pending deletion")
- Expect(
- getTestClusterAdmissionPolicy(policyName),
- ).To(
- And(
- HaveField("DeletionTimestamp", Not(BeNil())),
- HaveField("Finalizers", Not(ContainElement(constants.KubewardenFinalizer))),
- HaveField("Finalizers", ContainElement(IntegrationTestsFinalizer)),
- ),
- )
+ It("when update policy server PodDisruptionBudget configuration should create PDB", func() {
+ policyServer := policyServerFactory(policyServerName)
+			// It's necessary to remove the test finalizer to make the
+			// policy service go away.
+ controllerutil.RemoveFinalizer(policyServer, IntegrationTestsFinalizer)
+ Expect(
+ k8sClient.Create(ctx, policyServer),
+ ).To(haveSucceededOrAlreadyExisted())
+			// Wait for the PolicyServer resource to be created
+ Eventually(func(g Gomega) error {
+ _, err := getTestPolicyServer(policyServerName)
+ return err
+ }, timeout, pollInterval).Should(Succeed())
+			// Wait for the Service associated with the PolicyServer to be created.
+			// The Service is reconciled after the PDB, so once the Service exists
+			// the PDB reconciliation has already run for this PolicyServer.
Eventually(func(g Gomega) error {
_, err := getTestPolicyServerService(policyServerName)
return err
- }).Should(Succeed())
- })
+ }, timeout, pollInterval).Should(Succeed())
+ Consistently(func(g Gomega) error {
+ _, err := getPolicyServerPodDisruptionBudget(policyServerName)
+ return err
+ }, 10*time.Second, pollInterval).ShouldNot(Succeed())
- It(fmt.Sprintf("should get its %q finalizer removed", constants.KubewardenFinalizer), func() {
- By("not having policies assigned")
- policy, err := getTestClusterAdmissionPolicy(policyName)
+ policyServer, err := getTestPolicyServer(policyServerName)
Expect(err).ToNot(HaveOccurred())
+ maxUnavailable := intstr.FromInt(2)
+ policyServer.Spec.MaxUnavailable = &maxUnavailable
- controllerutil.RemoveFinalizer(policy, IntegrationTestsFinalizer)
- err = reconciler.Client.Update(ctx, policy)
+ err = k8sClient.Update(ctx, policyServer)
+ Expect(err).ToNot(HaveOccurred())
+
+ Eventually(func(g Gomega) *k8spoliciesv1.PodDisruptionBudget {
+ pdb, _ := getPolicyServerPodDisruptionBudget(policyServerName)
+ return pdb
+ }, timeout, pollInterval).Should(policyServerPodDisruptionBudgetMatcher(policyServer, nil, &maxUnavailable))
+
+ })
+
+ AfterEach(func() {
+ policyServer, err := getTestPolicyServer(policyServerName)
+ Expect(err).Should(Succeed())
+
+ err = reconciler.Client.Delete(ctx, policyServer)
Expect(err).ToNot(HaveOccurred())
- // wait for the reconciliation loop of the ClusterAdmissionPolicy to remove the resource
Eventually(func(g Gomega) error {
- _, err := getTestClusterAdmissionPolicy(policyName)
+ _, err := getTestPolicyServer(policyServerName)
return err
}, timeout, pollInterval).ShouldNot(Succeed())
Eventually(func(g Gomega) error {
- _, err := getTestPolicyServerService(policyServerName)
+ _, err := getPolicyServerPodDisruptionBudget(policyServerName)
return err
}, timeout, pollInterval).ShouldNot(Succeed())
-
- Eventually(func(g Gomega) (*policiesv1.PolicyServer, error) {
- return getTestPolicyServer(policyServerName)
- }, timeout, pollInterval).ShouldNot(
- HaveField("Finalizers", ContainElement(constants.KubewardenFinalizer)),
- )
})
})
+
})
diff --git a/controllers/utils_test.go b/controllers/utils_test.go
index a9c6ce5b5..95c988d0a 100644
--- a/controllers/utils_test.go
+++ b/controllers/utils_test.go
@@ -26,13 +26,16 @@ import (
"github.com/kubewarden/kubewarden-controller/internal/pkg/constants"
policiesv1 "github.com/kubewarden/kubewarden-controller/pkg/apis/policies/v1"
- . "github.com/onsi/gomega" //nolint:revive
+ . "github.com/onsi/gomega" //nolint:revive
+ . "github.com/onsi/gomega/gstruct" //nolint:revive
"github.com/onsi/gomega/types"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
corev1 "k8s.io/api/core/v1"
+ k8spoliciesv1 "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -220,3 +223,50 @@ func randStringRunes(n int) string {
func newName(prefix string) string {
return fmt.Sprintf("%s-%s", prefix, randStringRunes(8))
}
+
+// getPolicyServerPodDisruptionBudget fetches the PodDisruptionBudget associated
+// with the PolicyServer of the given name from the deployments namespace,
+// reading through reconciler.APIReader. The PDB name is derived with
+// PolicyServer.NameWithPrefix(), mirroring what the controller creates.
+func getPolicyServerPodDisruptionBudget(policyServerName string) (*k8spoliciesv1.PodDisruptionBudget, error) {
+	// Build a minimal PolicyServer only to compute the prefixed PDB name.
+	policyServer := policiesv1.PolicyServer{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: policyServerName,
+		},
+	}
+	podDisruptionBudgetName := policyServer.NameWithPrefix()
+	pdb := &k8spoliciesv1.PodDisruptionBudget{}
+	if err := reconciler.APIReader.Get(ctx, client.ObjectKey{Name: podDisruptionBudgetName, Namespace: DeploymentsNamespace}, pdb); err != nil {
+		return nil, errors.Join(errors.New("could not find PodDisruptionBudget"), err)
+	}
+	return pdb, nil
+}
+
+// policyServerPodDisruptionBudgetMatcher returns a Gomega matcher asserting
+// that a *PodDisruptionBudget is non-nil, is owned by the given PolicyServer
+// (owner reference by name and kind), selects pods via the app and
+// policy-server labels, and carries the expected MinAvailable/MaxUnavailable
+// values. Passing nil for an expectation asserts that the corresponding spec
+// field is nil.
+func policyServerPodDisruptionBudgetMatcher(policyServer *policiesv1.PolicyServer, minAvailable *intstr.IntOrString, maxUnavailable *intstr.IntOrString) types.GomegaMatcher { //nolint:ireturn
+	// Default to expecting nil pointers; upgrade to value matchers when the
+	// caller supplies an expected IntOrString.
+	maxUnavailableMatcher := BeNil()
+	minAvailableMatcher := BeNil()
+	if minAvailable != nil {
+		minAvailableMatcher = PointTo(Equal(*minAvailable))
+	}
+	if maxUnavailable != nil {
+		maxUnavailableMatcher = PointTo(Equal(*maxUnavailable))
+	}
+	return SatisfyAll(
+		Not(BeNil()),
+		PointTo(MatchFields(IgnoreExtras, Fields{
+			"ObjectMeta": MatchFields(IgnoreExtras, Fields{
+				"OwnerReferences": ContainElement(MatchFields(IgnoreExtras, Fields{
+					"Name": Equal(policyServer.GetName()),
+					"Kind": Equal("PolicyServer"),
+				})),
+			}),
+			"Spec": MatchFields(IgnoreExtras, Fields{
+				"MaxUnavailable": maxUnavailableMatcher,
+				"MinAvailable":   minAvailableMatcher,
+				"Selector": PointTo(MatchAllFields(Fields{
+					"MatchLabels": MatchAllKeys(Keys{
+						constants.AppLabelKey:          Equal(policyServer.AppLabel()),
+						constants.PolicyServerLabelKey: Equal(policyServer.GetName()),
+					}),
+					"MatchExpressions": Ignore(),
+				})),
+			})}),
+		),
+	)
+}
diff --git a/internal/pkg/admission/policy-server-pod-disruption-budget.go b/internal/pkg/admission/policy-server-pod-disruption-budget.go
new file mode 100644
index 000000000..4f949626b
--- /dev/null
+++ b/internal/pkg/admission/policy-server-pod-disruption-budget.go
@@ -0,0 +1,69 @@
+package admission
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/kubewarden/kubewarden-controller/internal/pkg/constants"
+ policiesv1 "github.com/kubewarden/kubewarden-controller/pkg/apis/policies/v1"
+ k8spoliciesv1 "k8s.io/api/policy/v1"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+)
+
+func (r *Reconciler) reconcilePolicyServerPodDisruptionBudget(ctx context.Context, policyServer *policiesv1.PolicyServer) error {
+ if policyServer.Spec.MinAvailable != nil || policyServer.Spec.MaxUnavailable != nil {
+ return reconcilePodDisruptionBudget(ctx, policyServer, r.Client, r.DeploymentsNamespace)
+ }
+ return deletePodDisruptionBudget(ctx, policyServer, r.Client, r.DeploymentsNamespace)
+}
+
+func deletePodDisruptionBudget(ctx context.Context, policyServer *policiesv1.PolicyServer, k8s client.Client, namespace string) error {
+ pdb := &k8spoliciesv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: policyServer.NameWithPrefix(),
+ Namespace: namespace,
+ },
+ }
+ err := client.IgnoreNotFound(k8s.Delete(ctx, pdb))
+ if err != nil {
+ err = errors.Join(fmt.Errorf("failed to delete PodDisruptionBudget"), err)
+ }
+ return err
+}
+
+func reconcilePodDisruptionBudget(ctx context.Context, policyServer *policiesv1.PolicyServer, k8s client.Client, namespace string) error {
+ pdb := &k8spoliciesv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: policyServer.NameWithPrefix(),
+ Namespace: namespace,
+ },
+ }
+ _, err := controllerutil.CreateOrUpdate(ctx, k8s, pdb, func() error {
+ // Name and Namespace are already set on the object passed to
+ // CreateOrUpdate above; mutate only the fields this controller owns.
+ if err := controllerutil.SetOwnerReference(policyServer, pdb, k8s.Scheme()); err != nil {
+ return errors.Join(fmt.Errorf("failed to set policy server PDB owner reference"), err)
+ }
+
+ pdb.Spec.Selector = &metav1.LabelSelector{
+ MatchLabels: map[string]string{
+ constants.AppLabelKey: policyServer.AppLabel(),
+ constants.PolicyServerLabelKey: policyServer.GetName(),
+ },
+ }
+ // Copy both fields unconditionally: the validating webhook guarantees
+ // at most one is non-nil, and assigning both clears the stale
+ // counterpart when the spec switches between minAvailable and
+ // maxUnavailable (the API rejects a PDB with both fields set).
+ pdb.Spec.MinAvailable = policyServer.Spec.MinAvailable
+ pdb.Spec.MaxUnavailable = policyServer.Spec.MaxUnavailable
+ return nil
+ })
+ if err != nil {
+ err = errors.Join(fmt.Errorf("failed to create or update PodDisruptionBudget"), err)
+ }
+ return err
+}
diff --git a/internal/pkg/admission/policy-server-pod-disruption-budget_test.go b/internal/pkg/admission/policy-server-pod-disruption-budget_test.go
new file mode 100644
index 000000000..fcce5df81
--- /dev/null
+++ b/internal/pkg/admission/policy-server-pod-disruption-budget_test.go
@@ -0,0 +1,189 @@
+package admission
+
+import (
+ "context"
+ "testing"
+
+ "github.com/kubewarden/kubewarden-controller/internal/pkg/constants"
+ policiesv1 "github.com/kubewarden/kubewarden-controller/pkg/apis/policies/v1"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ k8spoliciesv1 "k8s.io/api/policy/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+func TestPDBCreation(t *testing.T) {
+ one := 1
+ two := 2
+ tests := []struct {
+ name string
+ minAvailable *int
+ maxUnavailable *int
+ }{
+ {"with min value", &two, nil},
+ {"with max value", nil, &one},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ reconciler := newReconciler(nil, false)
+ policyServer := &policiesv1.PolicyServer{
+ ObjectMeta: metav1.ObjectMeta{
+ UID: "uid",
+ Name: "test",
+ Namespace: namespace,
+ },
+ }
+
+ if test.minAvailable != nil {
+ minAvailable := intstr.FromInt(*test.minAvailable)
+ policyServer.Spec.MinAvailable = &minAvailable
+ }
+ if test.maxUnavailable != nil {
+ maxUnavailable := intstr.FromInt(*test.maxUnavailable)
+ policyServer.Spec.MaxUnavailable = &maxUnavailable
+ }
+
+ err := reconciler.reconcilePolicyServerPodDisruptionBudget(context.Background(), policyServer)
+ require.NoError(t, err)
+
+ pdb := &k8spoliciesv1.PodDisruptionBudget{}
+ err = reconciler.Client.Get(context.Background(), client.ObjectKey{
+ Namespace: namespace,
+ Name: policyServer.NameWithPrefix(),
+ }, pdb)
+
+ require.NoError(t, err)
+ assert.Equal(t, policyServer.NameWithPrefix(), pdb.Name)
+ assert.Equal(t, policyServer.GetNamespace(), pdb.Namespace)
+ if test.minAvailable == nil {
+ assert.Nil(t, pdb.Spec.MinAvailable)
+ } else {
+ assert.Equal(t, intstr.FromInt(*test.minAvailable), *pdb.Spec.MinAvailable)
+ }
+ if test.maxUnavailable == nil {
+ assert.Nil(t, pdb.Spec.MaxUnavailable)
+ } else {
+ assert.Equal(t, intstr.FromInt(*test.maxUnavailable), *pdb.Spec.MaxUnavailable)
+ }
+ assert.Equal(t, policyServer.AppLabel(), pdb.Spec.Selector.MatchLabels[constants.AppLabelKey])
+ assert.Equal(t, policyServer.GetName(), pdb.Spec.Selector.MatchLabels[constants.PolicyServerLabelKey])
+ assert.Equal(t, policyServer.UID, pdb.OwnerReferences[0].UID)
+ })
+ }
+}
+
+func TestPDBUpdate(t *testing.T) {
+ one := 1
+ two := 2
+ tests := []struct {
+ name string
+ minAvailable *int
+ maxUnavailable *int
+ }{
+ {"with min value", &two, nil},
+ {"with max value", nil, &one},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ policyServer := &policiesv1.PolicyServer{
+ ObjectMeta: metav1.ObjectMeta{
+ UID: "uid",
+ Name: "test",
+ Namespace: namespace,
+ },
+ }
+ if test.minAvailable != nil {
+ minAvailable := intstr.FromInt(*test.minAvailable)
+ policyServer.Spec.MinAvailable = &minAvailable
+ }
+ if test.maxUnavailable != nil {
+ maxUnavailable := intstr.FromInt(*test.maxUnavailable)
+ policyServer.Spec.MaxUnavailable = &maxUnavailable
+ }
+
+ oldPDB := &k8spoliciesv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: policyServer.NameWithPrefix(),
+ Namespace: namespace,
+ },
+ Spec: k8spoliciesv1.PodDisruptionBudgetSpec{
+ MinAvailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 9,
+ },
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 8,
+ },
+ },
+ }
+
+ reconciler := newReconciler([]client.Object{oldPDB}, false)
+
+ err := reconciler.reconcilePolicyServerPodDisruptionBudget(context.Background(), policyServer)
+ require.NoError(t, err)
+
+ pdb := &k8spoliciesv1.PodDisruptionBudget{}
+ err = reconciler.Client.Get(context.Background(), client.ObjectKey{
+ Namespace: namespace,
+ Name: policyServer.NameWithPrefix(),
+ }, pdb)
+
+ require.NoError(t, err)
+ assert.Equal(t, policyServer.NameWithPrefix(), pdb.Name)
+ if test.minAvailable == nil {
+ assert.Nil(t, pdb.Spec.MinAvailable)
+ } else {
+ assert.Equal(t, intstr.FromInt(*test.minAvailable), *pdb.Spec.MinAvailable)
+ }
+ if test.maxUnavailable == nil {
+ assert.Nil(t, pdb.Spec.MaxUnavailable)
+ } else {
+ assert.Equal(t, intstr.FromInt(*test.maxUnavailable), *pdb.Spec.MaxUnavailable)
+ }
+ assert.Equal(t, policyServer.AppLabel(), pdb.Spec.Selector.MatchLabels[constants.AppLabelKey])
+ assert.Equal(t, policyServer.GetName(), pdb.Spec.Selector.MatchLabels[constants.PolicyServerLabelKey])
+ assert.Equal(t, policyServer.UID, pdb.OwnerReferences[0].UID)
+ })
+ }
+}
+
+func TestPDBDelete(t *testing.T) {
+ policyServer := &policiesv1.PolicyServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: namespace,
+ },
+ }
+ oldPDB := &k8spoliciesv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: policyServer.NameWithPrefix(),
+ Namespace: namespace,
+ },
+ Spec: k8spoliciesv1.PodDisruptionBudgetSpec{
+ MinAvailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 9,
+ },
+ MaxUnavailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 8,
+ },
+ },
+ }
+ reconciler := newReconciler([]client.Object{oldPDB}, false)
+
+ err := reconciler.reconcilePolicyServerPodDisruptionBudget(context.Background(), policyServer)
+ require.NoError(t, err)
+
+ pdb := &k8spoliciesv1.PodDisruptionBudget{}
+ err = reconciler.Client.Get(context.Background(), client.ObjectKey{
+ Namespace: namespace,
+ Name: policyServer.NameWithPrefix(),
+ }, pdb)
+
+ require.Error(t, err)
+ require.NoError(t, client.IgnoreNotFound(err))
+}
diff --git a/internal/pkg/admission/reconciler.go b/internal/pkg/admission/reconciler.go
index 8f75f82a6..f4f25a2be 100644
--- a/internal/pkg/admission/reconciler.go
+++ b/internal/pkg/admission/reconciler.go
@@ -217,6 +217,20 @@ func (r *Reconciler) Reconcile(
string(policiesv1.PolicyServerConfigMapReconciled),
)
+ if err := r.reconcilePolicyServerPodDisruptionBudget(ctx, policyServer); err != nil {
+ setFalseConditionType(
+ &policyServer.Status.Conditions,
+ string(policiesv1.PolicyServerPodDisruptionBudgetReconciled),
+ fmt.Sprintf("error reconciling policy server PodDisruptionBudget: %v", err),
+ )
+ return err
+ }
+
+ setTrueConditionType(
+ &policyServer.Status.Conditions,
+ string(policiesv1.PolicyServerPodDisruptionBudgetReconciled),
+ )
+
if err := r.reconcilePolicyServerDeployment(ctx, policyServer); err != nil {
setFalseConditionType(
&policyServer.Status.Conditions,
diff --git a/internal/pkg/admission/reconciler_test.go b/internal/pkg/admission/reconciler_test.go
index 0a69e2a24..787a49e61 100644
--- a/internal/pkg/admission/reconciler_test.go
+++ b/internal/pkg/admission/reconciler_test.go
@@ -91,7 +91,7 @@ func TestGetPolicies(t *testing.T) {
func newReconciler(policies []client.Object, metricsEnabled bool) Reconciler {
customScheme := scheme.Scheme
- customScheme.AddKnownTypes(schema.GroupVersion{Group: "policies.kubewarden.io", Version: "v1"}, &policiesv1.ClusterAdmissionPolicy{}, &policiesv1.AdmissionPolicy{}, &policiesv1.ClusterAdmissionPolicyList{}, &policiesv1.AdmissionPolicyList{})
+ customScheme.AddKnownTypes(schema.GroupVersion{Group: "policies.kubewarden.io", Version: "v1"}, &policiesv1.ClusterAdmissionPolicy{}, &policiesv1.AdmissionPolicy{}, &policiesv1.ClusterAdmissionPolicyList{}, &policiesv1.AdmissionPolicyList{}, &policiesv1.PolicyServer{}, &policiesv1.PolicyServerList{})
cl := fake.NewClientBuilder().WithScheme(customScheme).WithObjects(policies...).Build()
return Reconciler{
diff --git a/main.go b/main.go
index 5bcf804be..9091444cd 100644
--- a/main.go
+++ b/main.go
@@ -31,6 +31,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+ k8spoliciesv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
@@ -165,6 +166,7 @@ func main() {
&corev1.Secret{}: namespaceSelector,
&corev1.Pod{}: namespaceSelector,
&corev1.Service{}: namespaceSelector,
+ &k8spoliciesv1.PodDisruptionBudget{}: namespaceSelector,
},
},
// These types of resources should never be cached because we need fresh
diff --git a/pkg/apis/policies/v1/policyserver_types.go b/pkg/apis/policies/v1/policyserver_types.go
index 26964eaf1..fbffb7af6 100644
--- a/pkg/apis/policies/v1/policyserver_types.go
+++ b/pkg/apis/policies/v1/policyserver_types.go
@@ -19,6 +19,7 @@ package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
)
// PolicyServerSecurity defines securityContext configuration to be used in the Policy Server workload
@@ -39,6 +40,12 @@ type PolicyServerSpec struct {
// Replicas is the number of desired replicas.
Replicas int32 `json:"replicas"`
+ // Number of policy server replicas that must be still available after the eviction
+ MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty"`
+
+ // Number of policy server replicas that can be unavailable after the eviction
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty"`
+
// Annotations is an unstructured key value map stored with a resource that may be
// set by external tools to store and retrieve arbitrary metadata. They are not
// queryable and should be preserved when modifying objects.
@@ -121,6 +128,9 @@ const (
// PolicyServerServiceReconciled represents the condition of the
// Policy Server Service reconciliation
PolicyServerServiceReconciled PolicyServerConditionType = "ServiceReconciled"
+ // PolicyServerPodDisruptionBudgetReconciled represents the condition of the
+ // Policy Server PodDisruptionBudget reconciliation
+ PolicyServerPodDisruptionBudgetReconciled PolicyServerConditionType = "PodDisruptionBudgetReconciled"
)
// PolicyServerStatus defines the observed state of PolicyServer
diff --git a/pkg/apis/policies/v1/policyserver_webhook.go b/pkg/apis/policies/v1/policyserver_webhook.go
index df0e229fc..f8bd53699 100644
--- a/pkg/apis/policies/v1/policyserver_webhook.go
+++ b/pkg/apis/policies/v1/policyserver_webhook.go
@@ -84,6 +84,11 @@ func (v *policyServerValidator) validate(ctx context.Context, obj runtime.Object
}
}
+ // Kubernetes does not allow to set both MinAvailable and MaxUnavailable at the same time
+ if policyServer.Spec.MinAvailable != nil && policyServer.Spec.MaxUnavailable != nil {
+ return fmt.Errorf("minAvailable and maxUnavailable cannot be both set")
+ }
+
return nil
}
diff --git a/pkg/apis/policies/v1/policyserver_webhook_test.go b/pkg/apis/policies/v1/policyserver_webhook_test.go
index 832603951..483deb6ae 100644
--- a/pkg/apis/policies/v1/policyserver_webhook_test.go
+++ b/pkg/apis/policies/v1/policyserver_webhook_test.go
@@ -21,6 +21,7 @@ import (
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/intstr"
)
func TestValidatePolicyServerName(t *testing.T) {
@@ -45,3 +46,25 @@ func TestValidatePolicyServerName(t *testing.T) {
err := policyServerValidator.validate(context.Background(), policyServer)
require.ErrorContains(t, err, "the PolicyServer name cannot be longer than 63 characters")
}
+
+func TestValidateMinAvailable(t *testing.T) {
+ intStrValue := intstr.FromInt(2)
+ policyServer := &PolicyServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "policy-server",
+ Namespace: "default",
+ },
+ Spec: PolicyServerSpec{
+ Image: "image",
+ Replicas: 1,
+ MinAvailable: &intStrValue,
+ MaxUnavailable: &intStrValue,
+ },
+ }
+ policyServerValidator := policyServerValidator{
+ k8sClient: nil,
+ deploymentsNamespace: "default",
+ }
+ err := policyServerValidator.validate(context.Background(), policyServer)
+ require.ErrorContains(t, err, "minAvailable and maxUnavailable cannot be both set")
+}
diff --git a/pkg/apis/policies/v1/zz_generated.deepcopy.go b/pkg/apis/policies/v1/zz_generated.deepcopy.go
index be558c340..780c7581d 100644
--- a/pkg/apis/policies/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/policies/v1/zz_generated.deepcopy.go
@@ -26,6 +26,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -290,6 +291,16 @@ func (in *PolicyServerSecurity) DeepCopy() *PolicyServerSecurity {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyServerSpec) DeepCopyInto(out *PolicyServerSpec) {
*out = *in
+ if in.MinAvailable != nil {
+ in, out := &in.MinAvailable, &out.MinAvailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
+ if in.MaxUnavailable != nil {
+ in, out := &in.MaxUnavailable, &out.MaxUnavailable
+ *out = new(intstr.IntOrString)
+ **out = **in
+ }
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))