Merge pull request #1119 from akrejcir/validator-tls-config-map
feat: validator: Load TLS configuration from ConfigMap
kubevirt-bot authored Nov 25, 2024
2 parents 9ce1906 + a369026 commit 574b9e8
Showing 11 changed files with 776 additions and 218 deletions.
4 changes: 2 additions & 2 deletions internal/common/crypto_policy.go
@@ -10,8 +10,8 @@ import (
)

type SSPTLSOptions struct {
- MinTLSVersion string
- OpenSSLCipherNames []string
+ MinTLSVersion string `json:"minTLSVersion,omitempty"`
+ OpenSSLCipherNames []string `json:"openSSLCipherNames,omitempty"`
}

func (s *SSPTLSOptions) IsEmpty() bool {
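The new json tags are what let these options be serialized into the validator's ConfigMap (see reconcile.go below). A minimal sketch of the resulting encoding, assuming illustrative values rather than a real TLSSecurityProfile:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of internal/common.SSPTLSOptions after this change.
type SSPTLSOptions struct {
	MinTLSVersion      string   `json:"minTLSVersion,omitempty"`
	OpenSSLCipherNames []string `json:"openSSLCipherNames,omitempty"`
}

func main() {
	// Illustrative values only; real ones are derived from the SSP CR's TLSSecurityProfile.
	opts := SSPTLSOptions{
		MinTLSVersion:      "VersionTLS12",
		OpenSSLCipherNames: []string{"ECDHE-ECDSA-AES128-GCM-SHA256"},
	}
	data, _ := json.Marshal(opts)
	fmt.Println(string(data))
	// Output:
	// {"minTLSVersion":"VersionTLS12","openSSLCipherNames":["ECDHE-ECDSA-AES128-GCM-SHA256"]}
}
```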
24 changes: 20 additions & 4 deletions internal/operands/template-validator/reconcile.go
@@ -1,6 +1,8 @@
package template_validator

import (
"encoding/json"

admission "k8s.io/api/admissionregistration/v1"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
@@ -28,6 +30,7 @@ func WatchTypes() []operands.WatchType {
{Object: &v1.ServiceAccount{}},
{Object: &v1.Service{}},
{Object: &apps.Deployment{}, WatchFullObject: true},
{Object: &v1.ConfigMap{}},
}
}

@@ -60,6 +63,7 @@ func (t *templateValidator) Reconcile(request *common.Request) ([]common.Reconci
reconcileClusterRoleBinding,
reconcileService,
reconcilePrometheusService,
+ reconcileConfigMap,
reconcileDeployment,
reconcileValidatingWebhook,
)
@@ -133,16 +137,28 @@ func reconcileDeployment(request *common.Request) (common.ReconcileResult, error
}
}

+ deployment := newDeployment(request.Namespace, numberOfReplicas, image)
+ common.AddAppLabels(request.Instance, operandName, operandComponent, &deployment.Spec.Template.ObjectMeta)
+ injectPlacementMetadata(&deployment.Spec.Template.Spec, validatorSpec)
+ return common.CreateOrUpdate(request).
+ NamespacedResource(deployment).
+ WithAppLabels(operandName, operandComponent).
+ Reconcile()
+ }
+
+ func reconcileConfigMap(request *common.Request) (common.ReconcileResult, error) {
sspTLSOptions, err := common.NewSSPTLSOptions(request.Instance.Spec.TLSSecurityProfile, nil)
if err != nil {
return common.ReconcileResult{}, err
}

- deployment := newDeployment(request.Namespace, numberOfReplicas, image, sspTLSOptions)
- common.AddAppLabels(request.Instance, operandName, operandComponent, &deployment.Spec.Template.ObjectMeta)
- injectPlacementMetadata(&deployment.Spec.Template.Spec, validatorSpec)
+ sspTLSOptionsJson, err := json.Marshal(sspTLSOptions)
+ if err != nil {
+ return common.ReconcileResult{}, err
+ }
+
return common.CreateOrUpdate(request).
- NamespacedResource(deployment).
+ NamespacedResource(newConfigMap(request.Namespace, string(sspTLSOptionsJson))).
WithAppLabels(operandName, operandComponent).
Reconcile()
}
8 changes: 4 additions & 4 deletions internal/operands/template-validator/reconcile_test.go
@@ -26,7 +26,6 @@ import (
)

var log = logf.Log.WithName("validator_operand")
- var emptySSPTLSConfig = &common.SSPTLSOptions{}

var _ = Describe("Template validator operand", func() {
const (
@@ -85,7 +84,8 @@ var _ = Describe("Template validator operand", func() {
ExpectResourceExists(newServiceAccount(namespace), request)
ExpectResourceExists(newClusterRoleBinding(namespace), request)
ExpectResourceExists(newService(namespace), request)
- ExpectResourceExists(newDeployment(namespace, replicas, "test-img", emptySSPTLSConfig), request)
+ ExpectResourceExists(newConfigMap(namespace, ""), request)
+ ExpectResourceExists(newDeployment(namespace, replicas, "test-img"), request)
ExpectResourceExists(newValidatingWebhook(namespace), request)
ExpectResourceExists(newPrometheusService(namespace), request)
})
@@ -153,7 +153,7 @@ var _ = Describe("Template validator operand", func() {
Expect(err).ToNot(HaveOccurred())

// Set status for deployment
- key := client.ObjectKeyFromObject(newDeployment(namespace, replicas, "test-img", emptySSPTLSConfig))
+ key := client.ObjectKeyFromObject(newDeployment(namespace, replicas, "test-img"))
updateDeploymentStatus(key, &request, func(deploymentStatus *apps.DeploymentStatus) {
deploymentStatus.Replicas = replicas
deploymentStatus.ReadyReplicas = 0
@@ -381,7 +381,7 @@ var _ = Describe("Template validator operand", func() {
_, err := operand.Reconcile(&request)
Expect(err).ToNot(HaveOccurred())
deployment := &apps.Deployment{}
- key := client.ObjectKeyFromObject(newDeployment(namespace, replicas, "test-img", emptySSPTLSConfig))
+ key := client.ObjectKeyFromObject(newDeployment(namespace, replicas, "test-img"))
Expect(request.Client.Get(request.Context, key, deployment)).To(Succeed())
Expect(deployment.Spec.Template.Spec.Affinity.NodeAffinity).To(Equal(expectedNodeAffinity))
Expect(deployment.Spec.Template.Spec.Affinity.PodAffinity).To(Equal(expectedPodAffinity))
49 changes: 33 additions & 16 deletions internal/operands/template-validator/resources.go
@@ -2,7 +2,6 @@ package template_validator

import (
"fmt"
"strings"

templatev1 "github.com/openshift/api/template/v1"
admission "k8s.io/api/admissionregistration/v1"
@@ -16,7 +15,6 @@ import (
kubevirt "kubevirt.io/api/core"
kubevirtv1 "kubevirt.io/api/core/v1"

"kubevirt.io/ssp-operator/internal/common"
"kubevirt.io/ssp-operator/internal/env"
common_templates "kubevirt.io/ssp-operator/internal/operands/common-templates"
metrics "kubevirt.io/ssp-operator/internal/operands/metrics"
@@ -37,6 +35,7 @@ const (
ServiceName = VirtTemplateValidator
MetricsServiceName = "template-validator-metrics"
DeploymentName = VirtTemplateValidator
+ ConfigMapName = VirtTemplateValidator
PrometheusLabel = "prometheus.ssp.kubevirt.io"
kubernetesHostnameTopologyKey = "kubernetes.io/hostname"
)
@@ -145,9 +144,11 @@ func newPodAntiAffinity(key, topologyKey string, operator metav1.LabelSelectorOp
}
}

- func newDeployment(namespace string, replicas int32, image string, sspTLSOptions *common.SSPTLSOptions) *apps.Deployment {
- const volumeName = "tls"
+ func newDeployment(namespace string, replicas int32, image string) *apps.Deployment {
+ const secretVolumeName = "tls"
+ const configMapVolumeName = "config-map"
const certMountPath = "/etc/webhook/certs"
+ const configMapMountPath = "/tls-options"
trueVal := true
falseVal := false

@@ -196,20 +197,14 @@ func newDeployment(namespace string, replicas int32, image string, sspTLSOptions
fmt.Sprintf("--port=%d", ContainerPort),
fmt.Sprintf("--cert-dir=%s", certMountPath),
},
- Env: []core.EnvVar{
- {
- Name: tlsinfo.CiphersEnvName,
- Value: strings.Join(sspTLSOptions.OpenSSLCipherNames, ","),
- },
- {
- Name: tlsinfo.TLSMinVersionEnvName,
- Value: sspTLSOptions.MinTLSVersion,
- },
- },
VolumeMounts: []core.VolumeMount{{
- Name: volumeName,
+ Name: secretVolumeName,
MountPath: certMountPath,
ReadOnly: true,
+ }, {
+ Name: configMapVolumeName,
+ MountPath: configMapMountPath,
+ ReadOnly: true,
}},
SecurityContext: &core.SecurityContext{
ReadOnlyRootFilesystem: &trueVal,
@@ -240,12 +235,21 @@
},
}},
Volumes: []core.Volume{{
- Name: volumeName,
+ Name: secretVolumeName,
VolumeSource: core.VolumeSource{
Secret: &core.SecretVolumeSource{
SecretName: SecretName,
},
},
+ }, {
+ Name: configMapVolumeName,
+ VolumeSource: core.VolumeSource{
+ ConfigMap: &core.ConfigMapVolumeSource{
+ LocalObjectReference: core.LocalObjectReference{
+ Name: ConfigMapName,
+ },
+ },
+ },
}},
Affinity: &core.Affinity{
PodAntiAffinity: podAntiAffinity,
@@ -256,6 +260,19 @@
}
}

+ func newConfigMap(namespace string, tlsOptionsJson string) *core.ConfigMap {
+ return &core.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: ConfigMapName,
+ Namespace: namespace,
+ Labels: CommonLabels(),
+ },
+ Data: map[string]string{
+ tlsinfo.TLSOptionsFilename: tlsOptionsJson,
+ },
+ }
+ }
+
func newValidatingWebhook(serviceNamespace string) *admission.ValidatingWebhookConfiguration {
fail := admission.Fail
sideEffectsNone := admission.SideEffectClassNone
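The deployment above mounts the ConfigMap at configMapMountPath ("/tls-options"), and newConfigMap stores the marshalled options under tlsinfo.TLSOptionsFilename. A minimal sketch of how the validator process could read those options back from the mounted file; the concrete file name, helper names, and error handling here are assumptions for illustration, not code from this commit:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// Assumed values: the directory matches configMapMountPath in newDeployment,
// and the file name stands in for tlsinfo.TLSOptionsFilename.
const (
	tlsOptionsDir  = "/tls-options"
	tlsOptionsFile = "tls-options.json"
)

// Mirror of internal/common.SSPTLSOptions.
type SSPTLSOptions struct {
	MinTLSVersion      string   `json:"minTLSVersion,omitempty"`
	OpenSSLCipherNames []string `json:"openSSLCipherNames,omitempty"`
}

// loadTLSOptions reads and parses the options projected from the ConfigMap.
func loadTLSOptions() (*SSPTLSOptions, error) {
	data, err := os.ReadFile(filepath.Join(tlsOptionsDir, tlsOptionsFile))
	if err != nil {
		return nil, fmt.Errorf("reading TLS options: %w", err)
	}
	options := &SSPTLSOptions{}
	if err := json.Unmarshal(data, options); err != nil {
		return nil, fmt.Errorf("parsing TLS options: %w", err)
	}
	return options, nil
}

func main() {
	opts, err := loadTLSOptions()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("min TLS version %q, ciphers %v\n", opts.MinTLSVersion, opts.OpenSSLCipherNames)
}
```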
128 changes: 128 additions & 0 deletions internal/template-validator/filewatch/filewatch.go
@@ -0,0 +1,128 @@
package filewatch

import (
"fmt"
"strings"
"sync"
"sync/atomic"

"github.com/fsnotify/fsnotify"
)

type Watch interface {
Add(path string, callback func()) error
Run(done <-chan struct{}) error
IsRunning() bool
}

func New() Watch {
return &watch{
callbacks: make(map[string]func()),
}
}

type watch struct {
lock sync.Mutex
callbacks map[string]func()
running atomic.Bool
}

var _ Watch = &watch{}

func (w *watch) Add(path string, callback func()) error {
w.lock.Lock()
defer w.lock.Unlock()

if w.running.Load() {
return fmt.Errorf("cannot add to a running watch")
}

w.callbacks[path] = callback
return nil
}

func (w *watch) Run(done <-chan struct{}) error {
watcher, err := fsnotify.NewWatcher()
if err != nil {
return fmt.Errorf("could not create fsnotify.Watcher: %w", err)
}
// watcher.Close() never returns an error
defer func() { _ = watcher.Close() }()

func() {
// Before setting running to true, we need to acquire the lock,
// because Add() method may be running concurrently.
w.lock.Lock()
defer w.lock.Unlock()
w.running.Store(true)
}()
// Setting running to false is ok without a lock.
defer w.running.Store(false)

err = w.addCallbacks(watcher)
if err != nil {
return fmt.Errorf("could not add callbacks: %w", err)
}
// Running all callbacks before processing watch events.
// So callbacks will notice the state of the files after
// watch starts, but before any events arrive.
w.runCallbacks()

return w.processEvents(watcher, done)
}

func (w *watch) IsRunning() bool {
return w.running.Load()
}

func (w *watch) addCallbacks(watcher *fsnotify.Watcher) error {
for path := range w.callbacks {
err := watcher.Add(path)
if err != nil {
return fmt.Errorf("failed watch %s: %w", path, err)
}
}
return nil
}

func (w *watch) runCallbacks() {
for _, callback := range w.callbacks {
callback()
}
}

func (w *watch) processEvents(watcher *fsnotify.Watcher, done <-chan struct{}) error {
for {
select {
case <-done:
return nil

case event, ok := <-watcher.Events:
if !ok {
return nil
}
w.handleEvent(event)

case err, ok := <-watcher.Errors:
if !ok {
return nil
}
if err != nil {
return err
}
}
}
}

func (w *watch) handleEvent(event fsnotify.Event) {
const modificationEvents = fsnotify.Create | fsnotify.Write | fsnotify.Remove
if event.Op&modificationEvents == 0 {
return
}

for path, callback := range w.callbacks {
if strings.HasPrefix(event.Name, path) {
callback()
}
}
}
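The filewatch package above is generic, so a caller can point it at the mounted ConfigMap directory and reload configuration on change. A short usage sketch; the watched path and reload logic are illustrative, and the import path assumes code living inside the ssp-operator module (the package is internal):

```go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"

	"kubevirt.io/ssp-operator/internal/template-validator/filewatch"
)

func main() {
	watch := filewatch.New()

	// Callbacks must be registered before Run(); Add() fails on a running watch.
	// Each callback also runs once before events are processed, so the initial
	// file state is picked up even if no event ever arrives.
	if err := watch.Add("/tls-options", func() {
		log.Println("TLS options changed, reloading")
		// reload logic would go here
	}); err != nil {
		log.Fatalf("failed to add watch: %v", err)
	}

	// Close done to stop the watch; here it is tied to SIGINT/SIGTERM.
	done := make(chan struct{})
	go func() {
		sigs := make(chan os.Signal, 1)
		signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
		<-sigs
		close(done)
	}()

	// Run blocks, dispatching callbacks on Create/Write/Remove events under the watched path.
	if err := watch.Run(done); err != nil {
		log.Fatalf("watch failed: %v", err)
	}
}
```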