fix(nimbus-k8tls): Remove ownerref from ns #197

Merged 2 commits on Jun 25, 2024
45 changes: 23 additions & 22 deletions .github/workflows/pr-checks.yaml
@@ -153,13 +153,18 @@ jobs:
           kind load docker-image 5gsec/nimbus:latest --name=testing
       - name: Install Nimbus
         working-directory: ./deployments/nimbus
         run: |
-          helm upgrade --install nimbus-operator deployments/nimbus -n nimbus --create-namespace --set image.pullPolicy=Never
+          helm upgrade --dependency-update --install nimbus-operator . -n nimbus --create-namespace \
+          --set image.pullPolicy=Never \
+          --set autoDeploy.kubearmor=false \
+          --set autoDeploy.kyverno=false \
+          --set autoDeploy.netpol=false
       - name: Wait for Nimbus to start
         run: |
           kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus
-          kubectl get pods -A
+          kubectl get pods -n nimbus
       - name: Run Tests
         run: make integration-test
Expand All @@ -183,7 +188,7 @@ jobs:
uses: helm/kind-action@v1
with:
cluster_name: testing

- name: Build nimbus image and load in the kind cluster
run: |
make docker-build
Expand All @@ -200,30 +205,26 @@ jobs:
run: |
make docker-build
kind load docker-image 5gsec/nimbus-kubearmor:latest --name=testing
- name: Build nimbus-kyverno image and load in the kind cluster
working-directory: ./pkg/adapter/nimbus-kyverno
run: |
make docker-build
kind load docker-image 5gsec/nimbus-kyverno:latest --name=testing
- name: Install Kubearmor CRDs
run: |
kubectl create -f https://raw.githubusercontent.com/kubearmor/KubeArmor/main/deployments/CRD/KubeArmorPolicy.yaml
- name: Install Kyverno CRDs
run: |
kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/main/config/crds/kyverno/kyverno.io_clusterpolicies.yaml
kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/main/config/crds/kyverno/kyverno.io_policies.yaml
- name: Install Nimbus
working-directory: ./deployments/nimbus
run: |
helm upgrade --install nimbus-operator deployments/nimbus -n nimbus --create-namespace --set image.pullPolicy=Never
helm upgrade --dependency-update --install nimbus-operator . -n nimbus --create-namespace \
--set image.pullPolicy=Never \
--set autoDeploy.kubearmor=false \
--set autoDeploy.kyverno=false \
--set autoDeploy.netpol=false
- name: Wait for Nimbus to start
run: |
kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus
kubectl get pods -A
kubectl get pods -n nimbus
- name: Install nimbus-netpol
working-directory: deployments/nimbus-netpol/
@@ -233,27 +234,27 @@ jobs:
       - name: Wait for nimbus-netpol to start
         run: |
           kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus-netpol
-          kubectl get pods -A
+          kubectl get pods -n nimbus
       - name: Install nimbus-kubearmor
         working-directory: deployments/nimbus-kubearmor/
         run: |
-          helm upgrade --install nimbus-kubearmor . -n nimbus --set image.pullPolicy=Never
+          helm upgrade --dependency-update --install nimbus-kubearmor . -n nimbus --set image.pullPolicy=Never
       - name: Wait for nimbus-kubearmor to start
         run: |
           kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus-kubearmor
-          kubectl get pods -A
+          kubectl get pods -n nimbus
       - name: Install nimbus-kyverno
         working-directory: deployments/nimbus-kyverno/
         run: |
-          helm upgrade --install nimbus-kyverno . -n nimbus --set image.pullPolicy=Never
+          helm upgrade --dependency-update --install nimbus-kyverno . -n nimbus --set image.pullPolicy=Never
       - name: Wait for nimbus-kyverno to start
         run: |
           kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus-kyverno
-          kubectl get pods -A
+          kubectl get pods -n nimbus
       - name: Run Tests
         run: make e2e-test
2 changes: 1 addition & 1 deletion pkg/adapter/nimbus-k8tls/main.go
@@ -16,7 +16,7 @@ import (
 )
 
 func main() {
-	ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
+	ctrl.SetLogger(zap.New())
 	logger := ctrl.Log
 
 	ctx, cancelFunc := context.WithCancel(context.Background())
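For context, `zap.UseDevMode(true)` switches controller-runtime's zap logger into development mode, so dropping it leaves the production defaults. A minimal sketch of the two configurations, using only the controller-runtime logging API imported above:

```go
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// Production defaults (the new behavior): structured JSON output,
	// info level and above, with log sampling enabled.
	ctrl.SetLogger(zap.New())

	// Development mode (the removed behavior): human-readable console
	// encoding, debug level, no sampling. Handy locally, noisy in-cluster.
	// ctrl.SetLogger(zap.New(zap.UseDevMode(true)))

	ctrl.Log.Info("logger configured")
}
```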
4 changes: 2 additions & 2 deletions pkg/adapter/nimbus-k8tls/manager/cronjob.go
@@ -54,7 +54,7 @@ func createOrUpdateCj(ctx context.Context, logger logr.Logger, cwnp v1alpha1.ClusterNimbusPolicy
 		logger.Info("configured Kubernetes CronJob", "CronJob.Name", cronJob.Name, "CronJob.Namespace", cronJob.Namespace)
 	}
 
-	if err = adapterutil.UpdateCwnpStatus(ctx, k8sClient, "CronJob/"+cronJob.Name, cwnp.Name, false); err != nil {
+	if err = adapterutil.UpdateCwnpStatus(ctx, k8sClient, cronJob.Namespace+"/CronJob/"+cronJob.Name, cwnp.Name, false); err != nil {
 		logger.Error(err, "failed to update ClusterNimbusPolicy status")
 	}
 }
@@ -67,7 +67,7 @@ func deleteCronJobs(ctx context.Context, logger logr.Logger, cwnpName string, cr
 			continue
 		}
 
-		if err := adapterutil.UpdateCwnpStatus(ctx, k8sClient, "CronJob/"+cronJob.Name, cwnpName, true); err != nil {
+		if err := adapterutil.UpdateCwnpStatus(ctx, k8sClient, cronJob.Namespace+"/CronJob/"+cronJob.Name, cwnpName, true); err != nil {
 			logger.Error(err, "failed to update ClusterNimbusPolicy status")
 		}
 		logger.Info("Dangling Kubernetes CronJob deleted", "CronJobJob.Name", cronJob.Name, "CronJob.Namespace", cronJob.Namespace)
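The status entries written by `adapterutil.UpdateCwnpStatus` now embed the CronJob's namespace, so same-named CronJobs in different namespaces map to distinct references. A small sketch of the resulting format; the helper and the example object name are illustrative, not part of the adapter:

```go
package main

import "fmt"

// policyRef is a hypothetical helper mirroring the string built inline
// in createOrUpdateCj and deleteCronJobs above.
func policyRef(namespace, kind, name string) string {
	return namespace + "/" + kind + "/" + name
}

func main() {
	// Before this change the reference was just "CronJob/tls-scan",
	// which becomes ambiguous once namespaces differ.
	fmt.Println(policyRef("nimbus-k8tls-env", "CronJob", "tls-scan"))
	// Output: nimbus-k8tls-env/CronJob/tls-scan
}
```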
12 changes: 10 additions & 2 deletions pkg/adapter/nimbus-k8tls/manager/k8tls.go
@@ -202,8 +202,16 @@ func setupK8tlsEnv(ctx context.Context, cwnp v1alpha1.ClusterNimbusPolicy, scheme
 	objs := []client.Object{ns, cm, sa, clusterRole, clusterRoleBinding}
 	for idx := range objs {
 		objToCreate := objs[idx]
-		if err := ctrl.SetControllerReference(&cwnp, objToCreate, scheme); err != nil {
-			return err
+
+		// Don't set owner ref on namespace. In environments with configured Pod Security
+		// Standards labelling namespaces becomes a requirement. However, on deletion of
+		// CWNP a namespace with ownerReferences set also gets deleted. Since we need to
+		// keep the nimbus-k8tls-env namespace labeled, removing the ownerReferences
+		// prevents this deletion.
+		if idx != 0 {
+			if err := ctrl.SetControllerReference(&cwnp, objToCreate, scheme); err != nil {
+				return err
+			}
 		}
 
 		var existingObj client.Object
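The guard works because `ctrl.SetControllerReference` writes an `ownerReferences` entry with `controller: true` onto the child object, and Kubernetes garbage collection deletes such children when their owner goes away. A self-contained sketch of that mechanism; the `ClusterRole` owner is a cluster-scoped stand-in for the actual ClusterNimbusPolicy, and the names and UID are made up:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)

	// Cluster-scoped stand-in owner; in the adapter the owner is the
	// cluster-scoped ClusterNimbusPolicy (cwnp). A namespaced owner on a
	// cluster-scoped object would be rejected by controller-runtime.
	owner := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "stand-in-owner", UID: "1234"},
	}
	ns := &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: "nimbus-k8tls-env"},
	}

	// This writes metadata.ownerReferences (controller: true) on ns, so
	// garbage collection would delete the namespace together with its
	// owner, which is exactly the cascade the idx != 0 guard now avoids.
	if err := ctrl.SetControllerReference(owner, ns, scheme); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("ownerReferences: %+v\n", ns.OwnerReferences)
}
```

Because the namespace is the first entry in `objs`, skipping index 0 leaves `nimbus-k8tls-env` ownerless: it survives CWNP deletion and keeps its Pod Security labels, while the ConfigMap, ServiceAccount, ClusterRole, and ClusterRoleBinding still get cleaned up with their owner.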