Merge branch 'main' into mutate_logic_update
Signed-off-by: Ved Ratan <82467006+VedRatan@users.noreply.github.com>
VedRatan authored Jun 26, 2024
2 parents 794b380 + 7f047bd commit 3ed0b2e
Showing 9 changed files with 60 additions and 32 deletions.
45 changes: 23 additions & 22 deletions .github/workflows/pr-checks.yaml
@@ -153,13 +153,18 @@ jobs:
kind load docker-image 5gsec/nimbus:latest --name=testing
- name: Install Nimbus
working-directory: ./deployments/nimbus
run: |
helm upgrade --install nimbus-operator deployments/nimbus -n nimbus --create-namespace --set image.pullPolicy=Never
helm upgrade --dependency-update --install nimbus-operator . -n nimbus --create-namespace \
--set image.pullPolicy=Never \
--set autoDeploy.kubearmor=false \
--set autoDeploy.kyverno=false \
--set autoDeploy.netpol=false
- name: Wait for Nimbus to start
run: |
kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus
kubectl get pods -A
kubectl get pods -n nimbus
- name: Run Tests
run: make integration-test
@@ -183,7 +188,7 @@ jobs:
uses: helm/kind-action@v1
with:
cluster_name: testing

- name: Build nimbus image and load in the kind cluster
run: |
make docker-build
@@ -200,30 +205,26 @@ jobs:
run: |
make docker-build
kind load docker-image 5gsec/nimbus-kubearmor:latest --name=testing
- name: Build nimbus-kyverno image and load in the kind cluster
working-directory: ./pkg/adapter/nimbus-kyverno
run: |
make docker-build
kind load docker-image 5gsec/nimbus-kyverno:latest --name=testing
- name: Install Kubearmor CRDs
run: |
kubectl create -f https://raw.githubusercontent.com/kubearmor/KubeArmor/main/deployments/CRD/KubeArmorPolicy.yaml
- name: Install Kyverno CRDs
run: |
kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/main/config/crds/kyverno/kyverno.io_clusterpolicies.yaml
kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/main/config/crds/kyverno/kyverno.io_policies.yaml
- name: Install Nimbus
working-directory: ./deployments/nimbus
run: |
helm upgrade --install nimbus-operator deployments/nimbus -n nimbus --create-namespace --set image.pullPolicy=Never
helm upgrade --dependency-update --install nimbus-operator . -n nimbus --create-namespace \
--set image.pullPolicy=Never \
--set autoDeploy.kubearmor=false \
--set autoDeploy.kyverno=false \
--set autoDeploy.netpol=false
- name: Wait for Nimbus to start
run: |
kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus
kubectl get pods -A
kubectl get pods -n nimbus
- name: Install nimbus-netpol
working-directory: deployments/nimbus-netpol/
@@ -233,27 +234,27 @@ jobs:
- name: Wait for nimbus-netpol to start
run: |
kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus-netpol
kubectl get pods -A
kubectl get pods -n nimbus
- name: Install nimbus-kubearmor
working-directory: deployments/nimbus-kubearmor/
run: |
helm upgrade --install nimbus-kubearmor . -n nimbus --set image.pullPolicy=Never
helm upgrade --dependency-update --install nimbus-kubearmor . -n nimbus --set image.pullPolicy=Never
- name: Wait for nimbus-kubearmor to start
run: |
kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus-kubearmor
kubectl get pods -A
kubectl get pods -n nimbus
- name: Install nimbus-kyverno
working-directory: deployments/nimbus-kyverno/
run: |
helm upgrade --install nimbus-kyverno . -n nimbus --set image.pullPolicy=Never
helm upgrade --dependency-update --install nimbus-kyverno . -n nimbus --set image.pullPolicy=Never
- name: Wait for nimbus-kyverno to start
run: |
kubectl wait --for=condition=ready --timeout=5m -n nimbus pod -l app.kubernetes.io/name=nimbus-kyverno
kubectl get pods -A
kubectl get pods -n nimbus
- name: Run Tests
run: make e2e-test
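The install steps above now pass --dependency-update and turn off the operator chart's automatic adapter deployment, with each adapter chart installed explicitly afterwards. The chart's values file is not part of this diff, so the following is only a sketch of the autoDeploy block those --set flags appear to override (an assumption, not shown in this commit):

# Hypothetical excerpt of deployments/nimbus/values.yaml (not part of this diff).
# The --set autoDeploy.<adapter>=false flags above would override these defaults.
autoDeploy:
  kubearmor: true   # deploy nimbus-kubearmor as a chart dependency
  kyverno: true     # deploy nimbus-kyverno as a chart dependency
  netpol: true      # deploy nimbus-netpol as a chart dependency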
16 changes: 16 additions & 0 deletions deployments/nimbus-kyverno/templates/clusterrole.yaml
@@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/component: background-controller
app.kubernetes.io/instance: kyverno
app.kubernetes.io/part-of: kyverno
name: kyverno:update-resources
rules:
- apiGroups:
- '*'
resources:
- '*'
verbs:
- update
- patch
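The labels on the new kyverno:update-resources ClusterRole match the selectors Kyverno commonly uses to aggregate extra permissions into its background controller, which is presumably why no ClusterRoleBinding is added alongside it. For illustration only, the aggregating role shipped by Kyverno looks roughly like the sketch below; the exact name and selectors depend on the Kyverno version and release name, so treat them as assumptions:

# Sketch of Kyverno's own aggregating ClusterRole (installed by Kyverno, not by
# this commit); it pulls in any ClusterRole carrying the labels used above.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kyverno:background-controller   # name assumed, varies with release name
aggregationRule:
  clusterRoleSelectors:
    - matchLabels:
        app.kubernetes.io/component: background-controller
        app.kubernetes.io/instance: kyverno
        app.kubernetes.io/part-of: kyverno
rules: []   # filled in by the RBAC aggregation controller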
2 changes: 1 addition & 1 deletion pkg/adapter/nimbus-k8tls/main.go
@@ -16,7 +16,7 @@ import (
)

func main() {
ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
ctrl.SetLogger(zap.New())
logger := ctrl.Log

ctx, cancelFunc := context.WithCancel(context.Background())
4 changes: 2 additions & 2 deletions pkg/adapter/nimbus-k8tls/manager/cronjob.go
@@ -54,7 +54,7 @@ func createOrUpdateCj(ctx context.Context, logger logr.Logger, cwnp v1alpha1.Clu
logger.Info("configured Kubernetes CronJob", "CronJob.Name", cronJob.Name, "CronJob.Namespace", cronJob.Namespace)
}

if err = adapterutil.UpdateCwnpStatus(ctx, k8sClient, "CronJob/"+cronJob.Name, cwnp.Name, false); err != nil {
if err = adapterutil.UpdateCwnpStatus(ctx, k8sClient, cronJob.Namespace+"/CronJob/"+cronJob.Name, cwnp.Name, false); err != nil {
logger.Error(err, "failed to update ClusterNimbusPolicy status")
}
}
@@ -67,7 +67,7 @@ func deleteCronJobs(ctx context.Context, logger logr.Logger, cwnpName string, cr
continue
}

if err := adapterutil.UpdateCwnpStatus(ctx, k8sClient, "CronJob/"+cronJob.Name, cwnpName, true); err != nil {
if err := adapterutil.UpdateCwnpStatus(ctx, k8sClient, cronJob.Namespace+"/CronJob/"+cronJob.Name, cwnpName, true); err != nil {
logger.Error(err, "failed to update ClusterNimbusPolicy status")
}
logger.Info("Dangling Kubernetes CronJob deleted", "CronJobJob.Name", cronJob.Name, "CronJob.Namespace", cronJob.Namespace)
12 changes: 10 additions & 2 deletions pkg/adapter/nimbus-k8tls/manager/k8tls.go
@@ -202,8 +202,16 @@ func setupK8tlsEnv(ctx context.Context, cwnp v1alpha1.ClusterNimbusPolicy, schem
objs := []client.Object{ns, cm, sa, clusterRole, clusterRoleBinding}
for idx := range objs {
objToCreate := objs[idx]
if err := ctrl.SetControllerReference(&cwnp, objToCreate, scheme); err != nil {
return err

// Don't set owner ref on namespace. In environments with configured Pod Security
// Standards labelling namespaces becomes a requirement. However, on deletion of
// CWNP a namespace with ownerReferences set also gets deleted. Since we need to
// keep the nimbus-k8tls-env namespace labeled, removing the ownerReferences
// prevents this deletion.
if idx != 0 {
if err := ctrl.SetControllerReference(&cwnp, objToCreate, scheme); err != nil {
return err
}
}

var existingObj client.Object
2 changes: 1 addition & 1 deletion pkg/adapter/nimbus-kyverno/clusterrole.yaml
@@ -5,7 +5,7 @@ metadata:
app.kubernetes.io/component: background-controller
app.kubernetes.io/instance: kyverno
app.kubernetes.io/part-of: kyverno
name: kyverno:update-pods
name: kyverno:update-resources
rules:
- apiGroups:
- '*'
2 changes: 1 addition & 1 deletion pkg/adapter/nimbus-kyverno/processor/kcpbuilder.go
@@ -72,7 +72,7 @@ func clusterCocoRuntimeAddition(cnp *v1alpha1.ClusterNimbusPolicy, rule v1alpha1
resourceFilter = kyvernov1.ResourceFilter{
ResourceDescription: kyvernov1.ResourceDescription{
Kinds: []string{
"v1/Pod",
"apps/v1/Deployment",
},
Namespaces: cnp.Spec.NsSelector.MatchNames,
Selector: &metav1.LabelSelector{
6 changes: 6 additions & 0 deletions pkg/adapter/nimbus-kyverno/processor/kpbuilder.go
@@ -189,6 +189,12 @@ func cocoRuntimeAddition(np *v1alpha1.NimbusPolicy, rule v1alpha1.Rule) ([]kyver
Kinds: []string{
"apps/v1/Deployment",
},
Targets: []kyvernov1.TargetResourceSpec{
kyvernov1.TargetResourceSpec{
ResourceSpec: kyvernov1.ResourceSpec{
APIVersion: "apps/v1",
Kind: "Deployment",
Namespace: np.Namespace,
},
},
},
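The Targets field added above tells Kyverno to mutate existing Deployments in the NimbusPolicy's namespace rather than only intercepting admission requests. As a rough sketch, assuming the surrounding rule is a mutate rule (only the match kind and the targets stanza come from this hunk, every other field is assumed), the generated Policy might serialize to:

# Assumed shape of the mutate rule emitted by cocoRuntimeAddition; names and the
# patch itself are placeholders, only kinds and targets reflect this diff.
apiVersion: kyverno.io/v1
kind: Policy
metadata:
  name: example-coco-runtime        # hypothetical name
  namespace: default                # np.Namespace in the builder
spec:
  rules:
    - name: coco-runtime-addition   # hypothetical rule name
      match:
        any:
          - resources:
              kinds:
                - apps/v1/Deployment
      mutate:
        targets:
          - apiVersion: apps/v1
            kind: Deployment
            namespace: default      # np.Namespace in the builder
        # The actual patch (e.g. setting a confidential-containers runtime class)
        # is built elsewhere in kpbuilder.go and is omitted from this sketch.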
3 changes: 0 additions & 3 deletions pkg/processor/policybuilder/nimbuspolicy_builder.go
@@ -46,9 +46,6 @@ func BuildNimbusPolicy(ctx context.Context, logger logr.Logger, k8sClient client
if err != nil {
return nil, err
}
if len(matchLabels) == 0 {
return nil, errors.Wrap(err, "No labels matched the CEL expressions, aborting NimbusPolicy creation due to missing keys in labels")
}

nimbusPolicy := &v1.NimbusPolicy{
ObjectMeta: metav1.ObjectMeta{
