From 65aba2c43487dab86969ad0ea04f05ce8fb4c7b4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20G=C3=B6rg?=
Date: Wed, 13 Jul 2022 12:56:48 +0200
Subject: [PATCH 01/13] k8s workload attestor: handle cgroup paths without a
 pod UID (closes #3183)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Christian Görg
---
 .../plugin/workloadattestor/k8s/k8s_posix.go  |  37 ++-
 .../plugin/workloadattestor/k8s/k8s_test.go   |  69 +++-
 .../k8s/testdata/cgroups_pid_in_crio_pod.txt  |   1 +
 .../k8s/testdata/crio_pod_list.json           | 157 +++++++++
 .../crio_pod_list_duplicate_containerId.json  | 305 ++++++++++++++++++
 5 files changed, 560 insertions(+), 9 deletions(-)
 create mode 100644 pkg/agent/plugin/workloadattestor/k8s/testdata/cgroups_pid_in_crio_pod.txt
 create mode 100644 pkg/agent/plugin/workloadattestor/k8s/testdata/crio_pod_list.json
 create mode 100644 pkg/agent/plugin/workloadattestor/k8s/testdata/crio_pod_list_duplicate_containerId.json

diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
index 4f22fbf5d6..06c58cdb1f 100644
--- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
+++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
@@ -190,21 +190,31 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque
 		return nil, err
 	}
 
+	var attestResponse *workloadattestorv1.AttestResponse
 	for _, item := range list.Items {
 		item := item
-		if item.UID != podUID {
+
+		// podUID can be empty when the cgroup path contains only a container ID
+		if item.UID != podUID && podUID != "" {
 			continue
 		}
 
-		status, lookup := lookUpContainerInPod(containerID, item.Status)
+		lookupStatus, lookup := lookUpContainerInPod(containerID, item.Status)
 		switch lookup {
 		case containerInPod:
-			return &workloadattestorv1.AttestResponse{
-				SelectorValues: getSelectorValuesFromPodInfo(&item, status),
-			}, nil
+			if attestResponse != nil {
+				log.Warn("Two pods found with same container Id")
+				return nil, status.Error(codes.Aborted, "Two pods found with same container Id")
+			}
+			attestResponse = &workloadattestorv1.AttestResponse{
+				SelectorValues: getSelectorValuesFromPodInfo(&item, lookupStatus),
+			}
 		case containerNotInPod:
 		}
 	}
+	if attestResponse != nil {
+		return attestResponse, nil
+	}
 
 	// if the container was not located after the maximum number of attempts then the search is over.
 	if attempt >= config.MaxPollAttempts {
@@ -582,6 +592,16 @@ var cgroupRE = regexp.MustCompile(`` +
 	// non-punctuation end of string, i.e., the container ID
 	`([[:^punct:]]+)$`)
 
+// cgroupNoPodUidRE is the fallback regex used when cgroupRE does not match.
+// It applies to container runtimes that do not put the pod UID into
+// the cgroup name.
+// Currently only CRI-O is known to exhibit this anomaly.
+var cgroupNoPodUidRE = regexp.MustCompile(`` + + // /crio- + `[[:punct:]]crio[[:punct:]]` + + // non-punctuation end of string, i.e., the container ID + `([[:^punct:]]+)$`) + func getPodUIDAndContainerIDFromCGroupPath(cgroupPath string) (types.UID, string, bool) { // We are only interested in kube pods entries, for example: // - /kubepods/burstable/pod2c48913c-b29f-11e7-9350-020968147796/9bca8d63d5fa610783847915bcff0ecac1273e5b4bed3f6fa1b07350e0135961 @@ -589,7 +609,7 @@ func getPodUIDAndContainerIDFromCGroupPath(cgroupPath string) (types.UID, string // - /kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c48913c-b29f-11e7-9350-020968147796.slice/docker-9bca8d63d5fa610783847915bcff0ecac1273e5b4bed3f6fa1b07350e0135961.scope // - /kubepods-besteffort-pod72f7f152_440c_66ac_9084_e0fc1d8a910c.slice:cri-containerd:b2a102854b4969b2ce98dc329c86b4fb2b06e4ad2cc8da9d8a7578c9cd2004a2" // - /../../pod2c48913c-b29f-11e7-9350-020968147796/9bca8d63d5fa610783847915bcff0ecac1273e5b4bed3f6fa1b07350e0135961 - + // - 0::/../crio-45490e76e0878aaa4d9808f7d2eefba37f093c3efbba9838b6d8ab804d9bd814.scope // First trim off any .scope suffix. This allows for a cleaner regex since // we don't have to muck with greediness. TrimSuffix is no-copy so this // is cheap. @@ -598,6 +618,11 @@ func getPodUIDAndContainerIDFromCGroupPath(cgroupPath string) (types.UID, string matches := cgroupRE.FindStringSubmatch(cgroupPath) if matches != nil { return canonicalizePodUID(matches[1]), matches[2], true + } else { + matches := cgroupNoPodUidRE.FindStringSubmatch(cgroupPath) + if matches != nil { + return "", matches[1], true + } } return "", "", false } diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go index bd3bfb484f..15b43ff064 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go @@ -39,12 +39,15 @@ import ( const ( pid = 123 - podListFilePath = "testdata/pod_list.json" - kindPodListFilePath = "testdata/kind_pod_list.json" - podListNotRunningFilePath = "testdata/pod_list_not_running.json" + podListFilePath = "testdata/pod_list.json" + kindPodListFilePath = "testdata/kind_pod_list.json" + crioPodListFilePath = "testdata/crio_pod_list.json" + crioPodListDuplicateContainerIdFilePath = "testdata/crio_pod_list_duplicate_containerId.json" + podListNotRunningFilePath = "testdata/pod_list_not_running.json" cgPidInPodFilePath = "testdata/cgroups_pid_in_pod.txt" cgPidInKindPodFilePath = "testdata/cgroups_pid_in_kind_pod.txt" + cgPidInCrioPodFilePath = "testdata/cgroups_pid_in_crio_pod.txt" cgInitPidInPodFilePath = "testdata/cgroups_init_pid_in_pod.txt" cgPidNotInPodFilePath = "testdata/cgroups_pid_not_in_pod.txt" cgSystemdPidInPodFilePath = "testdata/systemd_cgroups_pid_in_pod.txt" @@ -110,6 +113,25 @@ FwOGLt+I3+9beT0vo+pn9Rq0squewFYe3aJbwpkyfP2xOovQCdm4PC8y {Type: "k8s", Value: "sa:default"}, } + testCrioPodSelectors = []*common.Selector{ + {Type: "k8s", Value: "container-image:gcr.io/spiffe-io/spire-agent:0.8.1"}, + {Type: "k8s", Value: "container-image:gcr.io/spiffe-io/spire-agent@sha256:1e4c481d76e9ecbd3d8684891e0e46aa021a30920ca04936e1fdcc552747d941"}, + {Type: "k8s", Value: "container-name:workload-api-client"}, + {Type: "k8s", Value: "node-name:a37b7d23-d32a-4932-8f33-40950ac16ee9"}, + {Type: "k8s", Value: "ns:sfh-199"}, + {Type: "k8s", Value: "pod-image-count:1"}, + {Type: "k8s", Value: "pod-image:gcr.io/spiffe-io/spire-agent:0.8.1"}, + {Type: "k8s", Value: 
"pod-image:gcr.io/spiffe-io/spire-agent@sha256:1e4c481d76e9ecbd3d8684891e0e46aa021a30920ca04936e1fdcc552747d941"}, + {Type: "k8s", Value: "pod-init-image-count:0"}, + {Type: "k8s", Value: "pod-label:app:sample-workload"}, + {Type: "k8s", Value: "pod-label:pod-template-hash:6658cb9566"}, + {Type: "k8s", Value: "pod-name:sample-workload-6658cb9566-5n4b4"}, + {Type: "k8s", Value: "pod-owner-uid:ReplicaSet:349d135e-3781-43e3-bc25-c900aedf1d0c"}, + {Type: "k8s", Value: "pod-owner:ReplicaSet:sample-workload-6658cb9566"}, + {Type: "k8s", Value: "pod-uid:a2830d0d-b0f0-4ff0-81b5-0ee4e299cf80"}, + {Type: "k8s", Value: "sa:default"}, + } + testInitPodSelectors = []*common.Selector{ {Type: "k8s", Value: "container-image:docker-pullable://quay.io/coreos/flannel@sha256:1b401bf0c30bada9a539389c3be652b58fe38463361edf488e6543c8761d4970"}, {Type: "k8s", Value: "container-image:quay.io/coreos/flannel:v0.9.0-amd64"}, @@ -188,6 +210,20 @@ func (s *Suite) TestAttestWithPidInKindPod() { s.requireAttestSuccessWithKindPod(p) } +func (s *Suite) TestAttestWithPidInCrioPod() { + s.startInsecureKubelet() + p := s.loadInsecurePlugin() + + s.requireAttestSuccessWithCrioPod(p) +} + +func (s *Suite) TestAttestFailDuplicateContainerId() { + s.startInsecureKubelet() + p := s.loadInsecurePlugin() + + s.requireAttestFailWithCrioPod(p) +} + func (s *Suite) TestAttestWithPidInPodSystemdCgroups() { s.startInsecureKubelet() p := s.loadInsecurePlugin() @@ -796,6 +832,18 @@ func (s *Suite) requireAttestSuccessWithKindPod(p workloadattestor.WorkloadAttes s.requireAttestSuccess(p, testKindPodSelectors) } +func (s *Suite) requireAttestSuccessWithCrioPod(p workloadattestor.WorkloadAttestor) { + s.addPodListResponse(crioPodListFilePath) + s.addCgroupsResponse(cgPidInCrioPodFilePath) + s.requireAttestSuccess(p, testCrioPodSelectors) +} + +func (s *Suite) requireAttestFailWithCrioPod(p workloadattestor.WorkloadAttestor) { + s.addPodListResponse(crioPodListDuplicateContainerIdFilePath) + s.addCgroupsResponse(cgPidInCrioPodFilePath) + s.requireAttestFailure(p, codes.Aborted, "Two pods found with same container Id") +} + func (s *Suite) requireAttestSuccessWithPodSystemdCgroups(p workloadattestor.WorkloadAttestor) { s.addPodListResponse(podListFilePath) s.addCgroupsResponse(cgSystemdPidInPodFilePath) @@ -909,6 +957,15 @@ func TestGetContainerIDFromCGroups(t *testing.T) { expectContainerID: "9bca8d63d5fa610783847915bcff0ecac1273e5b4bed3f6fa1b07350e0135961", expectCode: codes.OK, }, + { + name: "cri-o", + cgroupPaths: []string{ + "0::/../crio-45490e76e0878aaa4d9808f7d2eefba37f093c3efbba9838b6d8ab804d9bd814.scope", + }, + expectPodUID: "", + expectContainerID: "45490e76e0878aaa4d9808f7d2eefba37f093c3efbba9838b6d8ab804d9bd814", + expectCode: codes.OK, + }, { name: "more than one container ID in cgroups", cgroupPaths: []string{ @@ -1021,6 +1078,12 @@ func TestGetPodUIDAndContainerIDFromCGroupPath(t *testing.T) { expectPodUID: "72f7f152-440c-66ac-9084-e0fc1d8a910c", expectContainerID: "b2a102854b4969b2ce98dc329c86b4fb2b06e4ad2cc8da9d8a7578c9cd2004a2", }, + { + name: "cri-o", + cgroupPath: "0::/../crio-45490e76e0878aaa4d9808f7d2eefba37f093c3efbba9838b6d8ab804d9bd814.scope", + expectPodUID: "", + expectContainerID: "45490e76e0878aaa4d9808f7d2eefba37f093c3efbba9838b6d8ab804d9bd814", + }, { name: "uid generateds by kubernetes", cgroupPath: "/kubepods/pod2732ca68f6358eba7703fb6f82a25c94", diff --git a/pkg/agent/plugin/workloadattestor/k8s/testdata/cgroups_pid_in_crio_pod.txt b/pkg/agent/plugin/workloadattestor/k8s/testdata/cgroups_pid_in_crio_pod.txt 
new file mode 100644 index 0000000000..dc8482af02 --- /dev/null +++ b/pkg/agent/plugin/workloadattestor/k8s/testdata/cgroups_pid_in_crio_pod.txt @@ -0,0 +1 @@ +0::/../crio-09bc3d7ade839efec32b6bec4ec79d099027a668ddba043083ec21d3c3b8f1e6.scope \ No newline at end of file diff --git a/pkg/agent/plugin/workloadattestor/k8s/testdata/crio_pod_list.json b/pkg/agent/plugin/workloadattestor/k8s/testdata/crio_pod_list.json new file mode 100644 index 0000000000..edabbb45c8 --- /dev/null +++ b/pkg/agent/plugin/workloadattestor/k8s/testdata/crio_pod_list.json @@ -0,0 +1,157 @@ +{ + "apiVersion": "v1", + "items": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "creationTimestamp": "2019-09-20T06:13:48Z", + "generateName": "sample-workload-6658cb9566-", + "labels": { + "app": "sample-workload", + "pod-template-hash": "6658cb9566" + }, + "name": "sample-workload-6658cb9566-5n4b4", + "namespace": "sfh-199", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ReplicaSet", + "name": "sample-workload-6658cb9566", + "uid": "349d135e-3781-43e3-bc25-c900aedf1d0c" + } + ], + "resourceVersion": "17021", + "selfLink": "/api/v1/namespaces/sfh-199/pods/sample-workload-6658cb9566-5n4b4", + "uid": "a2830d0d-b0f0-4ff0-81b5-0ee4e299cf80" + }, + "spec": { + "containers": [ + { + "args": [ + "api", + "watch" + ], + "command": [ + "/opt/spire/bin/spire-agent" + ], + "image": "gcr.io/spiffe-io/spire-agent:0.8.1", + "imagePullPolicy": "IfNotPresent", + "name": "workload-api-client", + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "volumeMounts": [ + { + "mountPath": "/tmp/spire-agent/public", + "name": "spire-agent-socket", + "readOnly": true + }, + { + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-qfslv", + "readOnly": true + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "enableServiceLinks": true, + "nodeName": "a37b7d23-d32a-4932-8f33-40950ac16ee9", + "priority": 0, + "restartPolicy": "Always", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [ + { + "hostPath": { + "path": "/run/spire-agent/public", + "type": "Directory" + }, + "name": "spire-agent-socket" + }, + { + "name": "default-token-qfslv", + "secret": { + "defaultMode": 420, + "secretName": "default-token-qfslv" + } + } + ] + }, + "status": { + "conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:48Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:49Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:49Z", + "status": "True", + "type": "ContainersReady" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:48Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [ + { + "containerID": "containerd://09bc3d7ade839efec32b6bec4ec79d099027a668ddba043083ec21d3c3b8f1e6", + "image": "gcr.io/spiffe-io/spire-agent:0.8.1", + "imageID": 
"gcr.io/spiffe-io/spire-agent@sha256:1e4c481d76e9ecbd3d8684891e0e46aa021a30920ca04936e1fdcc552747d941", + "lastState": {}, + "name": "workload-api-client", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-09-20T06:13:49Z" + } + } + } + ], + "hostIP": "172.17.0.2", + "phase": "Running", + "podIP": "10.244.0.8", + "qosClass": "BestEffort", + "startTime": "2019-09-20T06:13:48Z" + } + } + ], + "kind": "List", + "metadata": { + "resourceVersion": "", + "selfLink": "" + } +} diff --git a/pkg/agent/plugin/workloadattestor/k8s/testdata/crio_pod_list_duplicate_containerId.json b/pkg/agent/plugin/workloadattestor/k8s/testdata/crio_pod_list_duplicate_containerId.json new file mode 100644 index 0000000000..1f3ad0ec7a --- /dev/null +++ b/pkg/agent/plugin/workloadattestor/k8s/testdata/crio_pod_list_duplicate_containerId.json @@ -0,0 +1,305 @@ +{ + "apiVersion": "v1", + "items": [ + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "creationTimestamp": "2019-09-20T06:13:48Z", + "generateName": "sample-workload-6658cb9566-", + "labels": { + "app": "sample-workload", + "pod-template-hash": "6658cb9566" + }, + "name": "sample-workload-6658cb9566-5n4b4", + "namespace": "sfh-199", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ReplicaSet", + "name": "sample-workload-6658cb9566", + "uid": "349d135e-3781-43e3-bc25-c900aedf1d0c" + } + ], + "resourceVersion": "17021", + "selfLink": "/api/v1/namespaces/sfh-199/pods/sample-workload-6658cb9566-5n4b4", + "uid": "a2830d0d-b0f0-4ff0-81b5-0ee4e299cf80" + }, + "spec": { + "containers": [ + { + "args": [ + "api", + "watch" + ], + "command": [ + "/opt/spire/bin/spire-agent" + ], + "image": "gcr.io/spiffe-io/spire-agent:0.8.1", + "imagePullPolicy": "IfNotPresent", + "name": "workload-api-client", + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "volumeMounts": [ + { + "mountPath": "/tmp/spire-agent/public", + "name": "spire-agent-socket", + "readOnly": true + }, + { + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-qfslv", + "readOnly": true + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "enableServiceLinks": true, + "nodeName": "a37b7d23-d32a-4932-8f33-40950ac16ee9", + "priority": 0, + "restartPolicy": "Always", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [ + { + "hostPath": { + "path": "/run/spire-agent/public", + "type": "Directory" + }, + "name": "spire-agent-socket" + }, + { + "name": "default-token-qfslv", + "secret": { + "defaultMode": 420, + "secretName": "default-token-qfslv" + } + } + ] + }, + "status": { + "conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:48Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:49Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:49Z", + "status": "True", + "type": "ContainersReady" + }, + { + "lastProbeTime": null, + 
"lastTransitionTime": "2019-09-20T06:13:48Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [ + { + "containerID": "containerd://09bc3d7ade839efec32b6bec4ec79d099027a668ddba043083ec21d3c3b8f1e6", + "image": "gcr.io/spiffe-io/spire-agent:0.8.1", + "imageID": "gcr.io/spiffe-io/spire-agent@sha256:1e4c481d76e9ecbd3d8684891e0e46aa021a30920ca04936e1fdcc552747d941", + "lastState": {}, + "name": "workload-api-client", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-09-20T06:13:49Z" + } + } + } + ], + "hostIP": "172.17.0.2", + "phase": "Running", + "podIP": "10.244.0.8", + "qosClass": "BestEffort", + "startTime": "2019-09-20T06:13:48Z" + } + }, + { + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "creationTimestamp": "2019-09-20T06:13:48Z", + "generateName": "sample-workload-6658cb9566-", + "labels": { + "app": "sample-workload", + "pod-template-hash": "6658cb9566" + }, + "name": "sample-workload-6658cb9566-5n4b4", + "namespace": "sfh-199", + "ownerReferences": [ + { + "apiVersion": "apps/v1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "ReplicaSet", + "name": "sample-workload-6658cb9566", + "uid": "349d135e-3781-43e3-bc25-c900aedf1d0c" + } + ], + "resourceVersion": "17021", + "selfLink": "/api/v1/namespaces/sfh-199/pods/sample-workload-6658cb9566-5n4b4", + "uid": "72631393-dd79-49e5-8450-f68d930b93b4" + }, + "spec": { + "containers": [ + { + "args": [ + "api", + "watch" + ], + "command": [ + "/opt/spire/bin/spire-agent" + ], + "image": "gcr.io/spiffe-io/spire-agent:0.8.1", + "imagePullPolicy": "IfNotPresent", + "name": "workload-api-client", + "resources": {}, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "volumeMounts": [ + { + "mountPath": "/tmp/spire-agent/public", + "name": "spire-agent-socket", + "readOnly": true + }, + { + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-qfslv", + "readOnly": true + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "enableServiceLinks": true, + "nodeName": "a37b7d23-d32a-4932-8f33-40950ac16ee9", + "priority": 0, + "restartPolicy": "Always", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 30, + "tolerations": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [ + { + "hostPath": { + "path": "/run/spire-agent/public", + "type": "Directory" + }, + "name": "spire-agent-socket" + }, + { + "name": "default-token-qfslv", + "secret": { + "defaultMode": 420, + "secretName": "default-token-qfslv" + } + } + ] + }, + "status": { + "conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:48Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:49Z", + "status": "True", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:49Z", + "status": "True", + "type": "ContainersReady" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2019-09-20T06:13:48Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [ + { + "containerID": 
"containerd://09bc3d7ade839efec32b6bec4ec79d099027a668ddba043083ec21d3c3b8f1e6", + "image": "gcr.io/spiffe-io/spire-agent:0.8.1", + "imageID": "gcr.io/spiffe-io/spire-agent@sha256:1e4c481d76e9ecbd3d8684891e0e46aa021a30920ca04936e1fdcc552747d941", + "lastState": {}, + "name": "workload-api-client", + "ready": true, + "restartCount": 0, + "state": { + "running": { + "startedAt": "2019-09-20T06:13:49Z" + } + } + } + ], + "hostIP": "172.17.0.2", + "phase": "Running", + "podIP": "10.244.0.8", + "qosClass": "BestEffort", + "startTime": "2019-09-20T06:13:48Z" + } + } + ], + "kind": "List", + "metadata": { + "resourceVersion": "", + "selfLink": "" + } + } + \ No newline at end of file From 68007b223f276c655724fb0ef53e18f2064be702 Mon Sep 17 00:00:00 2001 From: Marcos Yacob Date: Fri, 8 Jul 2022 10:30:07 -0300 Subject: [PATCH 02/13] Add integration test to validate CRD mode (#3219) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add integration test for CRD mode Signed-off-by: Marcos Yacob Signed-off-by: Christian Görg --- test/integration/suites/k8s-crd-mode/00-setup | 32 ++ .../suites/k8s-crd-mode/01-apply-config | 35 ++ .../k8s-crd-mode/02-check-for-workload-svid | 30 ++ .../suites/k8s-crd-mode/Dockerfile | 4 + .../integration/suites/k8s-crd-mode/README.md | 11 + .../conf/admctrl/admission-control.yaml | 8 + .../k8s-crd-mode/conf/admctrl/kubeconfig.yaml | 9 + .../conf/agent/kustomization.yaml | 10 + .../k8s-crd-mode/conf/agent/spire-agent.yaml | 163 ++++++++++ .../suites/k8s-crd-mode/conf/kind-config.yaml | 20 ++ .../conf/server/kustomization.yaml | 10 + .../conf/server/spire-server.yaml | 306 ++++++++++++++++++ .../conf/spiffeid.spiffe.io_spiffeids.yaml | 108 +++++++ .../suites/k8s-crd-mode/conf/workload.yaml | 53 +++ .../suites/k8s-crd-mode/init-kubectl | 8 + test/integration/suites/k8s-crd-mode/teardown | 12 + 16 files changed, 819 insertions(+) create mode 100755 test/integration/suites/k8s-crd-mode/00-setup create mode 100755 test/integration/suites/k8s-crd-mode/01-apply-config create mode 100755 test/integration/suites/k8s-crd-mode/02-check-for-workload-svid create mode 100644 test/integration/suites/k8s-crd-mode/Dockerfile create mode 100644 test/integration/suites/k8s-crd-mode/README.md create mode 100644 test/integration/suites/k8s-crd-mode/conf/admctrl/admission-control.yaml create mode 100644 test/integration/suites/k8s-crd-mode/conf/admctrl/kubeconfig.yaml create mode 100644 test/integration/suites/k8s-crd-mode/conf/agent/kustomization.yaml create mode 100644 test/integration/suites/k8s-crd-mode/conf/agent/spire-agent.yaml create mode 100644 test/integration/suites/k8s-crd-mode/conf/kind-config.yaml create mode 100644 test/integration/suites/k8s-crd-mode/conf/server/kustomization.yaml create mode 100644 test/integration/suites/k8s-crd-mode/conf/server/spire-server.yaml create mode 100644 test/integration/suites/k8s-crd-mode/conf/spiffeid.spiffe.io_spiffeids.yaml create mode 100644 test/integration/suites/k8s-crd-mode/conf/workload.yaml create mode 100644 test/integration/suites/k8s-crd-mode/init-kubectl create mode 100755 test/integration/suites/k8s-crd-mode/teardown diff --git a/test/integration/suites/k8s-crd-mode/00-setup b/test/integration/suites/k8s-crd-mode/00-setup new file mode 100755 index 0000000000..fd0e7ed42b --- /dev/null +++ b/test/integration/suites/k8s-crd-mode/00-setup @@ -0,0 +1,32 @@ +#!/bin/bash + +# Create a temporary path that will be added to the PATH to avoid picking up +# binaries from the environment that aren't a 
version match.
+mkdir -p ./bin
+
+docker build --target example-crd-agent -t example-crd-agent .
+
+KIND_PATH=./bin/kind
+KUBECTL_PATH=./bin/kubectl
+
+# Download kind at the expected version at the given path.
+download-kind "${KIND_PATH}"
+
+# Download kubectl at the expected version.
+download-kubectl "${KUBECTL_PATH}"
+
+# We must supply an absolute path to the configuration directory. Replace the
+# CONFDIR variable in the kind configuration with the conf directory of the
+# running test.
+sed -i.bak "s#CONFDIR#${PWD}/conf#g" conf/kind-config.yaml
+rm conf/kind-config.yaml.bak
+
+# Start the kind cluster.
+start-kind-cluster "${KIND_PATH}" k8stest ./conf/kind-config.yaml
+
+# Load the given images in the cluster.
+container_images=("spire-server:latest-local" "spire-agent:latest-local" "k8s-workload-registrar:latest-local" "example-crd-agent:latest")
+load-images "${KIND_PATH}" k8stest "${container_images[@]}"
+
+# Set the kubectl context.
+set-kubectl-context "${KUBECTL_PATH}" kind-k8stest
diff --git a/test/integration/suites/k8s-crd-mode/01-apply-config b/test/integration/suites/k8s-crd-mode/01-apply-config
new file mode 100755
index 0000000000..3f85425307
--- /dev/null
+++ b/test/integration/suites/k8s-crd-mode/01-apply-config
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+source init-kubectl
+
+wait-for-rollout() {
+    ns=$1
+    obj=$2
+    MAXROLLOUTCHECKS=12
+    ROLLOUTCHECKINTERVAL=15s
+    for ((i=0; i<${MAXROLLOUTCHECKS}; i++)); do
+        log-info "checking rollout status for ${ns} ${obj}..."
+        if ./bin/kubectl "-n${ns}" rollout status "$obj" --timeout="${ROLLOUTCHECKINTERVAL}"; then
+            return
+        fi
+        log-warn "describing ${ns} ${obj}..."
+        ./bin/kubectl "-n${ns}" describe "$obj" || true
+        log-warn "logs for ${ns} ${obj}..."
+        ./bin/kubectl "-n${ns}" logs --all-containers "$obj" || true
+    done
+    fail-now "Failed waiting for ${obj} to roll out."
+}
+
+./bin/kubectl apply -f ./conf/spiffeid.spiffe.io_spiffeids.yaml
+./bin/kubectl create namespace spire
+./bin/kubectl apply -k ./conf/server
+wait-for-rollout spire deployment/spire-server
+
+./bin/kubectl apply -k ./conf/agent
+wait-for-rollout spire daemonset/spire-agent
+
+# Apply this separately after all of the spire infrastructure has been rolled
+# out, otherwise the k8s-workload-registrar might miss its chance to create
+# an entry for it
+./bin/kubectl apply -f ./conf/workload.yaml
+wait-for-rollout spire deployment/example-workload
diff --git a/test/integration/suites/k8s-crd-mode/02-check-for-workload-svid b/test/integration/suites/k8s-crd-mode/02-check-for-workload-svid
new file mode 100755
index 0000000000..1539ae7787
--- /dev/null
+++ b/test/integration/suites/k8s-crd-mode/02-check-for-workload-svid
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+source init-kubectl
+
+MAXFETCHCHECKS=60
+FETCHCHECKINTERVAL=1
+for ((i=1; i<=${MAXFETCHCHECKS}; i++)); do
+    EXAMPLEPOD=$(./bin/kubectl -nspire get pod -l app=example-workload -o jsonpath="{.items[0].metadata.name}")
+    log-info "checking for workload SPIFFE ID ($i of $MAXFETCHCHECKS max)..."
+    if ./bin/kubectl -nspire exec -t "${EXAMPLEPOD}" -- \
+        /opt/spire/bin/spire-agent api fetch -write /tmp \
+        | grep "SPIFFE ID:"; then
+        DONE=1
+
+        data=$(./bin/kubectl -nspire exec -t "${EXAMPLEPOD}" -- \
+            openssl x509 -in /tmp/svid.0.pem -text -noout)
+
+        echo $data | grep -q "URI:spiffe://example.org/workload" || fail-now "unexpected SPIFFE ID: $data"
+        echo $data | grep -q "DNS:dns1, DNS:dns2," || fail-now "unexpected DNS: $data"
+
+        break
+    fi
+    sleep "${FETCHCHECKINTERVAL}"
+done
+
+if [ "${DONE:-0}" -eq 1 ]; then
+    log-info "SPIFFE ID found."
+else
+    fail-now "timed out waiting for workload to obtain credentials."
+fi
diff --git a/test/integration/suites/k8s-crd-mode/Dockerfile b/test/integration/suites/k8s-crd-mode/Dockerfile
new file mode 100644
index 0000000000..bc76a647c4
--- /dev/null
+++ b/test/integration/suites/k8s-crd-mode/Dockerfile
@@ -0,0 +1,4 @@
+FROM spire-agent:latest-local AS example-crd-agent
+RUN apk add --update openssl && \
+    rm -rf /var/cache/apk/*
+CMD []
diff --git a/test/integration/suites/k8s-crd-mode/README.md b/test/integration/suites/k8s-crd-mode/README.md
new file mode 100644
index 0000000000..a422a985ff
--- /dev/null
+++ b/test/integration/suites/k8s-crd-mode/README.md
@@ -0,0 +1,11 @@
+# Kubernetes with CRD mode Suite
+
+## Description
+
+This suite sets up a Kubernetes cluster using [Kind](https://kind.sigs.k8s.io) and asserts the following:
+
+* SPIRE server attests SPIRE agents by verifying Kubernetes Projected Service
+  Account Tokens (i.e. `k8s_psat`) via the Token Review API.
+* Workloads are registered via the K8S Workload Registrar (crd mode) and are able to
+  obtain identities with expected DNS and SPIFFE ID without the need for manually maintained registration
+  entries.
diff --git a/test/integration/suites/k8s-crd-mode/conf/admctrl/admission-control.yaml b/test/integration/suites/k8s-crd-mode/conf/admctrl/admission-control.yaml
new file mode 100644
index 0000000000..05480c2df9
--- /dev/null
+++ b/test/integration/suites/k8s-crd-mode/conf/admctrl/admission-control.yaml
@@ -0,0 +1,8 @@
+apiVersion: apiserver.k8s.io/v1alpha1
+kind: AdmissionConfiguration
+plugins:
+- name: ValidatingAdmissionWebhook
+  configuration:
+    apiVersion: apiserver.config.k8s.io/v1alpha1
+    kind: WebhookAdmission
+    kubeConfigFile: /etc/kubernetes/pki/admctrl/kubeconfig.yaml
diff --git a/test/integration/suites/k8s-crd-mode/conf/admctrl/kubeconfig.yaml b/test/integration/suites/k8s-crd-mode/conf/admctrl/kubeconfig.yaml
new file mode 100644
index 0000000000..72942c5ae7
--- /dev/null
+++ b/test/integration/suites/k8s-crd-mode/conf/admctrl/kubeconfig.yaml
@@ -0,0 +1,9 @@
+# KubeConfig with client credentials for the API Server to use to call the
+# K8S Workload Registrar service
+apiVersion: v1
+kind: Config
+users:
+- name: k8s-workload-registrar.spire.svc
+  user:
+    client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJ1VENDQVYrZ0F3SUJBZ0lJVVNIdmpGQTFxRHd3Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF4TVoKU3poVElGZFBVa3RNVDBGRUlGSkZSMGxUVkZKQlVpQkRRVEFnRncweE9UQTFNVE14T1RFME1qTmFHQTg1T1RrNQpNVEl6TVRJek5UazFPVm93S0RFbU1DUUdBMVVFQXhNZFN6aFRJRmRQVWt0TVQwRkVJRkpGUjBsVFZGSkJVaUJEClRFbEZUbFF3V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkNBQVM3SDIrMjJOcEFhTmVRdXQvZEYwdUYKMXk0VDVKTVdBczJOYm9NOXhZdlFKb1FXTVVNNERobWZQT1hVaE5STXdkb1JzTmhSdXZsYkROY2FEU29tNE1DYQpvM1V3Y3pBT0JnTlZIUThCQWY4RUJBTUNBNmd3RXdZRFZSMGxCQXd3Q2dZSUt3WUJCUVVIQXdJd0RBWURWUjBUCkFRSC9CQUl3QURBZEJnTlZIUTRFRmdRVW9EYlBiOUpWNXhqZlZVMnBhSzd2UUNsZ2d3SXdId1lEVlIwakJCZ3cKRm9BVW02eFNULzJCUzRYdmhVcXVzaDJCTEwwdlJNSXdDZ1lJS29aSXpqMEVBd0lEU0FBd1JRSWdHNzRQeWkyZQpONlBEcVRGRnY1UDFjNFhjVVdERzMwdzJIZEU4Wm8rMStVWUNJUURUL2xMa2dUUjUzV01INVRqWkllblhmYzFjCmxkMGlqSmpvRFJIR3lIRjJxdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ1BhSWtTTVowUmduQllWYncKMDIrdlN5UUpDM2RtZ0VDNFBLN2svTnk4Qnh1aFJBTkNBQVM3SDIrMjJOcEFhTmVRdXQvZEYwdUYxeTRUNUpNVwpBczJOYm9NOXhZdlFKb1FXTVVNNERobWZQT1hVaE5STXdkb1JzTmhSdXZsYkROY2FEU29tNE1DYQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/test/integration/suites/k8s-crd-mode/conf/agent/kustomization.yaml b/test/integration/suites/k8s-crd-mode/conf/agent/kustomization.yaml new file mode 100644 index 0000000000..571ffcf771 --- /dev/null +++ b/test/integration/suites/k8s-crd-mode/conf/agent/kustomization.yaml @@ -0,0 +1,10 @@ +# kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +# list of Resource Config to be Applied +resources: + - spire-agent.yaml + +# namespace to deploy all Resources to +namespace: spire diff --git a/test/integration/suites/k8s-crd-mode/conf/agent/spire-agent.yaml b/test/integration/suites/k8s-crd-mode/conf/agent/spire-agent.yaml new file mode 100644 index 0000000000..ca5fa1f1c7 --- /dev/null +++ b/test/integration/suites/k8s-crd-mode/conf/agent/spire-agent.yaml @@ -0,0 +1,163 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-agent + namespace: spire + +--- + +# Required cluster role to allow spire-agent to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role +rules: +- apiGroups: [""] + resources: ["pods","nodes","nodes/proxy"] + verbs: ["get"] + +--- + +# Binds above cluster role to spire-agent service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-agent + namespace: spire +roleRef: + kind: ClusterRole + name: spire-agent-cluster-role + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-agent + namespace: spire +data: + agent.conf: | + agent { + data_dir = "/run/spire" + log_level = "DEBUG" + server_address = "spire-server" + server_port = "8081" + socket_path = "/run/spire/sockets/api.sock" + trust_bundle_path = "/run/spire/bundle/bundle.crt" + trust_domain = "example.org" + } + + plugins { + NodeAttestor "k8s_psat" { + plugin_data { + cluster = "example-cluster" + } + } + + KeyManager "memory" { + plugin_data { + } + } + + WorkloadAttestor "k8s" { + plugin_data { + # Defaults to the secure kubelet port by default. + # Minikube does not have a cert in the cluster CA bundle that + # can authenticate the kubelet cert, so skip validation. 
+ skip_kubelet_verification = true + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8089" + live_path = "/live" + ready_path = "/ready" + } + + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: spire-agent + namespace: spire + labels: + app: spire-agent +spec: + selector: + matchLabels: + app: spire-agent + updateStrategy: + type: RollingUpdate + template: + metadata: + namespace: spire + labels: + app: spire-agent + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: spire-agent + initContainers: + - name: init + # This is a small image with wait-for-it, choose whatever image + # you prefer that waits for a service to be up. This image is built + # from https://github.com/lqhl/wait-for-it + image: gcr.io/spiffe-io/wait-for-it + args: ["-t", "30", "spire-server:8081"] + containers: + - name: spire-agent + image: spire-agent:latest-local + imagePullPolicy: Never + args: ["-config", "/run/spire/config/agent.conf"] + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-agent-socket + mountPath: /run/spire/sockets + readOnly: false + - name: spire-bundle + mountPath: /run/spire/bundle + readOnly: true + - name: spire-token + mountPath: /var/run/secrets/tokens + livenessProbe: + httpGet: + path: /live + port: 8089 + initialDelaySeconds: 10 + periodSeconds: 10 + readinessProbe: + httpGet: + path: /ready + port: 8089 + initialDelaySeconds: 10 + periodSeconds: 10 + volumes: + - name: spire-config + configMap: + name: spire-agent + - name: spire-agent-socket + hostPath: + path: /run/spire/sockets + type: DirectoryOrCreate + - name: spire-bundle + configMap: + name: spire-bundle + - name: spire-token + projected: + sources: + - serviceAccountToken: + path: spire-agent + expirationSeconds: 7200 + audience: spire-server diff --git a/test/integration/suites/k8s-crd-mode/conf/kind-config.yaml b/test/integration/suites/k8s-crd-mode/conf/kind-config.yaml new file mode 100644 index 0000000000..173445df06 --- /dev/null +++ b/test/integration/suites/k8s-crd-mode/conf/kind-config.yaml @@ -0,0 +1,20 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +kubeadmConfigPatches: +- | + apiVersion: kubeadm.k8s.io/v1beta2 + kind: ClusterConfiguration + metadata: + name: config + apiServer: + extraArgs: + "service-account-signing-key-file": "/etc/kubernetes/pki/sa.key" + "service-account-issuer": "api" + "service-account-api-audiences": "api,spire-server" + "admission-control-config-file": "/etc/kubernetes/pki/admctrl/admission-control.yaml" +nodes: +- role: control-plane + image: kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6 + extraMounts: + - containerPath: /etc/kubernetes/pki/admctrl + hostPath: CONFDIR/admctrl diff --git a/test/integration/suites/k8s-crd-mode/conf/server/kustomization.yaml b/test/integration/suites/k8s-crd-mode/conf/server/kustomization.yaml new file mode 100644 index 0000000000..c87a9a25d0 --- /dev/null +++ b/test/integration/suites/k8s-crd-mode/conf/server/kustomization.yaml @@ -0,0 +1,10 @@ +# kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +# list of Resource Config to be Applied +resources: + - spire-server.yaml + +# namespace to deploy all Resources to +namespace: spire diff --git a/test/integration/suites/k8s-crd-mode/conf/server/spire-server.yaml b/test/integration/suites/k8s-crd-mode/conf/server/spire-server.yaml new file mode 100644 index 
0000000000..bc2c3c11f3 --- /dev/null +++ b/test/integration/suites/k8s-crd-mode/conf/server/spire-server.yaml @@ -0,0 +1,306 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-server + namespace: spire + +--- + +# Required cluster role to allow spire-server to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role +rules: +- apiGroups: [""] + resources: ["pods", "nodes"] + verbs: ["get"] +- apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "patch"] +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["create"] +- apiGroups: ["admissionregistration.k8s.io"] + resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"] + verbs: ["get", "list", "patch", "watch"] + +--- + +# Binds above cluster role to spire-server service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: ClusterRole + name: spire-server-cluster-role + apiGroup: rbac.authorization.k8s.io + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-bundle + namespace: spire + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + default_svid_ttl = "1h" + ca_subject = { + country = ["US"], + organization = ["SPIFFE"], + common_name = "", + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + "example-cluster" = { + service_account_allow_list = ["spire:spire-agent"] + } + } + } + } + + KeyManager "disk" { + plugin_data { + keys_path = "/run/spire/data/keys.json" + } + } + + Notifier "k8sbundle" { + plugin_data { + webhook_label = "spiffe.io/webhook" + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8089" + live_path = "/live" + ready_path = "/ready" + } + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: k8s-workload-registrar-role +rules: +- apiGroups: [""] + resources: ["endpoints", "nodes", "pods"] + verbs: ["get", "list", "watch"] +- apiGroups: ["spiffeid.spiffe.io"] + resources: ["spiffeids"] + verbs: ["create", "delete", "get", "list", "patch", "update", "watch"] +- apiGroups: ["spiffeid.spiffe.io"] + resources: ["spiffeids/status"] + verbs: ["get", "patch", "update"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: k8s-workload-registrar-role-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: k8s-workload-registrar-role +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire + +--- + +apiVersion: v1 +kind: ConfigMap +metadata: + name: k8s-workload-registrar + namespace: spire +data: + k8s-workload-registrar.conf: | + log_level = "debug" + trust_domain = "example.org" + server_socket_path = "/tmp/spire-server/private/api.sock" + cluster = "example-cluster" + pod_controller = true + add_svc_dns_names = true + mode = "crd" + webhook_enabled = true + identity_template = "ns/{{.Pod.Namespace}}/pod/{{.Pod.Name}}" + identity_template_label = "spiffe.io/spiffe-id" + +--- + 
+apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: k8s-workload-registrar + labels: + spiffe.io/webhook: "true" +webhooks: +- name: k8s-workload-registrar.spire.svc + admissionReviewVersions: ["v1", "v1beta1"] + clientConfig: + service: + name: k8s-workload-registrar + namespace: spire + path: "/validate-spiffeid-spiffe-io-v1beta1-spiffeid" + rules: + - apiGroups: ["spiffeid.spiffe.io"] + apiVersions: ["v1beta1"] + operations: ["CREATE", "UPDATE", "DELETE"] + resources: ["spiffeids"] + scope: Namespaced + sideEffects: None + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: spire-server + namespace: spire + labels: + app: spire-server +spec: + replicas: 1 + selector: + matchLabels: + app: spire-server + template: + metadata: + namespace: spire + labels: + app: spire-server + spec: + serviceAccountName: spire-server + shareProcessNamespace: true + containers: + - name: spire-server + image: spire-server:latest-local + imagePullPolicy: Never + args: ["-config", "/run/spire/config/server.conf"] + ports: + - containerPort: 8081 + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-registration-socket + mountPath: /tmp + readOnly: false + livenessProbe: + httpGet: + path: /live + port: 8089 + initialDelaySeconds: 5 + periodSeconds: 5 + readinessProbe: + httpGet: + path: /ready + port: 8089 + initialDelaySeconds: 5 + periodSeconds: 5 + - name: k8s-workload-registrar + image: k8s-workload-registrar:latest-local + imagePullPolicy: Never + args: ["-config", "/run/spire/config/k8s-workload-registrar.conf"] + env: + - name: MY_POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + ports: + - containerPort: 9443 + name: webhook + protocol: TCP + volumeMounts: + - mountPath: /run/spire/config + name: k8s-workload-registrar-config + readOnly: true + - mountPath: /run/spire/sockets + name: spire-agent-socket + readOnly: true + - name: spire-registration-socket + mountPath: /tmp + readOnly: false + volumes: + - name: spire-config + configMap: + name: spire-server + - name: spire-agent-socket + hostPath: + path: /run/spire/sockets + type: DirectoryOrCreate + - name: k8s-workload-registrar-config + configMap: + name: k8s-workload-registrar + - name: spire-registration-socket + emptyDir: {} + +--- + +apiVersion: v1 +kind: Service +metadata: + name: spire-server + namespace: spire +spec: + type: NodePort + ports: + - name: grpc + port: 8081 + targetPort: 8081 + protocol: TCP + selector: + app: spire-server + +--- + +apiVersion: v1 +kind: Service +metadata: + name: k8s-workload-registrar + namespace: spire +spec: + ports: + - name: webhook + protocol: TCP + port: 443 + targetPort: 9443 + selector: + app: spire-server diff --git a/test/integration/suites/k8s-crd-mode/conf/spiffeid.spiffe.io_spiffeids.yaml b/test/integration/suites/k8s-crd-mode/conf/spiffeid.spiffe.io_spiffeids.yaml new file mode 100644 index 0000000000..a166c5da60 --- /dev/null +++ b/test/integration/suites/k8s-crd-mode/conf/spiffeid.spiffe.io_spiffeids.yaml @@ -0,0 +1,108 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.4 + name: spiffeids.spiffeid.spiffe.io +spec: + group: spiffeid.spiffe.io + names: + kind: SpiffeID + listKind: SpiffeIDList + plural: spiffeids + singular: spiffeid + scope: Namespaced + versions: + - name: v1beta1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: 
+ description: SpiffeID is the Schema for the spiffeid API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SpiffeIDSpec defines the desired state of SpiffeID + properties: + dnsNames: + items: + type: string + type: array + federatesWith: + items: + type: string + type: array + parentId: + type: string + downstream: + type: boolean + selector: + properties: + arbitrary: + description: Arbitrary selectors + items: + type: string + type: array + containerImage: + description: Container image to match for this spiffe ID + type: string + containerName: + description: Container name to match for this spiffe ID + type: string + namespace: + description: Namespace to match for this spiffe ID + type: string + nodeName: + description: Node name to match for this spiffe ID + type: string + podLabel: + additionalProperties: + type: string + description: Pod label name/value to match for this spiffe ID + type: object + podName: + description: Pod name to match for this spiffe ID + type: string + podUid: + description: Pod UID to match for this spiffe ID + type: string + serviceAccount: + description: ServiceAccount to match for this spiffe ID + type: string + cluster: + description: The k8s_psat cluster name + type: string + agent_node_uid: + description: UID of the node + type: string + type: object + spiffeId: + type: string + required: + - parentId + - selector + - spiffeId + type: object + status: + description: SpiffeIDStatus defines the observed state of SpiffeID + properties: + entryId: + description: 'INSERT ADDITIONAL STATUS FIELD - define observed state + of cluster Important: Run "make" to regenerate code after modifying + this file' + type: string + type: object + type: object diff --git a/test/integration/suites/k8s-crd-mode/conf/workload.yaml b/test/integration/suites/k8s-crd-mode/conf/workload.yaml new file mode 100644 index 0000000000..e75381ab1d --- /dev/null +++ b/test/integration/suites/k8s-crd-mode/conf/workload.yaml @@ -0,0 +1,53 @@ +apiVersion: spiffeid.spiffe.io/v1beta1 +kind: SpiffeID +metadata: + name: example-workload + namespace: spire +spec: + parentId: spiffe://example.org/k8s-workload-registrar/example-cluster/node/k8stest-control-plane + selector: + namespace: spire + serviceAccount: default + dnsNames: + - dns1 + - dns2 + spiffeId: spiffe://example.org/workload + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: example-workload + namespace: spire + labels: + app: example-workload +spec: + selector: + matchLabels: + app: example-workload + template: + metadata: + namespace: spire + labels: + app: example-workload + spire-workload: example-workload + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + containers: + - name: example-workload + imagePullPolicy: Never + image: example-crd-agent:latest + command: ["/usr/bin/dumb-init", 
"/opt/spire/bin/spire-agent", "api", "watch"] + args: ["-socketPath", "/tmp/spire-agent/public/api.sock"] + volumeMounts: + - name: spire-agent-socket + mountPath: /tmp/spire-agent/public + readOnly: true + volumes: + - name: spire-agent-socket + hostPath: + path: /run/spire/sockets + type: Directory diff --git a/test/integration/suites/k8s-crd-mode/init-kubectl b/test/integration/suites/k8s-crd-mode/init-kubectl new file mode 100644 index 0000000000..b689f1f417 --- /dev/null +++ b/test/integration/suites/k8s-crd-mode/init-kubectl @@ -0,0 +1,8 @@ +#!/bin/bash + +KUBECONFIG="${RUNDIR}/kubeconfig" +if [ ! -f "${RUNDIR}/kubeconfig" ]; then + ./bin/kind get kubeconfig --name=k8stest > "${RUNDIR}/kubeconfig" +fi +export KUBECONFIG + diff --git a/test/integration/suites/k8s-crd-mode/teardown b/test/integration/suites/k8s-crd-mode/teardown new file mode 100755 index 0000000000..d0c69ac504 --- /dev/null +++ b/test/integration/suites/k8s-crd-mode/teardown @@ -0,0 +1,12 @@ +#!/bin/bash + +source init-kubectl + +if [ -z "$SUCCESS" ]; then + ./bin/kubectl -nspire logs deployment/spire-server --all-containers || true + ./bin/kubectl -nspire logs daemonset/spire-agent --all-containers || true + ./bin/kubectl -nspire logs deployment/example-workload --all-containers || true +fi + +export KUBECONFIG= +./bin/kind delete cluster --name k8stest From dfdc9d6e7f1f599c6a127a6de0734b5ec87f172a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 8 Jul 2022 12:29:11 -0600 Subject: [PATCH 03/13] Bump github.com/open-policy-agent/opa from 0.42.0 to 0.42.1 (#3232) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/open-policy-agent/opa](https://github.com/open-policy-agent/opa) from 0.42.0 to 0.42.1. - [Release notes](https://github.com/open-policy-agent/opa/releases) - [Changelog](https://github.com/open-policy-agent/opa/blob/main/CHANGELOG.md) - [Commits](https://github.com/open-policy-agent/opa/compare/v0.42.0...v0.42.1) --- updated-dependencies: - dependency-name: github.com/open-policy-agent/opa dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: Christian Görg --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e4e444f88b..96b4e6c371 100644 --- a/go.mod +++ b/go.mod @@ -53,7 +53,7 @@ require ( github.com/lib/pq v1.10.6 github.com/mattn/go-sqlite3 v1.14.14 github.com/mitchellh/cli v1.1.4 - github.com/open-policy-agent/opa v0.42.0 + github.com/open-policy-agent/opa v0.42.1 github.com/prometheus/client_golang v1.12.2 github.com/shirou/gopsutil/v3 v3.22.6 github.com/sirupsen/logrus v1.8.1 diff --git a/go.sum b/go.sum index 70503ea98f..f4934cac48 100644 --- a/go.sum +++ b/go.sum @@ -1094,8 +1094,8 @@ github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDs github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/open-policy-agent/opa v0.42.0 h1:CTJ240+A+sZEYSuLDYiT5l8Q3lcQf2eZc53jCbWNjZE= -github.com/open-policy-agent/opa v0.42.0/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s= +github.com/open-policy-agent/opa v0.42.1 h1:5R5hVrxgLFK3lxPAcel7iWz3TzbumAZj8qunOIx6fhk= +github.com/open-policy-agent/opa v0.42.1/go.mod h1:MrmoTi/BsKWT58kXlVayBb+rYVeaMwuBm3nYAN3923s= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= From 8b197e1d81cce9937ffbd95c54bc4fa9424aca38 Mon Sep 17 00:00:00 2001 From: Andrew Harding Date: Mon, 11 Jul 2022 10:04:00 -0600 Subject: [PATCH 04/13] Bump version to 1.4.0 (#3234) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also pulled in CHANGELOG for 1.3.2 and updated upgrade IT Signed-off-by: Andrew Harding Signed-off-by: Christian Görg --- CHANGELOG.md | 14 ++++++++++++++ pkg/common/version/version.go | 2 +- test/integration/suites/upgrade/versions.txt | 1 + 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index af540e374a..d08884ecc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,19 @@ # Changelog +## [1.3.2] - 2022-07-08 + +### Added +- Support for K8s workload attestation when the Kubelet is run as a standalone component (#3163) +- Optional health check endpoints to the OIDC Discovery Provider (#3151) +- Pagination support to the server `entry show` command (#3135) + +### Fixed +- A regression in workload SVID minting that caused DNS names not to be set in the SVID (#3215) +- A regression in the server that caused a panic instead of a clean shutdown if a plugin was misconfigured (#3166) + +### Changed +- Directories for UDS endpoints are no longer created by SPIRE on Windows (#3192) + ## [1.3.1] - 2022-06-09 ### Added diff --git a/pkg/common/version/version.go b/pkg/common/version/version.go index e586711ed3..fe3d1f7228 100644 --- a/pkg/common/version/version.go +++ b/pkg/common/version/version.go @@ -8,7 +8,7 @@ const ( // IMPORTANT: When updating, make sure to reconcile the versions list that // is part of the upgrade integration test. See // test/integration/suites/upgrade/README.md for details. 
- Base = "1.3.2" + Base = "1.4.0" ) var ( diff --git a/test/integration/suites/upgrade/versions.txt b/test/integration/suites/upgrade/versions.txt index e8bd3c7758..e58b1417e9 100644 --- a/test/integration/suites/upgrade/versions.txt +++ b/test/integration/suites/upgrade/versions.txt @@ -5,3 +5,4 @@ 1.2.4 1.3.0 1.3.1 +1.3.2 From 7d107a9a593e458dd41363ec622581fb82c4fe51 Mon Sep 17 00:00:00 2001 From: Andrew Harding Date: Tue, 12 Jul 2022 12:26:34 -0600 Subject: [PATCH 05/13] Remove webhook mode from k8s-workload-registrar (#3235) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove webhook mode from k8s-workload-registrar This was deprecated in 1.3.0. Removing for 1.4.0. Signed-off-by: Andrew Harding * Address PR comments Signed-off-by: Andrew Harding Signed-off-by: Christian Görg --- support/k8s/k8s-workload-registrar/README.md | 70 +- support/k8s/k8s-workload-registrar/config.go | 16 +- .../k8s/k8s-workload-registrar/config_test.go | 51 +- .../k8s-workload-registrar/config_webhook.go | 95 --- .../k8s/k8s-workload-registrar/controller.go | 307 -------- .../k8s-workload-registrar/controller_test.go | 717 ------------------ .../k8s-workload-registrar/generate-config.go | 338 --------- .../k8s-workload-registrar/mode-crd/README.md | 11 + support/k8s/k8s-workload-registrar/server.go | 106 --- .../k8s/k8s-workload-registrar/server_test.go | 267 ------- .../k8s-workload-registrar/webhookhandler.go | 204 ----- .../webhookhandler_test.go | 143 ---- test/integration/README.md | 2 +- test/integration/suites/k8s-scratch/00-setup | 30 - .../suites/k8s-scratch/01-apply-config | 34 - .../k8s-scratch/02-check-for-workload-svid | 23 - test/integration/suites/k8s-scratch/README.md | 20 - .../conf/admctrl/admission-control.yaml | 8 - .../k8s-scratch/conf/admctrl/kubeconfig.yaml | 9 - .../k8s-scratch/conf/agent/kustomization.yaml | 10 - .../k8s-scratch/conf/agent/spire-agent.yaml | 167 ---- .../suites/k8s-scratch/conf/kind-config.yaml | 20 - .../server/k8s-workload-registrar-secret.yaml | 9 - .../conf/server/kustomization.yaml | 11 - .../k8s-scratch/conf/server/spire-server.yaml | 334 -------- .../conf/webhook/kustomization.yaml | 10 - .../conf/webhook/validation-webhook.yaml | 22 - .../suites/k8s-scratch/conf/workload.yaml | 35 - .../suites/k8s-scratch/init-kubectl | 8 - test/integration/suites/k8s-scratch/teardown | 12 - test/integration/suites/k8s/00-setup | 30 - test/integration/suites/k8s/01-apply-config | 34 - .../suites/k8s/02-check-for-workload-svid | 23 - test/integration/suites/k8s/README.md | 11 - .../k8s/conf/admctrl/admission-control.yaml | 8 - .../suites/k8s/conf/admctrl/kubeconfig.yaml | 9 - .../suites/k8s/conf/agent/kustomization.yaml | 10 - .../suites/k8s/conf/agent/spire-agent.yaml | 167 ---- .../suites/k8s/conf/kind-config.yaml | 20 - .../server/k8s-workload-registrar-secret.yaml | 9 - .../suites/k8s/conf/server/kustomization.yaml | 11 - .../suites/k8s/conf/server/spire-server.yaml | 332 -------- .../k8s/conf/webhook/kustomization.yaml | 10 - .../k8s/conf/webhook/validation-webhook.yaml | 25 - .../integration/suites/k8s/conf/workload.yaml | 35 - test/integration/suites/k8s/init-kubectl | 8 - test/integration/suites/k8s/teardown | 12 - 47 files changed, 53 insertions(+), 3790 deletions(-) delete mode 100644 support/k8s/k8s-workload-registrar/config_webhook.go delete mode 100644 support/k8s/k8s-workload-registrar/controller.go delete mode 100644 support/k8s/k8s-workload-registrar/controller_test.go delete mode 100644 
support/k8s/k8s-workload-registrar/generate-config.go delete mode 100644 support/k8s/k8s-workload-registrar/server.go delete mode 100644 support/k8s/k8s-workload-registrar/server_test.go delete mode 100644 support/k8s/k8s-workload-registrar/webhookhandler.go delete mode 100644 support/k8s/k8s-workload-registrar/webhookhandler_test.go delete mode 100755 test/integration/suites/k8s-scratch/00-setup delete mode 100755 test/integration/suites/k8s-scratch/01-apply-config delete mode 100755 test/integration/suites/k8s-scratch/02-check-for-workload-svid delete mode 100644 test/integration/suites/k8s-scratch/README.md delete mode 100644 test/integration/suites/k8s-scratch/conf/admctrl/admission-control.yaml delete mode 100644 test/integration/suites/k8s-scratch/conf/admctrl/kubeconfig.yaml delete mode 100644 test/integration/suites/k8s-scratch/conf/agent/kustomization.yaml delete mode 100644 test/integration/suites/k8s-scratch/conf/agent/spire-agent.yaml delete mode 100644 test/integration/suites/k8s-scratch/conf/kind-config.yaml delete mode 100644 test/integration/suites/k8s-scratch/conf/server/k8s-workload-registrar-secret.yaml delete mode 100644 test/integration/suites/k8s-scratch/conf/server/kustomization.yaml delete mode 100644 test/integration/suites/k8s-scratch/conf/server/spire-server.yaml delete mode 100644 test/integration/suites/k8s-scratch/conf/webhook/kustomization.yaml delete mode 100644 test/integration/suites/k8s-scratch/conf/webhook/validation-webhook.yaml delete mode 100644 test/integration/suites/k8s-scratch/conf/workload.yaml delete mode 100644 test/integration/suites/k8s-scratch/init-kubectl delete mode 100755 test/integration/suites/k8s-scratch/teardown delete mode 100755 test/integration/suites/k8s/00-setup delete mode 100755 test/integration/suites/k8s/01-apply-config delete mode 100755 test/integration/suites/k8s/02-check-for-workload-svid delete mode 100644 test/integration/suites/k8s/README.md delete mode 100644 test/integration/suites/k8s/conf/admctrl/admission-control.yaml delete mode 100644 test/integration/suites/k8s/conf/admctrl/kubeconfig.yaml delete mode 100644 test/integration/suites/k8s/conf/agent/kustomization.yaml delete mode 100644 test/integration/suites/k8s/conf/agent/spire-agent.yaml delete mode 100644 test/integration/suites/k8s/conf/kind-config.yaml delete mode 100644 test/integration/suites/k8s/conf/server/k8s-workload-registrar-secret.yaml delete mode 100644 test/integration/suites/k8s/conf/server/kustomization.yaml delete mode 100644 test/integration/suites/k8s/conf/server/spire-server.yaml delete mode 100644 test/integration/suites/k8s/conf/webhook/kustomization.yaml delete mode 100644 test/integration/suites/k8s/conf/webhook/validation-webhook.yaml delete mode 100644 test/integration/suites/k8s/conf/workload.yaml delete mode 100644 test/integration/suites/k8s/init-kubectl delete mode 100755 test/integration/suites/k8s/teardown diff --git a/support/k8s/k8s-workload-registrar/README.md b/support/k8s/k8s-workload-registrar/README.md index a9e25a2dd6..28066ccc81 100644 --- a/support/k8s/k8s-workload-registrar/README.md +++ b/support/k8s/k8s-workload-registrar/README.md @@ -31,19 +31,9 @@ The configuration file is a **required** by the registrar. It contains | `cluster` | string | required | Logical cluster to register nodes/workloads under. Must match the SPIRE SERVER PSAT node attestor configuration. 
| | | `pod_label` | string | optional | The pod label used for [Label Based Workload Registration](#label-based-workload-registration) | | | `pod_annotation` | string | optional | The pod annotation used for [Annotation Based Workload Registration](#annotation-based-workload-registration) | | -| `mode` | string | optional | How to run the registrar, either using a `"webhook"`, `"reconcile`" or `"crd"`. See [Differences](#differences-between-modes) for more details. | `"webhook"` | +| `mode` | string | required | How to run the registrar, either `"reconcile"` or `"crd"`. See [Differences](#differences-between-modes) for more details. | | | `disabled_namespaces` | []string | optional | Comma separated list of namespaces to disable auto SVID generation for | `"kube-system", "kube-public"` | -The following configuration directives are specific to `"webhook"` mode: - -| Key | Type | Required? | Description | Default | -| -------------------------- | --------| ---------| ----------------------------------------- | ------- | -| `addr` | string | required | Address to bind the HTTPS listener to | `":8443"` | -| `cert_path` | string | required | Path on disk to the PEM-encoded server TLS certificate | `"cert.pem"` | -| `key_path` | string | required | Path on disk to the PEM-encoded server TLS key | `"key.pem"` | -| `cacert_path` | string | required | Path on disk to the CA certificate used to verify the client (i.e. API server) | `"cacert.pem"` | -| `insecure_skip_client_verification` | boolean | required | If true, skips client certificate verification (in which case `cacert_path` is ignored). See [Security Considerations](#security-considerations) for more details. | `false` | - The following configuration directives are specific to `"reconcile"` mode: | Key | Type | Required? | Description | Default | @@ -67,16 +57,15 @@ cluster = "production" ``` ## Workload Registration -When running in webhook, reconcile, or crd mode with `pod_controller=true` entries will be automatically created for +When running in reconcile or crd mode with `pod_controller=true`, entries will be automatically created for Pods. The available workload registration modes are: | Registration Mode | pod_label | pod_annotation | identity_template | Service Account Based | | ----------------- | --------- | -------------- | ----------------- | --------------- | -| `webhook` | as specified by pod_label | as specified by pod_annotation | _unavailable_ | service account | | `reconcile` | as specified by pod_label | as specified by pod_annotation | _unavailable_ | service account | | `crd` | as specified by pod_label | as specified by pod_annotation | as specified by identity_template | _unavailable_ | -If using `webhook` and `reconcile` modes with [Service Account Based SPIFFE IDs](#service-account-based-workload-registration), don't specify either `pod_label` or `pod_annotation`. If you use Label Based SPIFFE IDs, specify only `pod_label`. If you use Annotation Based SPIFFE IDs, specify only `pod_annotation`. 
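Since `mode` is now required, a minimal `"reconcile"` configuration is sketched below for illustration only; the trust domain, cluster name, socket path, and label value are placeholder assumptions, and the tables above list the full set of keys:

```
log_level = "info"
mode = "reconcile"
trust_domain = "production.example.org"
cluster = "production"
server_socket_path = "/tmp/spire-server/private/api.sock"

# Optional: label-based workload registration. Specify at most one of
# pod_label and pod_annotation; omit both to use service account based
# registration.
pod_label = "spire-workload"
```

Note that omitting `mode` (which previously defaulted to `"webhook"`) now causes configuration parsing to fail with an error listing the valid values.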
For `crd` mode, if neither `pod_label` nor `pod_annotation` workload registration mode is selected, @@ -188,41 +177,6 @@ An example can be found in `mode-reconcile/config/role.yaml`, which you would ap See [Quick Start for CRD Kubernetes Workload Registrar](mode-crd/README.md#quick-start) -### Webhook Mode Configuration -The registrar will need access to its server keypair and the CA certificate it uses to verify clients. - -The following K8S objects are required to set up the validating admission controller: -* `Service` pointing to the registrar port within the spire-server container -* `ValidatingWebhookConfiguration` configuring the registrar as a validating admission controller. - -Additionally, unless you disable client authentication (`insecure_skip_client_verification`), you will need: -* `Config` with a user entry for the registrar service client containing the client certificate/key the API server should use to authenticate with the registrar. -* `AdmissionConfiguration` describing where the API server can locate the file containing the `Config`. This file is passed to the API server via the `--admission-control-config-file` flag. - -For convenience, a command line utility is provided to generate authentication -material and relevant Kubernetes configuration YAML. - -``` -$ go run generate-config.go -.... YAML configuration dump .... -``` - -#### Webhook mode Security Considerations - -The registrar authenticates clients by default. This is a very important aspect -of the overall security of the registrar since the registrar can be used to -provide indirect access to the SPIRE server API, albeit scoped. It is *NOT* -recommended to skip client verification (via the -`insecure_skip_client_verification` configurable) unless you fully understand -the risks. - -#### Migrating away from the webhook - -The k8s ValidatingWebhookConfiguration will need to be removed or pods may fail admission. If you used the default -configuration this can be done with: - -`kubectl validatingwebhookconfiguration delete k8s-workload-registrar-webhook` - ## DNS names Both `"reconcile"` and `"crd"` mode provide the ability to add DNS names to registration entries for pods. They @@ -238,19 +192,11 @@ to entries. This is known to affect etcd. ## Differences between modes -The `"webhook"` mode uses a Validating Admission Webhook to capture pod creation/deletion events at admission time. It -was the first of the registrar implementations, but suffers from the following problems: -* Race conditions between add and delete for StatefulSets will regularly lead to StatefulSets without entries; -* Unavailability of the webhook either has to block admission entirely, or you'll end up with pods with no entries; -* Spire server errors have to block admission entirely, or you'll end up with pods with no entries; -* It will not clean up left behind entries for pods deleted while the webhook/spire-server was unavailable; -* Entries are not parented to individual Nodes, all SVIDs are flooded to all agents in a cluster, which severely limits scalability. -Use of the `"webhook"` mode is thus strongly discouraged, but it remains the default for backward compatibility reasons. - -The `"reconcile"` mode and `"crd"` mode both make use of reconciling controllers instead of webhooks. 
`"reconcile"` mode, -and `"crd"` mode with the pod_controller enabled, have similar automated workload creation functionality to webhook, but -they do not suffer from the same race conditions, are capable of recovering from (and cleaning up after) failure of the registrar, -and both also ensure that automatically created entries for Pods are limited to the appropriate Nodes to prevent SVID +The `"reconcile"` and `"crd"` modes both make use of reconciling controllers. Both modes, +with the pod_controller enabled, have similar automated workload creation +functionality and are capable of recovering from (and cleaning up after) +failure of the registrar. Each also ensure that automatically created +entries for Pods are limited to the appropriate Nodes to prevent SVID flooding. When used in this way, `"reconcile"` may be slightly faster to create new entries than `"crd"` mode, and requires less configuration. diff --git a/support/k8s/k8s-workload-registrar/config.go b/support/k8s/k8s-workload-registrar/config.go index fe5864efc8..31c16a623d 100644 --- a/support/k8s/k8s-workload-registrar/config.go +++ b/support/k8s/k8s-workload-registrar/config.go @@ -26,9 +26,7 @@ const ( defaultLogLevel = "info" modeCRD = "crd" - modeWebhook = "webhook" modeReconcile = "reconcile" - defaultMode = modeWebhook ) type Mode interface { @@ -58,7 +56,6 @@ type CommonMode struct { } func (c *CommonMode) ParseConfig(hclConfig string) (err error) { - c.Mode = defaultMode if err := hcl.Decode(c, hclConfig); err != nil { return errs.New("unable to decode configuration: %v", err) } @@ -89,8 +86,11 @@ func (c *CommonMode) ParseConfig(hclConfig string) (err error) { if c.PodLabel != "" && c.PodAnnotation != "" { return errs.New("workload registration mode specification is incorrect, can't specify both pod_label and pod_annotation") } - if c.Mode != modeCRD && c.Mode != modeWebhook && c.Mode != modeReconcile { - return errs.New("invalid mode \"%s\", valid values are %s, %s and %s", c.Mode, modeCRD, modeWebhook, modeReconcile) + if c.Mode == "" { + return errs.New("mode must be specified, valid values are %q and %q", modeCRD, modeReconcile) + } + if c.Mode != modeCRD && c.Mode != modeReconcile { + return errs.New("invalid mode %q, valid values are %q and %q", c.Mode, modeCRD, modeReconcile) } if c.DisabledNamespaces == nil { c.DisabledNamespaces = defaultDisabledNamespaces() @@ -137,9 +137,9 @@ func LoadMode(path string) (Mode, error) { CommonMode: *c, } default: - mode = &WebhookMode{ - CommonMode: *c, - } + // This case is defensive since ParseConfig ensures we have + // a valid mode value. 
+ return nil, errs.New("unknown mode: %q", c.Mode) } err = mode.ParseConfig(string(hclBytes)) diff --git a/support/k8s/k8s-workload-registrar/config_test.go b/support/k8s/k8s-workload-registrar/config_test.go index 7912e93242..643e664fe7 100644 --- a/support/k8s/k8s-workload-registrar/config_test.go +++ b/support/k8s/k8s-workload-registrar/config_test.go @@ -15,6 +15,7 @@ var ( trust_domain = "domain.test" cluster = "CLUSTER" server_socket_path = "SOCKETPATH" + mode = "reconcile" ` ) @@ -35,66 +36,65 @@ func TestLoadMode(t *testing.T) { config, err := LoadMode(confPath) require.NoError(err) - require.Equal(&WebhookMode{ + require.Equal(&ReconcileMode{ CommonMode: CommonMode{ ServerSocketPath: "SOCKETPATH", ServerAddress: "unix://SOCKETPATH", TrustDomain: "domain.test", Cluster: "CLUSTER", LogLevel: defaultLogLevel, - Mode: "webhook", + Mode: "reconcile", DisabledNamespaces: []string{"kube-system", "kube-public"}, trustDomain: spiffeid.RequireTrustDomainFromString("domain.test"), }, - Addr: ":8443", - CertPath: defaultCertPath, - KeyPath: defaultKeyPath, - CaCertPath: defaultCaCertPath, + ControllerName: "spire-k8s-registrar", + ClusterDNSZone: "cluster.local", + LeaderElectionResourceLock: "configmaps", + MetricsAddr: ":8080", }, config) testCases := []struct { name string in string - out *WebhookMode + out *ReconcileMode err string }{ { name: "defaults", in: testMinimalConfig, - out: &WebhookMode{ + out: &ReconcileMode{ CommonMode: CommonMode{ LogLevel: defaultLogLevel, ServerSocketPath: "SOCKETPATH", ServerAddress: "unix://SOCKETPATH", TrustDomain: "domain.test", Cluster: "CLUSTER", - Mode: "webhook", + Mode: "reconcile", DisabledNamespaces: []string{"kube-system", "kube-public"}, trustDomain: spiffeid.RequireTrustDomainFromString("domain.test"), }, - Addr: ":8443", - CertPath: defaultCertPath, - KeyPath: defaultKeyPath, - CaCertPath: defaultCaCertPath, - InsecureSkipClientVerification: false, + ControllerName: "spire-k8s-registrar", + ClusterDNSZone: "cluster.local", + LeaderElectionResourceLock: "configmaps", + MetricsAddr: ":8080", }, }, { name: "overrides", in: ` + mode = "reconcile" log_level = "LEVELOVERRIDE" log_path = "PATHOVERRIDE" - addr = ":1234" - cert_path = "CERTOVERRIDE" - key_path = "KEYOVERRIDE" - cacert_path = "CACERTOVERRIDE" - insecure_skip_client_verification = true server_socket_path = "SOCKETPATHOVERRIDE" trust_domain = "domain.test" cluster = "CLUSTEROVERRIDE" pod_label = "PODLABEL" + controller_name = "override" + cluster_dns_zone = "override.local" + leader_election_resource_lock = "leases" + metrics_addr = ":8081" `, - out: &WebhookMode{ + out: &ReconcileMode{ CommonMode: CommonMode{ LogLevel: "LEVELOVERRIDE", LogPath: "PATHOVERRIDE", @@ -103,15 +103,14 @@ func TestLoadMode(t *testing.T) { TrustDomain: "domain.test", Cluster: "CLUSTEROVERRIDE", PodLabel: "PODLABEL", - Mode: "webhook", + Mode: "reconcile", DisabledNamespaces: []string{"kube-system", "kube-public"}, trustDomain: spiffeid.RequireTrustDomainFromString("domain.test"), }, - Addr: ":1234", - CertPath: "CERTOVERRIDE", - KeyPath: "KEYOVERRIDE", - CaCertPath: "CACERTOVERRIDE", - InsecureSkipClientVerification: true, + ControllerName: "override", + ClusterDNSZone: "override.local", + LeaderElectionResourceLock: "leases", + MetricsAddr: ":8081", }, }, { diff --git a/support/k8s/k8s-workload-registrar/config_webhook.go b/support/k8s/k8s-workload-registrar/config_webhook.go deleted file mode 100644 index 395a79b540..0000000000 --- a/support/k8s/k8s-workload-registrar/config_webhook.go +++ /dev/null @@ -1,95 
+0,0 @@ -package main - -import ( - "context" - - "github.com/hashicorp/hcl" - "github.com/zeebo/errs" -) - -const ( - defaultAddr = ":8443" - defaultCertPath = "cert.pem" - defaultKeyPath = "key.pem" - defaultCaCertPath = "cacert.pem" -) - -type WebhookMode struct { - CommonMode - Addr string `hcl:"addr"` - CaCertPath string `hcl:"cacert_path"` - CertPath string `hcl:"cert_path"` - InsecureSkipClientVerification bool `hcl:"insecure_skip_client_verification"` - KeyPath string `hcl:"key_path"` -} - -func (c *WebhookMode) ParseConfig(hclConfig string) error { - if err := hcl.Decode(c, hclConfig); err != nil { - return errs.New("unable to decode configuration: %v", err) - } - - if c.Addr == "" { - c.Addr = defaultAddr - } - if c.CertPath == "" { - c.CertPath = defaultCertPath - } - if c.CaCertPath == "" { - c.CaCertPath = defaultCaCertPath - } - if c.KeyPath == "" { - c.KeyPath = defaultKeyPath - } - - return nil -} - -func (c *WebhookMode) Run(ctx context.Context) error { - log, err := c.SetupLogger() - if err != nil { - return errs.New("error setting up logging: %v", err) - } - defer log.Close() - - // TODO: Remove webhook mode in SPIRE 1.4 (see #2919) - log.Warn("The webhook mode is deprecated and will be removed in a future release.") - - entryClient, err := c.EntryClient(ctx, log) - if err != nil { - return errs.New("failed to dial server: %v", err) - } - - disabledNamespacesMap := make(map[string]bool, len(c.DisabledNamespaces)) - for _, ns := range c.DisabledNamespaces { - disabledNamespacesMap[ns] = true - } - controller := NewController(ControllerConfig{ - Log: log, - E: entryClient, - TrustDomain: c.TrustDomain, - Cluster: c.Cluster, - PodLabel: c.PodLabel, - PodAnnotation: c.PodAnnotation, - DisabledNamespaces: disabledNamespacesMap, - }) - - log.Info("Initializing registrar") - if err := controller.Initialize(ctx); err != nil { - return err - } - - server, err := NewServer(ServerConfig{ - Log: log, - Addr: c.Addr, - Handler: NewWebhookHandler(log, controller), - CertPath: c.CertPath, - KeyPath: c.KeyPath, - CaCertPath: c.CaCertPath, - InsecureSkipClientVerification: c.InsecureSkipClientVerification, - }) - if err != nil { - return err - } - - return server.Run(ctx) -} diff --git a/support/k8s/k8s-workload-registrar/controller.go b/support/k8s/k8s-workload-registrar/controller.go deleted file mode 100644 index f5b57e6eb6..0000000000 --- a/support/k8s/k8s-workload-registrar/controller.go +++ /dev/null @@ -1,307 +0,0 @@ -package main - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - - "github.com/sirupsen/logrus" - "github.com/spiffe/go-spiffe/v2/spiffeid" - entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1" - "github.com/spiffe/spire-api-sdk/proto/spire/api/types" - "github.com/spiffe/spire/pkg/common/idutil" - "github.com/spiffe/spire/support/k8s/k8s-workload-registrar/federation" - "github.com/zeebo/errs" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - admv1 "k8s.io/api/admission/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -type ControllerConfig struct { - Log logrus.FieldLogger - E entryv1.EntryClient - TrustDomain string - Cluster string - PodLabel string - PodAnnotation string - DisabledNamespaces map[string]bool -} - -type Controller struct { - c ControllerConfig -} - -func NewController(config ControllerConfig) *Controller { - return &Controller{ - c: config, - } -} - -func (c *Controller) Initialize(ctx context.Context) error { - parentID, err := c.makeID("%s", 
idutil.ServerIDPath) - if err != nil { - return err - } - spiffeID, err := c.nodeID() - if err != nil { - return err - } - // ensure there is a node registration entry for PSAT nodes in the cluster. - return c.createEntry(ctx, &types.Entry{ - ParentId: parentID, - SpiffeId: spiffeID, - Selectors: []*types.Selector{ - {Type: "k8s_psat", Value: fmt.Sprintf("cluster:%s", c.c.Cluster)}, - }, - }) -} - -func (c *Controller) ReviewAdmission(ctx context.Context, ar admv1.AdmissionReview) (*admv1.AdmissionResponse, error) { - req := ar.Request - c.c.Log.WithFields(logrus.Fields{ - "namespace": req.Namespace, - "name": req.Name, - "kind": req.Kind.Kind, - "version": req.Kind.Version, - "operation": req.Operation, - }).Debug("ReviewAdmission called") - - if err := c.reviewAdmission(ctx, req); err != nil { - return nil, err - } - - return &admv1.AdmissionResponse{ - UID: req.UID, - Allowed: true, - }, nil -} - -// reviewAdmission handles CREATE and DELETE requests for pods in -// non-kubernetes namespaces. Ideally the ValidatingAdmissionWebhook -// configuration has filters in place to restrict the admission requests. -func (c *Controller) reviewAdmission(ctx context.Context, req *admv1.AdmissionRequest) error { - if _, disabled := c.c.DisabledNamespaces[req.Namespace]; disabled { - return nil - } - - if req.Kind != (metav1.GroupVersionKind{Version: "v1", Kind: "Pod"}) { - c.c.Log.WithFields(logrus.Fields{ - "version": req.Kind.Version, - "kind": req.Kind.Kind, - }).Warn("Admission request received for unhandled object; check filters") - return nil - } - - switch req.Operation { - case admv1.Create: - pod := new(corev1.Pod) - if err := json.Unmarshal(req.Object.Raw, pod); err != nil { - return errs.New("unable to unmarshal %s/%s object: %v", req.Kind.Version, req.Kind.Kind, err) - } - return c.createPodEntry(ctx, pod) - case admv1.Delete: - return c.deletePodEntry(ctx, req.Namespace, req.Name) - default: - c.c.Log.WithFields(logrus.Fields{ - "operation": req.Operation, - }).Warn("Admission request received for unhandled pod operation; check filters") - } - - return nil -} - -// podSpiffeID returns the desired spiffe ID for the pod, or nil if it should be ignored -func (c *Controller) podSpiffeID(pod *corev1.Pod) (*types.SPIFFEID, error) { - if c.c.PodLabel != "" { - // the controller has been configured with a pod label. if the pod - // has that label, use the value to construct the pod entry. otherwise - // ignore the pod altogether. - if labelValue, ok := pod.Labels[c.c.PodLabel]; ok { - return c.makeID("/%s", labelValue) - } - return nil, nil - } - - if c.c.PodAnnotation != "" { - // the controller has been configured with a pod annotation. if the pod - // has that annotation, use the value to construct the pod entry. otherwise - // ignore the pod altogether. - if annotationValue, ok := pod.Annotations[c.c.PodAnnotation]; ok { - return c.makeID("/%s", annotationValue) - } - return nil, nil - } - - // the controller has not been configured with a pod label or a pod annotation. - // create an entry based on the service account. 
- return c.makeID("/ns/%s/sa/%s", pod.Namespace, pod.Spec.ServiceAccountName) -} - -func (c *Controller) createPodEntry(ctx context.Context, pod *corev1.Pod) error { - spiffeID, err := c.podSpiffeID(pod) - if err != nil { - return fmt.Errorf("unable to determine spiffeID: %w", err) - } - - // If we have no spiffe ID for the pod, do nothing - if spiffeID == nil { - return nil - } - - parentID, err := c.nodeID() - if err != nil { - return fmt.Errorf("unable to determine parentID: %w", err) - } - - federationDomains := federation.GetFederationDomains(pod) - - return c.createEntry(ctx, &types.Entry{ - ParentId: parentID, - SpiffeId: spiffeID, - Selectors: []*types.Selector{ - namespaceSelector(pod.Namespace), - podNameSelector(pod.Name), - }, - FederatesWith: federationDomains, - }) -} - -func (c *Controller) deletePodEntry(ctx context.Context, namespace, name string) error { - log := c.c.Log.WithFields(logrus.Fields{ - "ns": namespace, - "pod": name, - }) - - listResp, err := c.c.E.ListEntries(ctx, &entryv1.ListEntriesRequest{ - Filter: &entryv1.ListEntriesRequest_Filter{ - BySelectors: &types.SelectorMatch{ - Selectors: []*types.Selector{ - namespaceSelector(namespace), - podNameSelector(name), - }, - }, - }, - // Only the ID is needed, which is implicit in the mask. - OutputMask: &types.EntryMask{}, - }) - if err != nil { - return errs.New("unable to list pod entries: %v", err) - } - - log.Info("Deleting pod entries") - if len(listResp.Entries) > 1 { - log.WithField("count", len(listResp.Entries)).Warn("Multiple pod entries found to delete") - } - - entriesToDelete := make([]string, 0, len(listResp.Entries)) - for _, entry := range listResp.Entries { - entriesToDelete = append(entriesToDelete, entry.Id) - } - - deleteResp, err := c.c.E.BatchDeleteEntry(ctx, &entryv1.BatchDeleteEntryRequest{ - Ids: entriesToDelete, - }) - if err != nil { - return errs.New("unable to delete pod entries: %v", err) - } - - var errGroup errs.Group - for _, result := range deleteResp.Results { - err := errorFromStatus(result.Status) - switch status.Code(err) { - case codes.OK, codes.NotFound: - default: - log.WithError(err).Error("Failed deleting pod entry") - errGroup.Add(errs.New("unable to delete entry %q: %v", result.Id, err)) - } - } - return errGroup.Err() -} - -func (c *Controller) nodeID() (*types.SPIFFEID, error) { - return c.makeID("/k8s-workload-registrar/%s/node", c.c.Cluster) -} - -func (c *Controller) makeID(pathFmt string, pathArgs ...interface{}) (*types.SPIFFEID, error) { - path, err := spiffeid.FormatPath(pathFmt, pathArgs...) - if err != nil { - return nil, err - } - return &types.SPIFFEID{ - TrustDomain: c.c.TrustDomain, - Path: path, - }, nil -} - -func (c *Controller) createEntry(ctx context.Context, entry *types.Entry) error { - // ensure there is a node registration entry for PSAT nodes in the cluster. - log := c.c.Log.WithFields(logrus.Fields{ - "parent_id": entry.ParentId, - "spiffe_id": entry.SpiffeId, - "selectors": selectorsField(entry.Selectors), - }) - - resp, err := c.c.E.BatchCreateEntry(ctx, &entryv1.BatchCreateEntryRequest{ - Entries: []*types.Entry{entry}, - }) - if err != nil { - log.WithError(err).Error("Failed to create pod entry") - return err - } - - // These checks are purely defensive. 
- switch { - case len(resp.Results) > 1: - return errors.New("batch create response has too many results") - case len(resp.Results) < 1: - return errors.New("batch create response result empty") - } - - err = errorFromStatus(resp.Results[0].Status) - switch status.Code(err) { - case codes.OK, codes.AlreadyExists: - log.Info("Created pod entry") - return nil - default: - log.WithError(err).Error("Failed to create pod entry") - return err - } -} - -func namespaceSelector(namespace string) *types.Selector { - return &types.Selector{ - Type: "k8s", - Value: fmt.Sprintf("ns:%s", namespace), - } -} - -func podNameSelector(podName string) *types.Selector { - return &types.Selector{ - Type: "k8s", - Value: fmt.Sprintf("pod-name:%s", podName), - } -} - -func selectorsField(selectors []*types.Selector) string { - var buf bytes.Buffer - for i, selector := range selectors { - if i > 0 { - buf.WriteString(",") - } - buf.WriteString(selector.Type) - buf.WriteString(":") - buf.WriteString(selector.Value) - } - return buf.String() -} - -func errorFromStatus(s *types.Status) error { - if s == nil { - return errors.New("result status is unexpectedly nil") - } - return status.Error(codes.Code(s.Code), s.Message) -} diff --git a/support/k8s/k8s-workload-registrar/controller_test.go b/support/k8s/k8s-workload-registrar/controller_test.go deleted file mode 100644 index 9d5494284f..0000000000 --- a/support/k8s/k8s-workload-registrar/controller_test.go +++ /dev/null @@ -1,717 +0,0 @@ -package main - -import ( - "context" - "fmt" - "sort" - "sync" - "testing" - - "github.com/sirupsen/logrus/hooks/test" - "github.com/spiffe/go-spiffe/v2/spiffeid" - entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1" - "github.com/spiffe/spire-api-sdk/proto/spire/api/types" - "github.com/spiffe/spire/test/spiretest" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/proto" - admv1 "k8s.io/api/admission/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -var ( - fakePodWithLabel = ` -{ - "kind": "Pod", - "apiVersion": "v1", - "metadata": { - "name": "PODNAME", - "namespace": "NAMESPACE", - "labels": { - "spire-workload": "WORKLOAD" - } - }, - "spec": { - "serviceAccountName": "SERVICEACCOUNT" - } -} -` - fakePodWithAnnotation = ` -{ - "kind": "Pod", - "apiVersion": "v1", - "metadata": { - "name": "PODNAME", - "namespace": "NAMESPACE", - "annotations": { - "spiffe.io/spiffe-id": "ENV/WORKLOAD" - } - }, - "spec": { - "serviceAccountName": "SERVICEACCOUNT" - } -} -` - - fakePodWithFederation = ` -{ - "kind": "Pod", - "apiVersion": "v1", - "metadata": { - "name": "PODNAME", - "namespace": "NAMESPACE", - "annotations": { - "spiffe.io/federatesWith": "example.net" - } - }, - "spec": { - "serviceAccountName": "SERVICEACCOUNT" - } -} -` - - fakePodWithMultiFederation = ` -{ - "kind": "Pod", - "apiVersion": "v1", - "metadata": { - "name": "PODNAME", - "namespace": "NAMESPACE", - "annotations": { - "spiffe.io/federatesWith": "example.net,example.io" - } - }, - "spec": { - "serviceAccountName": "SERVICEACCOUNT" - } -} -` - fakePodOnlySA = ` -{ - "kind": "Pod", - "apiVersion": "v1", - "metadata": { - "name": "PODNAME-NOLABEL", - "namespace": "NAMESPACE" - }, - "spec": { - "serviceAccountName": "SERVICEACCOUNT" - } -} -` -) - -func TestControllerInitialization(t *testing.T) { - controller, r := newTestController("", "") - - // Initialize 
should create the registration entry for the cluster nodes - require.NoError(t, controller.Initialize(context.Background())) - requireEntriesEqual(t, []*types.Entry{ - { - Id: "00000001", - ParentId: mustIDFromString("spiffe://domain.test/spire/server"), - SpiffeId: mustIDFromString("spiffe://domain.test/k8s-workload-registrar/CLUSTER/node"), - Selectors: []*types.Selector{ - {Type: "k8s_psat", Value: "cluster:CLUSTER"}, - }, - }, - }, r.GetEntries()) -} - -func TestControllerIgnoresKubeNamespaces(t *testing.T) { - controller, r := newTestController("", "") - - for _, namespace := range []string{"kube-system", "kube-public"} { - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: namespace, - Name: "PODNAME", - Operation: "CREATE", - Object: runtime.RawExtension{ - Raw: []byte(fakePodWithLabel), - }, - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{ - Request: request, - }) - require.Empty(t, r.GetEntries(), 0) - } -} - -func TestControllerIgnoresNonPods(t *testing.T) { - controller, r := newTestController("", "") - - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "ServiceAccount", - }, - Namespace: "NAMESPACE", - Name: "SERVICEACCOUNTNAME", - Operation: "CREATE", - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{ - Request: request, - }) - require.Empty(t, r.GetEntries(), 0) -} - -func TestControllerFailsIfPodUnparsable(t *testing.T) { - controller, _ := newTestController("", "") - - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: "NAMESPACE", - Name: "POD", - Operation: "CREATE", - } - requireReviewAdmissionFailure(t, controller, admv1.AdmissionReview{Request: request}, "unable to unmarshal v1/Pod object") -} - -func TestControllerIgnoresPodOperationsOtherThanCreateAndDelete(t *testing.T) { - controller, _ := newTestController("", "") - - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: "NAMESPACE", - Name: "POD", - Operation: "UPDATE", - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{ - Request: request, - }) -} - -func TestControllerServiceAccountBasedRegistration(t *testing.T) { - controller, r := newTestController("", "") - - // Send in a POD CREATE and assert that it will be admitted - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: "NAMESPACE", - Name: "PODNAME", - Operation: "CREATE", - Object: runtime.RawExtension{ - Raw: []byte(fakePodWithLabel), - }, - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{ - Request: request, - }) - - // Assert that the registration entry for the pod was created - requireEntriesEqual(t, []*types.Entry{ - { - Id: "00000001", - ParentId: mustIDFromString("spiffe://domain.test/k8s-workload-registrar/CLUSTER/node"), - SpiffeId: mustIDFromString("spiffe://domain.test/ns/NAMESPACE/sa/SERVICEACCOUNT"), - Selectors: []*types.Selector{ - {Type: "k8s", Value: "ns:NAMESPACE"}, - {Type: "k8s", Value: "pod-name:PODNAME"}, - }, - }, - }, r.GetEntries()) -} - -func TestControllerCleansUpOnPodDeletion(t *testing.T) { - controller, r := newTestController("", "") - - // create an entry for the POD in one service account - r.CreateEntry(&types.Entry{ - Selectors: []*types.Selector{ - 
namespaceSelector("NAMESPACE"), - podNameSelector("PODNAME"), - }, - }) - - // create an entry for the POD in another service account (should be rare - // in practice but we need to handle it). - r.CreateEntry(&types.Entry{ - Selectors: []*types.Selector{ - namespaceSelector("OTHERNAMESPACE"), - podNameSelector("PODNAME"), - }, - }) - - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: "NAMESPACE", - Name: "PODNAME", - Operation: "DELETE", - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{ - Request: request, - }) - - // Assert that the right registration entry for the pod was removed - requireEntriesEqual(t, []*types.Entry{ - { - Id: "00000002", - Selectors: []*types.Selector{ - {Type: "k8s", Value: "ns:OTHERNAMESPACE"}, - {Type: "k8s", Value: "pod-name:PODNAME"}, - }, - }, - }, r.GetEntries()) -} - -func TestControllerLabelBasedRegistration(t *testing.T) { - controller, r := newTestController("spire-workload", "") - - // Send in a POD CREATE and assert that it will be admitted - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: "NAMESPACE", - Name: "PODNAME", - Operation: "CREATE", - Object: runtime.RawExtension{ - Raw: []byte(fakePodWithLabel), - }, - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{ - Request: request, - }) - - // Assert that the registration entry for the pod was created - requireEntriesEqual(t, []*types.Entry{ - { - Id: "00000001", - ParentId: mustIDFromString("spiffe://domain.test/k8s-workload-registrar/CLUSTER/node"), - SpiffeId: mustIDFromString("spiffe://domain.test/WORKLOAD"), - Selectors: []*types.Selector{ - {Type: "k8s", Value: "ns:NAMESPACE"}, - {Type: "k8s", Value: "pod-name:PODNAME"}, - }, - }, - }, r.GetEntries()) -} - -func TestControllerLabelBasedRegistrationIgnoresPodsWithoutLabel(t *testing.T) { - controller, r := newTestController("spire-workload", "") - - // Send in a POD CREATE and assert that it will be admitted - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: "NAMESPACE", - Name: "PODNAME", - Operation: "CREATE", - Object: runtime.RawExtension{ - Raw: []byte(fakePodOnlySA), - }, - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{ - Request: request, - }) - - // Assert that the registration entry for the pod was created - require.Len(t, r.GetEntries(), 0) -} - -func TestPodSpiffeId(t *testing.T) { - for _, testCase := range []struct { - name string - expectedSpiffeID string - configLabel string - podLabel string - configAnnotation string - podAnnotation string - podNamespace string - podServiceAccount string - }{ - { - name: "using namespace and serviceaccount", - expectedSpiffeID: "spiffe://domain.test/ns/NS/sa/SA", - podNamespace: "NS", - podServiceAccount: "SA", - }, - { - name: "using label", - expectedSpiffeID: "spiffe://domain.test/LABEL", - configLabel: "spiffe.io/label", - podLabel: "LABEL", - }, - { - name: "using annotation", - expectedSpiffeID: "spiffe://domain.test/ANNOTATION", - configAnnotation: "spiffe.io/annotation", - podAnnotation: "ANNOTATION", - }, - { - name: "ignore unannotated", - configAnnotation: "someannotation", - expectedSpiffeID: "", - }, - { - name: "ignore unlabelled", - configLabel: "somelabel", - expectedSpiffeID: "", - }, - } { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - c, _ 
:= newTestController(testCase.configLabel, testCase.configAnnotation) - - // Set up pod: - pod := &corev1.Pod{ - Spec: corev1.PodSpec{ - ServiceAccountName: testCase.podServiceAccount, - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: testCase.podNamespace, - Labels: map[string]string{}, - Annotations: map[string]string{}, - }, - } - if testCase.configLabel != "" && testCase.podLabel != "" { - pod.Labels[testCase.configLabel] = testCase.podLabel - } - if testCase.configAnnotation != "" && testCase.podAnnotation != "" { - pod.Annotations[testCase.configAnnotation] = testCase.podAnnotation - } - - // Test: - spiffeID, err := c.podSpiffeID(pod) - - // Verify result: - require.NoError(t, err) - require.Equal(t, testCase.expectedSpiffeID, stringFromID(spiffeID)) - }) - } -} - -func TestControllerAnnotationBasedRegistration(t *testing.T) { - controller, r := newTestController("", "spiffe.io/spiffe-id") - - // Send in a POD CREATE and assert that it will be admitted - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: "NAMESPACE", - Name: "PODNAME", - Operation: "CREATE", - Object: runtime.RawExtension{ - Raw: []byte(fakePodWithAnnotation), - }, - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{ - Request: request, - }) - - // Assert that the registration entry for the pod was created - requireEntriesEqual(t, []*types.Entry{ - { - Id: "00000001", - ParentId: mustIDFromString("spiffe://domain.test/k8s-workload-registrar/CLUSTER/node"), - SpiffeId: mustIDFromString("spiffe://domain.test/ENV/WORKLOAD"), - Selectors: []*types.Selector{ - {Type: "k8s", Value: "ns:NAMESPACE"}, - {Type: "k8s", Value: "pod-name:PODNAME"}, - }, - }, - }, r.GetEntries()) -} - -func TestControllerFederationBasedRegistration(t *testing.T) { - controller, r := newTestController("", "") - - // Send in a POD CREATE and assert that it will be admitted - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: "NAMESPACE", - Name: "PODNAME", - Operation: "CREATE", - Object: runtime.RawExtension{ - Raw: []byte(fakePodWithFederation), - }, - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{ - Request: request, - }) - - // Assert that the registration entry for the pod was created - requireEntriesEqual(t, []*types.Entry{ - { - Id: "00000001", - ParentId: mustIDFromString("spiffe://domain.test/k8s-workload-registrar/CLUSTER/node"), - SpiffeId: mustIDFromString("spiffe://domain.test/ns/NAMESPACE/sa/SERVICEACCOUNT"), - FederatesWith: []string{"example.net"}, - Selectors: []*types.Selector{ - {Type: "k8s", Value: "ns:NAMESPACE"}, - {Type: "k8s", Value: "pod-name:PODNAME"}, - }, - }, - }, r.GetEntries()) -} - -func TestControllerMultiFederationBasedRegistration(t *testing.T) { - controller, r := newTestController("", "") - - // Send in a POD CREATE and assert that it will be admitted - request := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: "NAMESPACE", - Name: "PODNAME", - Operation: "CREATE", - Object: runtime.RawExtension{ - Raw: []byte(fakePodWithMultiFederation), - }, - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{ - Request: request, - }) - - // Assert that the registration entry for the pod was created - requireEntriesEqual(t, []*types.Entry{ - { - Id: "00000001", - ParentId: 
mustIDFromString("spiffe://domain.test/k8s-workload-registrar/CLUSTER/node"), - SpiffeId: mustIDFromString("spiffe://domain.test/ns/NAMESPACE/sa/SERVICEACCOUNT"), - FederatesWith: []string{"example.net", "example.io"}, - Selectors: []*types.Selector{ - {Type: "k8s", Value: "ns:NAMESPACE"}, - {Type: "k8s", Value: "pod-name:PODNAME"}, - }, - }, - }, r.GetEntries()) -} - -func TestControllerAnnotationBasedRegistrationIgnoresPodsWithoutLabel(t *testing.T) { - controller, r := newTestController("", "spiffe.io/spiffe-id") - - // Send in a POD CREATE and assert that it will be admitted - ar := &admv1.AdmissionRequest{ - UID: "uid", - Kind: metav1.GroupVersionKind{ - Version: "v1", - Kind: "Pod", - }, - Namespace: "NAMESPACE", - Name: "PODNAME", - Operation: "CREATE", - Object: runtime.RawExtension{ - Raw: []byte(fakePodOnlySA), - }, - } - requireReviewAdmissionSuccess(t, controller, admv1.AdmissionReview{Request: ar}) - - // Assert that the registration entry for the pod was created - require.Len(t, r.GetEntries(), 0) -} - -func newTestController(podLabel, podAnnotation string) (*Controller, *fakeEntryClient) { - log, _ := test.NewNullLogger() - e := newFakeEntryClient() - return NewController(ControllerConfig{ - Log: log, - E: e, - TrustDomain: "domain.test", - Cluster: "CLUSTER", - PodLabel: podLabel, - PodAnnotation: podAnnotation, - DisabledNamespaces: map[string]bool{"kube-system": true, "kube-public": true}, - }), e -} - -func requireReviewAdmissionSuccess(t *testing.T, controller *Controller, ar admv1.AdmissionReview) { - resp, err := controller.ReviewAdmission(context.Background(), ar) - require.NoError(t, err) - require.Equal(t, &admv1.AdmissionResponse{ - UID: ar.Request.UID, - Allowed: true, - }, resp) -} - -func requireReviewAdmissionFailure(t *testing.T, controller *Controller, ar admv1.AdmissionReview, contains string) { - resp, err := controller.ReviewAdmission(context.Background(), ar) - require.Error(t, err) - require.Contains(t, err.Error(), contains) - require.Nil(t, resp) -} - -type fakeEntryClient struct { - entryv1.EntryClient - - mu sync.Mutex - nextID int64 - entries map[string]*types.Entry -} - -func newFakeEntryClient() *fakeEntryClient { - return &fakeEntryClient{ - entries: make(map[string]*types.Entry), - } -} - -func (c *fakeEntryClient) GetEntries() []*types.Entry { - c.mu.Lock() - defer c.mu.Unlock() - - entries := make([]*types.Entry, 0, len(c.entries)) - for _, entry := range c.entries { - entries = append(entries, cloneEntry(entry)) - } - sort.Slice(entries, func(i, j int) bool { - return entries[i].Id < entries[j].Id - }) - return entries -} - -func (c *fakeEntryClient) CreateEntry(entry *types.Entry) *types.Entry { - c.mu.Lock() - defer c.mu.Unlock() - - // Clone for storage - entry = cloneEntry(entry) - - c.nextID++ - entry.Id = fmt.Sprintf("%08x", c.nextID) - - c.entries[entry.Id] = entry - // Clone on the way out - return cloneEntry(entry) -} - -func (c *fakeEntryClient) BatchCreateEntry(ctx context.Context, req *entryv1.BatchCreateEntryRequest, opts ...grpc.CallOption) (*entryv1.BatchCreateEntryResponse, error) { - resp := new(entryv1.BatchCreateEntryResponse) - for _, entryIn := range req.Entries { - resp.Results = append(resp.Results, &entryv1.BatchCreateEntryResponse_Result{ - Status: &types.Status{}, - Entry: c.CreateEntry(entryIn), - }) - } - return resp, nil -} - -func (c *fakeEntryClient) BatchDeleteEntry(ctx context.Context, req *entryv1.BatchDeleteEntryRequest, opts ...grpc.CallOption) (*entryv1.BatchDeleteEntryResponse, error) { - c.mu.Lock() - 
defer c.mu.Unlock() - - resp := new(entryv1.BatchDeleteEntryResponse) - for _, id := range req.Ids { - _, ok := c.entries[id] - code := codes.OK - var msg string - if !ok { - code = codes.NotFound - msg = "not found" - } - - resp.Results = append(resp.Results, &entryv1.BatchDeleteEntryResponse_Result{ - Status: &types.Status{Code: int32(code), Message: msg}, - Id: id, - }) - delete(c.entries, id) - } - - return resp, nil -} - -func (c *fakeEntryClient) ListEntries(ctx context.Context, req *entryv1.ListEntriesRequest, opts ...grpc.CallOption) (*entryv1.ListEntriesResponse, error) { - switch { - case req.Filter == nil: - return nil, status.Error(codes.InvalidArgument, "expecting filter") - case req.Filter.BySelectors == nil: - return nil, status.Error(codes.InvalidArgument, "expecting filter by selector") - case req.Filter.BySelectors.Match != types.SelectorMatch_MATCH_EXACT: - return nil, status.Error(codes.InvalidArgument, "expecting exact selector match") - } - - // peform an exact match check against selectors - var entries []*types.Entry - for _, entry := range c.entries { - if selectorSetsEqual(req.Filter.BySelectors.Selectors, entry.Selectors) { - entries = append(entries, cloneEntry(entry)) - } - } - - return &entryv1.ListEntriesResponse{ - Entries: entries, - }, nil -} - -func requireEntriesEqual(t *testing.T, expected, actual []*types.Entry) { - spiretest.RequireProtoListEqual(t, expected, actual) -} - -func selectorSetsEqual(as, bs []*types.Selector) bool { - if len(as) != len(bs) { - return false - } - type sel struct { - t string - v string - } - set := map[sel]struct{}{} - for _, a := range as { - set[sel{t: a.Type, v: a.Value}] = struct{}{} - } - for _, b := range bs { - if _, ok := set[sel{t: b.Type, v: b.Value}]; !ok { - return false - } - } - return true -} - -func cloneEntry(in *types.Entry) *types.Entry { - return proto.Clone(in).(*types.Entry) -} - -func mustIDFromString(s string) *types.SPIFFEID { - id := spiffeid.RequireFromString(s) - return &types.SPIFFEID{ - TrustDomain: id.TrustDomain().String(), - Path: id.Path(), - } -} - -func stringFromID(id *types.SPIFFEID) string { - if id == nil { - return "" - } - return fmt.Sprintf("spiffe://%s%s", id.TrustDomain, id.Path) -} diff --git a/support/k8s/k8s-workload-registrar/generate-config.go b/support/k8s/k8s-workload-registrar/generate-config.go deleted file mode 100644 index 3a82760bc5..0000000000 --- a/support/k8s/k8s-workload-registrar/generate-config.go +++ /dev/null @@ -1,338 +0,0 @@ -// +build ignore - -package main - -import ( - "bufio" - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/base64" - "encoding/binary" - "encoding/pem" - "flag" - "fmt" - "math/big" - "os" - "strings" - "text/template" - "time" -) - -var ( - namespaceFlag = flag.String("namespace", "spire", "Kubernetes namespace to put objects under") - serviceFlag = flag.String("service", "k8s-workload-registrar", "K8S service for the registrar") - caKeyAlgFlag = flag.String("ca-key-alg", "ec-p256", "key algorithm to use for the CA key") - serverKeyAlgFlag = flag.String("server-key-alg", "ec-p256", "key algorithm to use for the registrar server key") - clientKeyAlgFlag = flag.String("client-key-alg", "ec-p256", "key algorithm to use for the registrar client key") - ttlFlag = flag.Duration("ttl", time.Hour*24*365, "time to live for certificates (0 for never)") - - // The "never expires" timestamp from RFC5280 - neverExpires = 
time.Date(9999, 12, 31, 23, 59, 59, 0, time.UTC) - - now = time.Now() - - funcs = template.FuncMap{ - "inline": inlineFn, - "base64": base64Fn, - } - - tmpl = template.Must(template.New("").Funcs(funcs).Parse(` ---- - -# ConfigMap containing the K8S Workload Registrar server certificate and -# CA bundle used to verify the client certificate presented by the API server. -# -apiVersion: v1 -kind: ConfigMap -metadata: - name: k8s-workload-registrar-certs - namespace: {{ .Namespace }} -data: - server-cert.pem: | -{{ inline 4 .ServerCert }} - cacert.pem: | -{{ inline 4 .CaCert }} - ---- - -# Kubernetes Secret containing the K8S Workload Registrar server key -apiVersion: v1 -kind: Secret -metadata: - name: k8s-workload-registrar-secret -type: Opaque -data: - server-key.pem: {{ base64 .ServerKey }} - ---- - -# KubeConfig with client credentials for the API Server to use to call the -# K8S Workload Registrar service -apiVersion: v1 -kind: Config -users: -- name: {{ .ServiceDNS }} - user: - client-certificate-data: {{ base64 .ClientCert }} - client-key-data: {{ base64 .ClientKey }} - ---- - -# Validating Webhook Configuration for the K8S Workload Registrar -# -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: k8s-workload-registrar-webhook -webhooks: - - name: {{ .ServiceDNS }} - clientConfig: - service: - name: {{ .Service }} - namespace: {{ .Namespace }} - path: "/validate" - caBundle: {{ base64 .CaCert }} - admissionReviewVersions: - - v1beta1 - - v1 - sideEffects: None - rules: - - apiGroups: [""] - apiVersions: ["v1"] - operations: ["CREATE", "DELETE"] - resources: ["pods"] - scope: "Namespaced" - -`)) -) - -func main() { - flag.Parse() - - serviceDNS := makeServiceDNS(*namespaceFlag, *serviceFlag) - - caKey := generateKey(*caKeyAlgFlag) - caCert := createCACert(caKey) - - serverKey := generateKey(*serverKeyAlgFlag) - serverCert := createServerCert(caCert, caKey, serverKey, serviceDNS) - - clientKey := generateKey(*clientKeyAlgFlag) - clientCert := createClientCert(caCert, caKey, clientKey) - - printTmpl(tmpl, map[string]interface{}{ - "Namespace": *namespaceFlag, - "Service": *serviceFlag, - "ServiceDNS": serviceDNS, - "CaCert": certPEM(caCert), - "ServerCert": certPEM(serverCert), - "ServerKey": keyPEM(serverKey), - "ClientCert": certPEM(clientCert), - "ClientKey": keyPEM(clientKey), - }) -} - -func generateKey(alg string) crypto.Signer { - switch strings.ToLower(alg) { - case "rsa-2048": - return generateRSAKey(2048) - case "rsa-4096": - return generateRSAKey(4096) - case "ec-p224": - return generateECKey(elliptic.P224()) - case "ec-p256": - return generateECKey(elliptic.P256()) - case "ec-p384": - return generateECKey(elliptic.P384()) - case "ec-p521": - return generateECKey(elliptic.P521()) - default: - die("unsupported key algorithm %q", alg) - // unreachable - return nil - } -} - -func generateRSAKey(bits int) crypto.Signer { - key, err := rsa.GenerateKey(rand.Reader, bits) - checkErr(err, "generating RSA key") - return key -} - -func generateECKey(curve elliptic.Curve) crypto.Signer { - key, err := ecdsa.GenerateKey(curve, rand.Reader) - checkErr(err, "generating EC key") - return key -} - -func createCACert(caKey crypto.Signer) *x509.Certificate { - tmpl := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "K8S WORKLOAD REGISTRAR CA", - }, - KeyUsage: x509.KeyUsageCertSign | - x509.KeyUsageCRLSign, - BasicConstraintsValid: true, - IsCA: true, - } - return createCertificate(tmpl, tmpl, caKey, caKey) -} - -func 
createServerCert(caCert *x509.Certificate, caKey, key crypto.Signer, dnsName string) *x509.Certificate { - tmpl := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "K8S WORKLOAD REGISTRAR SERVER", - }, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageKeyEncipherment | - x509.KeyUsageKeyAgreement | - x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageServerAuth, - }, - DNSNames: []string{dnsName}, - } - return createCertificate(tmpl, caCert, key, caKey) -} - -func createClientCert(caCert *x509.Certificate, caKey, key crypto.Signer) *x509.Certificate { - tmpl := &x509.Certificate{ - Subject: pkix.Name{ - CommonName: "K8S WORKLOAD REGISTRAR CLIENT", - }, - BasicConstraintsValid: true, - KeyUsage: x509.KeyUsageKeyEncipherment | - x509.KeyUsageKeyAgreement | - x509.KeyUsageDigitalSignature, - ExtKeyUsage: []x509.ExtKeyUsage{ - x509.ExtKeyUsageClientAuth, - }, - } - return createCertificate(tmpl, caCert, key, caKey) -} - -func createCertificate(tmpl, parent *x509.Certificate, key, parentKey crypto.Signer) *x509.Certificate { - tmpl.SerialNumber = randomSerial() - tmpl.NotBefore = now - tmpl.NotAfter = neverExpires - tmpl.AuthorityKeyId = parent.SubjectKeyId - tmpl.SubjectKeyId = getSubjectKeyId(key.Public()) - if *ttlFlag > 0 { - tmpl.NotAfter = now.Add(*ttlFlag) - } - - certDER, err := x509.CreateCertificate(rand.Reader, tmpl, parent, key.Public(), parentKey) - checkErr(err, "creating certificate") - cert, err := x509.ParseCertificate(certDER) - checkErr(err, "parsing certificate") - return cert -} - -func getSubjectKeyId(pubKey interface{}) []byte { - // Borrowed with love from cfssl under the BSD 2-Clause license - pubKeyBytes, err := x509.MarshalPKIXPublicKey(pubKey) - checkErr(err, "marshalling public key") - - var subjectKeyInfo = struct { - Algorithm pkix.AlgorithmIdentifier - SubjectPublicKey asn1.BitString - }{} - - _, err = asn1.Unmarshal(pubKeyBytes, &subjectKeyInfo) - checkErr(err, "marshalling subject key info") - - keyID := sha1.Sum(subjectKeyInfo.SubjectPublicKey.Bytes) - return keyID[:] -} - -func makeServiceDNS(namespace, service string) string { - return fmt.Sprintf("%s.%s.svc", service, namespace) -} - -func base64Encode(data []byte) string { - return base64.StdEncoding.EncodeToString(data) -} - -func certPEM(cert *x509.Certificate) string { - return encodePEM("CERTIFICATE", cert.Raw) -} - -func keyPEM(key crypto.PrivateKey) string { - keyBytes, err := x509.MarshalPKCS8PrivateKey(key) - checkErr(err, "marshaling private key") - return encodePEM("PRIVATE KEY", keyBytes) -} - -func encodePEM(typ string, bytes []byte) string { - return string(pem.EncodeToMemory(&pem.Block{ - Type: typ, - Bytes: bytes, - })) -} - -func printTmpl(tmpl *template.Template, data interface{}) { - err := tmpl.Execute(os.Stdout, data) - checkErr(err, "rendering %s template", tmpl.Name()) -} - -func printLn(args ...interface{}) { - _, err := fmt.Println(args...) - checkErr(err, "writing to stdout") -} - -func randomSerial() *big.Int { - b := randomBytes(8) - b[0] &= 0x7f - serial := int64(binary.BigEndian.Uint64(b)) - return big.NewInt(serial) -} - -func randomBytes(n int) []byte { - b := make([]byte, n) - _, err := rand.Read(b) - checkErr(err, "reading %d random bytes", n) - return b -} - -// base64Fn base64 encodes a string -func base64Fn(data string) string { - return base64Encode([]byte(data)) -} - -// inlineFn formats data at a specific indentation level for inclusion in YAML. 
-func inlineFn(level int, data string) (string, error) { - indentation := strings.Repeat(" ", level) - buf := new(bytes.Buffer) - scanner := bufio.NewScanner(strings.NewReader(data)) - for scanner.Scan() { - text := scanner.Text() - if text != "" { - buf.WriteString(indentation) - } - buf.WriteString(text) - buf.WriteString("\n") - } - if err := scanner.Err(); err != nil { - return "", err - } - return buf.String(), nil -} - -func checkErr(err error, format string, args ...interface{}) { - if err != nil { - die("%s failed: %+v", fmt.Sprintf(format, args...), err) - } -} - -func die(format string, args ...interface{}) { - fmt.Fprintf(os.Stderr, format+"\n", args) - os.Exit(1) -} diff --git a/support/k8s/k8s-workload-registrar/mode-crd/README.md b/support/k8s/k8s-workload-registrar/mode-crd/README.md index ab79805d99..e33d67f5ce 100644 --- a/support/k8s/k8s-workload-registrar/mode-crd/README.md +++ b/support/k8s/k8s-workload-registrar/mode-crd/README.md @@ -484,3 +484,14 @@ to any pod in the namespace. If allowing users to manually create SpiffeID custom resources it is important to use the Validating Webhook. The Validating Webhook ensures that registration entries created have a namespace selector that matches the namespace the resource was created in. This ensures that the manually created entries can only be consumed by workloads within that namespace. + +## Troubleshooting + +### Migrating to the CRD mode from the deprecated webhook mode + +The k8s ValidatingWebhookConfiguration will need to be removed, or pods may fail admission. If you used the default +configuration, this can be done with: + +``` +kubectl delete validatingwebhookconfiguration k8s-workload-registrar-webhook +``` diff --git a/support/k8s/k8s-workload-registrar/server.go b/support/k8s/k8s-workload-registrar/server.go deleted file mode 100644 index 49786c66e6..0000000000 --- a/support/k8s/k8s-workload-registrar/server.go +++ /dev/null @@ -1,106 +0,0 @@ -package main - -import ( - "context" - "crypto/tls" - "crypto/x509" - "net" - "net/http" - "os" - "time" - - "github.com/sirupsen/logrus" - "github.com/zeebo/errs" -) - -type ServerConfig struct { - Log logrus.FieldLogger - Handler http.Handler - Addr string - CertPath string - KeyPath string - CaCertPath string - InsecureSkipClientVerification bool -} - -type Server struct { - config ServerConfig - listener net.Listener - server *http.Server -} - -func NewServer(config ServerConfig) (*Server, error) { - cert, err := tls.LoadX509KeyPair(config.CertPath, config.KeyPath) - if err != nil { - return nil, errs.New("unable to load server keypair: %v", err) - } - - tlsConfig := &tls.Config{ - Certificates: []tls.Certificate{cert}, - MinVersion: tls.VersionTLS12, - } - if !config.InsecureSkipClientVerification { - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - - var err error - tlsConfig.ClientCAs, err = loadCA(config.CaCertPath) - if err != nil { - return nil, err - } - } - - listener, err := net.Listen("tcp", config.Addr) - if err != nil { - return nil, errs.New("unable to listen: %v", err) - } - - server := &http.Server{ - Handler: config.Handler, - TLSConfig: tlsConfig, - } - - return &Server{ - config: config, - listener: listener, - server: server, - }, nil -} - -func (s *Server) Addr() net.Addr { - return s.listener.Addr() -} - -func (s *Server) Run(ctx context.Context) error { - s.config.Log.WithFields(logrus.Fields{ - "addr": s.listener.Addr(), - "skip_client_verification": s.config.InsecureSkipClientVerification, - }).Info("Serving HTTPS") - - errCh := make(chan 
error, 1) - go func() { - errCh <- s.server.ServeTLS(s.listener, "", "") - s.listener.Close() - }() - - select { - case <-ctx.Done(): - // wait at most ten seconds for connections to drain before giving up. - shutdownCtx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - return errs.Wrap(s.server.Shutdown(shutdownCtx)) - case err := <-errCh: - return errs.Wrap(err) - } -} - -func loadCA(path string) (*x509.CertPool, error) { - pemBytes, err := os.ReadFile(path) - if err != nil { - return nil, errs.New("unable to read cacert file: %v", err) - } - pool := x509.NewCertPool() - if !pool.AppendCertsFromPEM(pemBytes) { - return nil, errs.New("unable to parse cacert file: %v", err) - } - return pool, nil -} diff --git a/support/k8s/k8s-workload-registrar/server_test.go b/support/k8s/k8s-workload-registrar/server_test.go deleted file mode 100644 index 28e4ea5959..0000000000 --- a/support/k8s/k8s-workload-registrar/server_test.go +++ /dev/null @@ -1,267 +0,0 @@ -package main - -import ( - "bytes" - "context" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io" - "math/big" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "testing" - "time" - - logtest "github.com/sirupsen/logrus/hooks/test" - "github.com/spiffe/spire/pkg/common/pemutil" - "github.com/spiffe/spire/test/spiretest" - "github.com/stretchr/testify/require" -) - -var ( - testKeyPEM = []byte(`-----BEGIN PRIVATE KEY----- -MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgu4d/KpV4RMNNr8I6 -czfmH5spJ0LK1r8P8WnkuRZMNDyhRANCAARSUEgB5UlimKzT4TOBs/Dhh3oDF8kr -xrHoko3NlsLMmZn282gMYb+0Au9R+IXllaYy8+vuW9R7VctQwmaAgGU4 ------END PRIVATE KEY-----`) - - testKey, _ = pemutil.ParseSigner(testKeyPEM) -) - -func TestServer(t *testing.T) { - dir := spiretest.TempDir(t) - - keyPath := filepath.Join(dir, "key.pem") - certPath := filepath.Join(dir, "cert.pem") - caCertPath := filepath.Join(dir, "cacert.pem") - badPath := filepath.Join(dir, "bad") - - serverCert := createServerCertificate(t) - clientCert := createClientCertificate(t) - - writeFile(t, keyPath, testKeyPEM, 0600) - writeCertPEM(t, certPath, serverCert) - writeCertPEM(t, caCertPath, clientCert) - - rootCAs := x509.NewCertPool() - rootCAs.AddCert(serverCert) - - testCases := []struct { - name string - config ServerConfig - cert *x509.Certificate - newErr string - reqErr string - }{ - { - name: "bad addr", - config: ServerConfig{ - Addr: "this is not a good addr", - CertPath: certPath, - KeyPath: keyPath, - CaCertPath: caCertPath, - InsecureSkipClientVerification: false, - }, - newErr: "unable to listen", - }, - { - name: "bad cert", - config: ServerConfig{ - Addr: "localhost:0", - CertPath: badPath, - KeyPath: keyPath, - CaCertPath: caCertPath, - InsecureSkipClientVerification: false, - }, - newErr: "unable to load server keypair", - }, - { - name: "bad key", - config: ServerConfig{ - Addr: "localhost:0", - CertPath: certPath, - KeyPath: badPath, - CaCertPath: caCertPath, - InsecureSkipClientVerification: false, - }, - newErr: "unable to load server keypair", - }, - { - name: "bad cacert", - config: ServerConfig{ - Addr: "localhost:0", - CertPath: certPath, - KeyPath: keyPath, - CaCertPath: badPath, - InsecureSkipClientVerification: false, - }, - newErr: "unable to read cacert file", - }, - { - name: "fails over TLS", - config: ServerConfig{ - Addr: "localhost:0", - CertPath: certPath, - KeyPath: keyPath, - CaCertPath: caCertPath, - InsecureSkipClientVerification: false, - }, - reqErr: "remote error: tls: 
bad certificate", - }, - { - name: "success over TLS when verification skipped", - config: ServerConfig{ - Addr: "localhost:0", - CertPath: certPath, - KeyPath: keyPath, - CaCertPath: caCertPath, - InsecureSkipClientVerification: true, - }, - }, - { - name: "fails over mTLS with bad cert", - cert: serverCert, - config: ServerConfig{ - Addr: "localhost:0", - CertPath: certPath, - KeyPath: keyPath, - CaCertPath: caCertPath, - InsecureSkipClientVerification: false, - }, - reqErr: "remote error: tls: bad certificate", - }, - { - name: "success over mTLS", - cert: clientCert, - config: ServerConfig{ - Addr: "localhost:0", - CertPath: certPath, - KeyPath: keyPath, - CaCertPath: caCertPath, - InsecureSkipClientVerification: false, - }, - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - log, _ := logtest.NewNullLogger() - - // prepare the config - config := testCase.config - config.Log = log - config.Handler = http.HandlerFunc(echoHandler) - - // initialize the server - server, err := NewServer(config) - if !checkErr(t, err, testCase.newErr) { - return - } - - // set up the transport - transport := &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: rootCAs, - MinVersion: tls.VersionTLS12, - }, - TLSHandshakeTimeout: time.Second * 10, - } - if testCase.cert != nil { - transport.TLSClientConfig.Certificates = []tls.Certificate{ - { - Certificate: [][]byte{testCase.cert.Raw}, - PrivateKey: testKey, - }, - } - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - errCh := make(chan error, 1) - go func() { - errCh <- server.Run(ctx) - }() - - // do the request - client := http.Client{ - Transport: transport, - } - resp, err := client.Post(fmt.Sprintf("https://%s", server.Addr()), "", strings.NewReader("Hello")) - if !checkErr(t, err, testCase.reqErr) { - return - } - defer resp.Body.Close() - - // assert the response which shows the handler was wired up - buf := new(bytes.Buffer) - _, err = buf.ReadFrom(resp.Body) - require.NoError(t, err) - require.Equal(t, "Hello", buf.String()) - - cancel() - require.NoError(t, <-errCh) - }) - } -} - -func createClientCertificate(t *testing.T) *x509.Certificate { - return createCertificate(t, &x509.Certificate{ - SerialNumber: big.NewInt(0), - NotAfter: time.Now().Add(time.Hour), - }) -} - -func createServerCertificate(t *testing.T) *x509.Certificate { - return createCertificate(t, &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: "localhost", - }, - NotAfter: time.Now().Add(time.Hour), - IPAddresses: []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback}, - }) -} - -func createCertificate(t *testing.T, tmpl *x509.Certificate) *x509.Certificate { - certDER, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, testKey.Public(), testKey) - require.NoError(t, err) - cert, err := x509.ParseCertificate(certDER) - require.NoError(t, err) - return cert -} - -func writeCertPEM(t *testing.T, path string, cert *x509.Certificate) { - certPEM := pem.EncodeToMemory(&pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - }) - writeFile(t, path, certPEM, 0644) -} - -func writeFile(t *testing.T, path string, data []byte, mode os.FileMode) { - err := os.WriteFile(path, data, mode) - require.NoError(t, err) -} - -func checkErr(t *testing.T, err error, expected string) bool { - if expected == "" { - require.NoError(t, err) - return true - } - require.Error(t, err) - require.Contains(t, err.Error(), expected) - return false -} - -func echoHandler(w 
http.ResponseWriter, req *http.Request) { - _, _ = io.Copy(w, req.Body) -} diff --git a/support/k8s/k8s-workload-registrar/webhookhandler.go b/support/k8s/k8s-workload-registrar/webhookhandler.go deleted file mode 100644 index cb528f9120..0000000000 --- a/support/k8s/k8s-workload-registrar/webhookhandler.go +++ /dev/null @@ -1,204 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/sirupsen/logrus" - admv1 "k8s.io/api/admission/v1" - admv1beta1 "k8s.io/api/admission/v1beta1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" -) - -var ( - runtimeScheme = runtime.NewScheme() - codecs = serializer.NewCodecFactory(runtimeScheme) - deserializer = codecs.UniversalDeserializer() -) - -type AdmissionController interface { - ReviewAdmission(context.Context, admv1.AdmissionReview) (*admv1.AdmissionResponse, error) -} - -type WebhookHandler struct { - log logrus.FieldLogger - controller AdmissionController -} - -func NewWebhookHandler(log logrus.FieldLogger, controller AdmissionController) *WebhookHandler { - _ = admv1.AddToScheme(runtimeScheme) - _ = admv1beta1.AddToScheme(runtimeScheme) - return &WebhookHandler{ - log: log, - controller: controller, - } -} - -// admitv1beta1Func handles a v1beta1 admission -type admitv1beta1Func func(context.Context, admv1beta1.AdmissionReview) (*admv1beta1.AdmissionResponse, error) - -// admitv1beta1Func handles a v1 admission -type admitv1Func func(context.Context, admv1.AdmissionReview) (*admv1.AdmissionResponse, error) - -// admitHandler is a handler, for both validators and mutators, that supports multiple admission review versions -type admitHandler struct { - v1beta1 admitv1beta1Func - v1 admitv1Func -} - -func newDelegateToV1AdmitHandler(f admitv1Func) admitHandler { - return admitHandler{ - v1beta1: delegateV1beta1AdmitToV1(f), - v1: f, - } -} - -func delegateV1beta1AdmitToV1(f admitv1Func) admitv1beta1Func { - return func(context context.Context, review admv1beta1.AdmissionReview) (*admv1beta1.AdmissionResponse, error) { - in := admv1.AdmissionReview{Request: convertAdmissionRequestToV1(review.Request)} - out, err := f(context, in) - if err != nil { - return nil, err - } - return convertAdmissionResponseToV1beta1(out), nil - } -} - -func convertAdmissionRequestToV1(r *admv1beta1.AdmissionRequest) *admv1.AdmissionRequest { - return &admv1.AdmissionRequest{ - Kind: r.Kind, - Namespace: r.Namespace, - Name: r.Name, - Object: r.Object, - Resource: r.Resource, - Operation: admv1.Operation(r.Operation), - UID: r.UID, - DryRun: r.DryRun, - OldObject: r.OldObject, - Options: r.Options, - RequestKind: r.RequestKind, - RequestResource: r.RequestResource, - RequestSubResource: r.RequestSubResource, - SubResource: r.SubResource, - UserInfo: r.UserInfo, - } -} - -func convertAdmissionResponseToV1beta1(r *admv1.AdmissionResponse) *admv1beta1.AdmissionResponse { - var pt *admv1beta1.PatchType - if r.PatchType != nil { - t := admv1beta1.PatchType(*r.PatchType) - pt = &t - } - return &admv1beta1.AdmissionResponse{ - UID: r.UID, - Allowed: r.Allowed, - AuditAnnotations: r.AuditAnnotations, - Patch: r.Patch, - PatchType: pt, - Result: r.Result, - } -} - -func (h *WebhookHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if req.URL.Path != "/validate" { - http.Error(w, "Not found", http.StatusNotFound) - return - } - - if req.Method != http.MethodPost { - http.Error(w, "Expected POST", http.StatusMethodNotAllowed) - return - } - - if ct := req.Header.Get("Content-Type"); ct 
!= "application/json" { - http.Error(w, "Expected JSON content", http.StatusBadRequest) - return - } - - var body []byte - if data, err := io.ReadAll(req.Body); err == nil { - body = data - } - - obj, gvk, err := deserializer.Decode(body, nil, nil) - if err != nil { - msg := fmt.Sprintf("Malformed JSON body: %v", err) - h.log.Error(msg) - http.Error(w, msg, http.StatusBadRequest) - return - } - - log := h.log.WithFields(logrus.Fields{ - "version": gvk.Version, - "kind": gvk.Kind, - }) - admit := newDelegateToV1AdmitHandler(h.controller.ReviewAdmission) - ctx := req.Context() - - var responseObj runtime.Object - switch *gvk { - case admv1beta1.SchemeGroupVersion.WithKind("AdmissionReview"): - requestedAdmissionReview, ok := obj.(*admv1beta1.AdmissionReview) - if !ok { - msg := fmt.Sprintf("Expected v1beta1.AdmissionReview but got: %T", obj) - log.Error(msg) - http.Error(w, msg, http.StatusBadRequest) - return - } - responseAdmissionReview := &admv1beta1.AdmissionReview{} - responseAdmissionReview.SetGroupVersionKind(*gvk) - resp, err := admit.v1beta1(ctx, *requestedAdmissionReview) - if err != nil { - msg := fmt.Sprintf("Internal error occurred: %v", err) - log.Error(msg) - http.Error(w, msg, http.StatusInternalServerError) - return - } - responseAdmissionReview.Response = resp - responseAdmissionReview.Response.UID = requestedAdmissionReview.Request.UID - responseObj = responseAdmissionReview - case admv1.SchemeGroupVersion.WithKind("AdmissionReview"): - requestedAdmissionReview, ok := obj.(*admv1.AdmissionReview) - if !ok { - msg := fmt.Sprintf("Expected v1.AdmissionReview but got: %T", obj) - log.Error(msg) - http.Error(w, msg, http.StatusBadRequest) - return - } - responseAdmissionReview := &admv1.AdmissionReview{} - responseAdmissionReview.SetGroupVersionKind(*gvk) - resp, err := admit.v1(ctx, *requestedAdmissionReview) - if err != nil { - msg := fmt.Sprintf("Internal error occurred: %v", err) - log.Error(msg) - http.Error(w, msg, http.StatusInternalServerError) - return - } - responseAdmissionReview.Response = resp - responseAdmissionReview.Response.UID = requestedAdmissionReview.Request.UID - responseObj = responseAdmissionReview - default: - msg := fmt.Sprintf("Unsupported group version kind: %v", gvk) - log.Error(msg) - http.Error(w, msg, http.StatusBadRequest) - return - } - - log.Debugf("Sending response: %v", responseObj) - - respBytes, err := json.Marshal(responseObj) - if err != nil { - log.Error(err) - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - if _, err := w.Write(respBytes); err != nil { - log.Error(err) - } -} diff --git a/support/k8s/k8s-workload-registrar/webhookhandler_test.go b/support/k8s/k8s-workload-registrar/webhookhandler_test.go deleted file mode 100644 index 96a998697c..0000000000 --- a/support/k8s/k8s-workload-registrar/webhookhandler_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package main - -import ( - "context" - "errors" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - - "github.com/sirupsen/logrus/hooks/test" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - admv1 "k8s.io/api/admission/v1" -) - -func TestHandler(t *testing.T) { - log, _ := test.NewNullLogger() - controller := newFakeController() - handler := NewWebhookHandler(log, controller) - - testCases := []struct { - name string - method string - path string - reqHeader http.Header - reqBody string - status int - respHeader http.Header - respBody string - }{ - { - 
name: "wrong path", - method: "POST", - path: "/whatever", - status: http.StatusNotFound, - respBody: "Not found\n", - }, - { - name: "not a POST", - method: "GET", - path: "/validate", - status: http.StatusMethodNotAllowed, - respBody: "Expected POST\n", - }, - { - name: "no JSON content type header", - method: "POST", - path: "/validate", - status: http.StatusBadRequest, - respBody: "Expected JSON content\n", - }, - { - name: "malformed JSON content", - status: http.StatusBadRequest, - method: "POST", - path: "/validate", - reqHeader: http.Header{ - "Content-Type": []string{"application/json"}, - }, - respBody: "Malformed JSON body: Object 'Kind' is missing in ''\n", - }, - { - name: "missing request", - status: http.StatusBadRequest, - method: "POST", - path: "/validate", - reqHeader: http.Header{ - "Content-Type": []string{"application/json"}, - }, - reqBody: "{}", - respBody: "Malformed JSON body: Object 'Kind' is missing in '{}'\n", - }, - { - name: "success", - status: http.StatusOK, - method: "POST", - path: "/validate", - reqHeader: http.Header{ - "Content-Type": []string{"application/json"}, - }, - reqBody: `{ "apiVersion": "admission.k8s.io/v1", "kind": "AdmissionReview", "request": { "uid": "0df28fbd-5f5f-11e8-bc74-36e6bb280816" } }`, - respHeader: http.Header{ - "Content-Type": []string{"application/json"}, - }, - respBody: "{\"kind\":\"AdmissionReview\",\"apiVersion\":\"admission.k8s.io/v1\",\"response\":{\"uid\":\"0df28fbd-5f5f-11e8-bc74-36e6bb280816\",\"allowed\":true}}", - }, - { - name: "failure", - status: http.StatusBadRequest, - method: "POST", - path: "/validate", - reqHeader: http.Header{ - "Content-Type": []string{"application/json"}, - }, - reqBody: "{\"request\": {\"uid\":\"FAILME\"}}", - respBody: "Malformed JSON body: Object 'Kind' is missing in '{\"request\": {\"uid\":\"FAILME\"}}'\n", - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - u := url.URL{ - Scheme: "http", - Host: "localhost", - Path: testCase.path, - } - req, err := http.NewRequest(testCase.method, u.String(), strings.NewReader(testCase.reqBody)) - req.Header = testCase.reqHeader - require.NoError(t, err) - - w := httptest.NewRecorder() - handler.ServeHTTP(w, req) - - resp := w.Result() - defer resp.Body.Close() - - assert.Equal(t, testCase.status, resp.StatusCode) - if testCase.respHeader != nil { - assert.Equal(t, testCase.respHeader, resp.Header) - } - assert.Equal(t, testCase.respBody, w.Body.String()) - }) - } -} - -type fakeController struct{} - -func newFakeController() *fakeController { - return &fakeController{} -} - -func (*fakeController) ReviewAdmission(ctx context.Context, ar admv1.AdmissionReview) (*admv1.AdmissionResponse, error) { - if ar.Request.UID == "FAILME" { - return nil, errors.New("ohno") - } - return &admv1.AdmissionResponse{ - UID: ar.Request.UID, - Allowed: true, - }, nil -} diff --git a/test/integration/README.md b/test/integration/README.md index aa90ca3fe3..9683d6ab7a 100644 --- a/test/integration/README.md +++ b/test/integration/README.md @@ -81,7 +81,7 @@ The following environment variables are available to the teardown script: * [Envoy SDS (v3)](suites/envoy-sds-v3/README.md) * [Ghostunnel + Federation](suites/ghostunnel-federation/README.md) * [Join Token](suites/join-token/README.md) -* [Kubernetes (K8S Workload Registrar "webhook" mode)](suites/k8s/README.md) +* [Kubernetes (K8S Workload Registrar "crd" mode)](suites/k8s-crd-mode/README.md) * [Kubernetes (K8S Workload Registrar "reconcile" 
mode)](suites/k8s-reconcile/README.md) * [Nested Rotation](suites/nested-rotation/README.md) * [Node Attestation](suites/node-attestation/README.md) diff --git a/test/integration/suites/k8s-scratch/00-setup b/test/integration/suites/k8s-scratch/00-setup deleted file mode 100755 index b47b612344..0000000000 --- a/test/integration/suites/k8s-scratch/00-setup +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# Create a temporary path that will be added to the PATH to avoid picking up -# binaries from the environment that aren't a version match. -mkdir -p ./bin - -KIND_PATH=./bin/kind -KUBECTL_PATH=./bin/kubectl - -# Download kind at the expected version at the given path. -download-kind "${KIND_PATH}" - -# Download kubectl at the expected version. -download-kubectl "${KUBECTL_PATH}" - -# We must supply an absolute path to the configuration directory. Replace the -# CONFDIR variable in the kind configuration with the conf directory of the -# running test. -sed -i.bak "s#CONFDIR#${PWD}/conf#g" conf/kind-config.yaml -rm conf/kind-config.yaml.bak - -# Start the kind cluster. -start-kind-cluster "${KIND_PATH}" k8stest ./conf/kind-config.yaml - -# Load the given images in the cluster. -container_images=("spire-server-scratch:latest-local" "spire-agent-scratch:latest-local" "k8s-workload-registrar-scratch:latest-local") -load-images "${KIND_PATH}" k8stest "${container_images[@]}" - -# Set the kubectl context. -set-kubectl-context "${KUBECTL_PATH}" kind-k8stest diff --git a/test/integration/suites/k8s-scratch/01-apply-config b/test/integration/suites/k8s-scratch/01-apply-config deleted file mode 100755 index 4286ab9444..0000000000 --- a/test/integration/suites/k8s-scratch/01-apply-config +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -source init-kubectl - -wait-for-rollout() { - ns=$1 - obj=$2 - MAXROLLOUTCHECKS=12 - ROLLOUTCHECKINTERVAL=15s - for ((i=0; i<${MAXROLLOUTCHECKS}; i++)); do - log-info "checking rollout status for ${ns} ${obj}..." - if ./bin/kubectl "-n${ns}" rollout status "$obj" --timeout="${ROLLOUTCHECKINTERVAL}"; then - return - fi - log-warn "describing ${ns} ${obj}..." - ./bin/kubectl "-n${ns}" describe "$obj" || true - log-warn "logs for ${ns} ${obj}..." - ./bin/kubectl "-n${ns}" logs --all-containers "$obj" || true - done - fail-now "Failed waiting for ${obj} to roll out." -} - -./bin/kubectl create namespace spire -./bin/kubectl apply -k ./conf/server -wait-for-rollout spire deployment/spire-server -./bin/kubectl apply -k ./conf/webhook -./bin/kubectl apply -k ./conf/agent -wait-for-rollout spire daemonset/spire-agent - -# Apply this separately after all of the spire infrastructure has been rolled -# out, otherwise the k8s-workload-registrar might miss its chance to create -# an entry for it -./bin/kubectl apply -f ./conf/workload.yaml -wait-for-rollout spire deployment/example-workload diff --git a/test/integration/suites/k8s-scratch/02-check-for-workload-svid b/test/integration/suites/k8s-scratch/02-check-for-workload-svid deleted file mode 100755 index 12e62e804a..0000000000 --- a/test/integration/suites/k8s-scratch/02-check-for-workload-svid +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh - -source init-kubectl - -MAXFETCHCHECKS=60 -FETCHCHECKINTERVAL=1 -for ((i=1; i<=${MAXFETCHCHECKS}; i++)); do - EXAMPLEPOD=$(./bin/kubectl -nspire get pod -l app=example-workload -o jsonpath="{.items[0].metadata.name}") - log-info "checking for workload SPIFFE ID ($i of $MAXFETCHCHECKS max)..." 
- if ./bin/kubectl -nspire exec -t "${EXAMPLEPOD}" -- \ - /opt/spire/bin/spire-agent api fetch \ - | grep "SPIFFE ID:"; then - DONE=1 - break - fi - sleep "${FETCHCHECKINTERVAL}" -done - -if [ "${DONE}" -eq 1 ]; then - log-info "SPIFFE ID found." -else - fail-now "timed out waiting for workload to obtain credentials." -fi diff --git a/test/integration/suites/k8s-scratch/README.md b/test/integration/suites/k8s-scratch/README.md deleted file mode 100644 index 9bb54b1c5f..0000000000 --- a/test/integration/suites/k8s-scratch/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Kubernetes Suite - -## Description - -This suite sets up a Kubernetes cluster using [Kind](https://kind.sigs.k8s.io) and asserts the following: - -* SPIRE server, agent and workload registrar static compiled for inclusion in scratch image. -* SPIRE server attests SPIRE agents by verifying Kubernetes Projected Service - Account Tokens (i.e. `k8s_psat`) via the Token Review API. -* Workloads are registered via the K8S Workload Registrar and are able to - obtain identities without the need for manually maintained registration - entries. - -## Prerequisites - -```bash -# scratch test use the alpine agent for the workload -make images -make scratch-images -``` diff --git a/test/integration/suites/k8s-scratch/conf/admctrl/admission-control.yaml b/test/integration/suites/k8s-scratch/conf/admctrl/admission-control.yaml deleted file mode 100644 index 05480c2df9..0000000000 --- a/test/integration/suites/k8s-scratch/conf/admctrl/admission-control.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: ValidatingAdmissionWebhook - configuration: - apiVersion: apiserver.config.k8s.io/v1alpha1 - kind: WebhookAdmission - kubeConfigFile: /etc/kubernetes/pki/admctrl/kubeconfig.yaml diff --git a/test/integration/suites/k8s-scratch/conf/admctrl/kubeconfig.yaml b/test/integration/suites/k8s-scratch/conf/admctrl/kubeconfig.yaml deleted file mode 100644 index 72942c5ae7..0000000000 --- a/test/integration/suites/k8s-scratch/conf/admctrl/kubeconfig.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# KubeConfig with client credentials for the API Server to use to call the -# K8S Workload Registrar service -apiVersion: v1 -kind: Config -users: -- name: k8s-workload-registrar.spire.svc - user: - client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJ1VENDQVYrZ0F3SUJBZ0lJVVNIdmpGQTFxRHd3Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF4TVoKU3poVElGZFBVa3RNVDBGRUlGSkZSMGxUVkZKQlVpQkRRVEFnRncweE9UQTFNVE14T1RFME1qTmFHQTg1T1RrNQpNVEl6TVRJek5UazFPVm93S0RFbU1DUUdBMVVFQXhNZFN6aFRJRmRQVWt0TVQwRkVJRkpGUjBsVFZGSkJVaUJEClRFbEZUbFF3V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkNBQVM3SDIrMjJOcEFhTmVRdXQvZEYwdUYKMXk0VDVKTVdBczJOYm9NOXhZdlFKb1FXTVVNNERobWZQT1hVaE5STXdkb1JzTmhSdXZsYkROY2FEU29tNE1DYQpvM1V3Y3pBT0JnTlZIUThCQWY4RUJBTUNBNmd3RXdZRFZSMGxCQXd3Q2dZSUt3WUJCUVVIQXdJd0RBWURWUjBUCkFRSC9CQUl3QURBZEJnTlZIUTRFRmdRVW9EYlBiOUpWNXhqZlZVMnBhSzd2UUNsZ2d3SXdId1lEVlIwakJCZ3cKRm9BVW02eFNULzJCUzRYdmhVcXVzaDJCTEwwdlJNSXdDZ1lJS29aSXpqMEVBd0lEU0FBd1JRSWdHNzRQeWkyZQpONlBEcVRGRnY1UDFjNFhjVVdERzMwdzJIZEU4Wm8rMStVWUNJUURUL2xMa2dUUjUzV01INVRqWkllblhmYzFjCmxkMGlqSmpvRFJIR3lIRjJxdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - client-key-data: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ1BhSWtTTVowUmduQllWYncKMDIrdlN5UUpDM2RtZ0VDNFBLN2svTnk4Qnh1aFJBTkNBQVM3SDIrMjJOcEFhTmVRdXQvZEYwdUYxeTRUNUpNVwpBczJOYm9NOXhZdlFKb1FXTVVNNERobWZQT1hVaE5STXdkb1JzTmhSdXZsYkROY2FEU29tNE1DYQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/test/integration/suites/k8s-scratch/conf/agent/kustomization.yaml b/test/integration/suites/k8s-scratch/conf/agent/kustomization.yaml deleted file mode 100644 index 571ffcf771..0000000000 --- a/test/integration/suites/k8s-scratch/conf/agent/kustomization.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# kustomization.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -# list of Resource Config to be Applied -resources: - - spire-agent.yaml - -# namespace to deploy all Resources to -namespace: spire diff --git a/test/integration/suites/k8s-scratch/conf/agent/spire-agent.yaml b/test/integration/suites/k8s-scratch/conf/agent/spire-agent.yaml deleted file mode 100644 index 7d77963bf7..0000000000 --- a/test/integration/suites/k8s-scratch/conf/agent/spire-agent.yaml +++ /dev/null @@ -1,167 +0,0 @@ -# ServiceAccount for the SPIRE agent -apiVersion: v1 -kind: ServiceAccount -metadata: - name: spire-agent - namespace: spire - ---- - -# Required cluster role to allow spire-agent to query k8s API server -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: spire-agent-cluster-role -rules: -- apiGroups: [""] - resources: ["pods","nodes","nodes/proxy"] - verbs: ["get"] - ---- - -# Binds above cluster role to spire-agent service account -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: spire-agent-cluster-role-binding -subjects: -- kind: ServiceAccount - name: spire-agent - namespace: spire -roleRef: - kind: ClusterRole - name: spire-agent-cluster-role - apiGroup: rbac.authorization.k8s.io - - ---- - -# ConfigMap for the SPIRE agent featuring: -# 1) PSAT node attestation -# 2) K8S Workload Attestation over the secure kubelet port -apiVersion: v1 -kind: ConfigMap -metadata: - name: spire-agent - namespace: spire -data: - agent.conf: | - agent { - data_dir = "/run/spire" - log_level = "DEBUG" - server_address = "spire-server" - server_port = "8081" - trust_bundle_path = "/run/spire/bundle/bundle.crt" - trust_domain = "example.org" - } - - plugins { - NodeAttestor "k8s_psat" { - plugin_data { - cluster = "example-cluster" - } - } - - KeyManager "memory" { - plugin_data { - } - } - - WorkloadAttestor "k8s" { - plugin_data { - # Defaults to the secure kubelet port by default. - # Minikube does not have a cert in the cluster CA bundle that - # can authenticate the kubelet cert, so skip validation. - skip_kubelet_verification = true - } - } - } - - health_checks { - listener_enabled = true - bind_address = "0.0.0.0" - bind_port = "8080" - live_path = "/live" - ready_path = "/ready" - } - ---- - -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: spire-agent - namespace: spire - labels: - app: spire-agent -spec: - selector: - matchLabels: - app: spire-agent - updateStrategy: - type: RollingUpdate - template: - metadata: - namespace: spire - labels: - app: spire-agent - spec: - # hostPID is required for K8S Workload Attestation. - hostPID: true - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: spire-agent - initContainers: - - name: init - # This is a small image with wait-for-it, choose whatever image - # you prefer that waits for a service to be up. 
This image is built - # from https://github.com/lqhl/wait-for-it - image: gcr.io/spiffe-io/wait-for-it - args: ["-t", "30", "spire-server:8081"] - containers: - - name: spire-agent - image: spire-agent-scratch:latest-local - imagePullPolicy: Never - args: ["-config", "/run/spire/config/agent.conf"] - volumeMounts: - - name: spire-config - mountPath: /run/spire/config - readOnly: true - - name: spire-bundle - mountPath: /run/spire/bundle - readOnly: true - - name: spire-agent-socket - mountPath: /tmp/spire-agent/public - readOnly: false - - name: spire-token - mountPath: /var/run/secrets/tokens - livenessProbe: - httpGet: - path: /live - port: 8080 - initialDelaySeconds: 10 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 10 - periodSeconds: 10 - volumes: - - name: spire-config - configMap: - name: spire-agent - - name: spire-bundle - configMap: - name: spire-bundle - - name: spire-agent-socket - hostPath: - path: /run/spire/agent-sockets - type: DirectoryOrCreate - - name: spire-token - projected: - sources: - - serviceAccountToken: - path: spire-agent - expirationSeconds: 7200 - audience: spire-server diff --git a/test/integration/suites/k8s-scratch/conf/kind-config.yaml b/test/integration/suites/k8s-scratch/conf/kind-config.yaml deleted file mode 100644 index 0821814d1f..0000000000 --- a/test/integration/suites/k8s-scratch/conf/kind-config.yaml +++ /dev/null @@ -1,20 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -kubeadmConfigPatches: -- | - apiVersion: kubeadm.k8s.io/v1beta2 - kind: ClusterConfiguration - metadata: - name: config - apiServer: - extraArgs: - "service-account-signing-key-file": "/etc/kubernetes/pki/sa.key" - "service-account-issuer": "api" - "service-account-api-audiences": "api,spire-server" - "admission-control-config-file": "/etc/kubernetes/pki/admctrl/admission-control.yaml" -nodes: -- role: control-plane - image: kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6 - extraMounts: - - containerPath: /etc/kubernetes/pki/admctrl - hostPath: CONFDIR/admctrl diff --git a/test/integration/suites/k8s-scratch/conf/server/k8s-workload-registrar-secret.yaml b/test/integration/suites/k8s-scratch/conf/server/k8s-workload-registrar-secret.yaml deleted file mode 100644 index 04e2e89756..0000000000 --- a/test/integration/suites/k8s-scratch/conf/server/k8s-workload-registrar-secret.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# Kubernetes Secret containing the K8S Workload Registrar server key -apiVersion: v1 -kind: Secret -metadata: - name: k8s-workload-registrar-secret - namespace: spire -type: Opaque -data: - server-key.pem: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ3RqS0h2ckVjVWJDdWtlUG8KaXJSMDRqSnZyWW1ONlF3cHlQSlFFTWtsZ3MraFJBTkNBQVJVdzRwSG1XQ3pyZmprWHNlbjkrbVNQemlmV1Y0MwpzNlNaMUorK3h2RFhNMmpPaE04NlZwL1JkQzBtMkZOajNXWWc2c3VSbEV6dmYvRncyQ3N1WmJtbwotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/test/integration/suites/k8s-scratch/conf/server/kustomization.yaml b/test/integration/suites/k8s-scratch/conf/server/kustomization.yaml deleted file mode 100644 index cd3e08e8fc..0000000000 --- a/test/integration/suites/k8s-scratch/conf/server/kustomization.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# kustomization.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -# list of Resource Config to be Applied -resources: - - k8s-workload-registrar-secret.yaml - - spire-server.yaml - -# namespace to deploy all Resources to 
-namespace: spire diff --git a/test/integration/suites/k8s-scratch/conf/server/spire-server.yaml b/test/integration/suites/k8s-scratch/conf/server/spire-server.yaml deleted file mode 100644 index cbc8d96bc3..0000000000 --- a/test/integration/suites/k8s-scratch/conf/server/spire-server.yaml +++ /dev/null @@ -1,334 +0,0 @@ -# ServiceAccount used by the SPIRE server. -apiVersion: v1 -kind: ServiceAccount -metadata: - name: spire-server - namespace: spire - ---- - -# Required cluster role to allow spire-server to query k8s API server -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: spire-server-cluster-role -rules: -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get"] - # allow TokenReview requests (to verify service account tokens for PSAT - # attestation) -- apiGroups: ["authentication.k8s.io"] - resources: ["tokenreviews"] - verbs: ["get", "create"] - ---- - -# Binds above cluster role to spire-server service account -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: spire-server-cluster-role-binding - namespace: spire -subjects: -- kind: ServiceAccount - name: spire-server - namespace: spire -roleRef: - kind: ClusterRole - name: spire-server-cluster-role - apiGroup: rbac.authorization.k8s.io - ---- - -# Role for the SPIRE server -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: spire - name: spire-server-role -rules: - # allow "get" access to pods (to resolve selectors for PSAT attestation) -- apiGroups: [""] - resources: ["pods"] - verbs: ["get"] - # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE - # agent bootstrapping, see the spire-bundle ConfigMap below) -- apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["spire-bundle"] - verbs: ["get", "patch"] - ---- - -# RoleBinding granting the spire-server-role to the SPIRE server -# service account. -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: spire-server-role-binding - namespace: spire -subjects: -- kind: ServiceAccount - name: spire-server - namespace: spire -roleRef: - kind: Role - name: spire-server-role - apiGroup: rbac.authorization.k8s.io - ---- - -# ConfigMap containing the latest trust bundle for the trust domain. It is -# updated by SPIRE using the k8sbundle notifier plugin. SPIRE agents mount -# this config map and use the certificate to bootstrap trust with the SPIRE -# server during attestation. -apiVersion: v1 -kind: ConfigMap -metadata: - name: spire-bundle - namespace: spire - ---- - -# ConfigMap containing the SPIRE server configuration. 
-apiVersion: v1 -kind: ConfigMap -metadata: - name: spire-server - namespace: spire -data: - server.conf: | - server { - bind_address = "0.0.0.0" - bind_port = "8081" - trust_domain = "example.org" - data_dir = "/run/spire/data" - log_level = "DEBUG" - default_svid_ttl = "1h" - ca_ttl = "12h" - ca_subject = { - country = ["US"] - organization = ["SPIFFE"] - common_name = "" - } - } - - plugins { - DataStore "sql" { - plugin_data { - database_type = "sqlite3" - connection_string = "/run/spire/data/datastore.sqlite3" - } - } - - NodeAttestor "k8s_psat" { - plugin_data { - clusters = { - "example-cluster" = { - service_account_allow_list = ["spire:spire-agent"] - } - } - } - } - - KeyManager "disk" { - plugin_data { - keys_path = "/run/spire/data/keys.json" - } - } - - Notifier "k8sbundle" { - plugin_data { - # This plugin updates the bundle.crt value in the spire:spire-bundle - # ConfigMap by default, so no additional configuration is necessary. - } - } - } - - health_checks { - listener_enabled = true - bind_address = "0.0.0.0" - bind_port = "8080" - live_path = "/live" - ready_path = "/ready" - } - ---- - -apiVersion: v1 -kind: ConfigMap -metadata: - name: k8s-workload-registrar - namespace: spire -data: - k8s-workload-registrar.conf: | - cert_path = "/run/spire/k8s-workload-registrar/certs/server-cert.pem" - key_path = "/run/spire/k8s-workload-registrar/secret/server-key.pem" - cacert_path = "/run/spire/k8s-workload-registrar/certs/cacert.pem" - trust_domain = "example.org" - cluster = "example-cluster" - server_socket_path = "/tmp/spire-server/private/api.sock" - ---- - -# ConfigMap containing the K8S Workload Registrar server certificate and -# CA bundle used to verify the client certificate presented by the API server. -# -apiVersion: v1 -kind: ConfigMap -metadata: - name: k8s-workload-registrar-certs - namespace: spire -data: - server-cert.pem: | - -----BEGIN CERTIFICATE----- - MIIB5zCCAY6gAwIBAgIIQhiO2hfTsKQwCgYIKoZIzj0EAwIwJDEiMCAGA1UEAxMZ - SzhTIFdPUktMT0FEIFJFR0lTVFJBUiBDQTAgFw0xOTA1MTMxOTE0MjNaGA85OTk5 - MTIzMTIzNTk1OVowKDEmMCQGA1UEAxMdSzhTIFdPUktMT0FEIFJFR0lTVFJBUiBT - RVJWRVIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARUw4pHmWCzrfjkXsen9+mS - PzifWV43s6SZ1J++xvDXM2jOhM86Vp/RdC0m2FNj3WYg6suRlEzvf/Fw2CsuZbmo - o4GjMIGgMA4GA1UdDwEB/wQEAwIDqDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNV - HRMBAf8EAjAAMB0GA1UdDgQWBBS+rw+LUFZAT45Ia8SnrfdWOBtAAzAfBgNVHSME - GDAWgBSbrFJP/YFLhe+FSq6yHYEsvS9EwjArBgNVHREEJDAigiBrOHMtd29ya2xv - YWQtcmVnaXN0cmFyLnNwaXJlLnN2YzAKBggqhkjOPQQDAgNHADBEAiBSaDzjPws6 - Kt68mcJGAYBuWasdgdXJXeySzcnfieXe5AIgXwwaeq+deuF4+ckEY6WIzNWoIPOd - SDoLJWybQN17R0M= - -----END CERTIFICATE----- - - cacert.pem: | - -----BEGIN CERTIFICATE----- - MIIBgTCCASigAwIBAgIIVLxbHbQsZQMwCgYIKoZIzj0EAwIwJDEiMCAGA1UEAxMZ - SzhTIFdPUktMT0FEIFJFR0lTVFJBUiBDQTAgFw0xOTA1MTMxOTE0MjNaGA85OTk5 - MTIzMTIzNTk1OVowJDEiMCAGA1UEAxMZSzhTIFdPUktMT0FEIFJFR0lTVFJBUiBD - QTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJNq7IL77XWiWbohBOsmrCKMj+g3 - z/+U0c5HmXRj7lbSpjofS0Y1RkTHMEJSvAoMHzssCe5/MDMHX5Xnn4r/LSGjQjBA - MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSbrFJP - /YFLhe+FSq6yHYEsvS9EwjAKBggqhkjOPQQDAgNHADBEAiBaun9z1WGCSkjx4P+x - mhZkiu1HsOifT9SGQx3in48OSgIgJm02lvnuuKcO/YT2CGHqZ7QjGAnJQY6uLgEQ - 7CXLvcI= - -----END CERTIFICATE----- - ---- - -# This is the Deployment for the SPIRE server. It waits for SPIRE database to -# initialize and uses the SPIRE healthcheck command for liveness/readiness -# probes. 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: spire-server - namespace: spire - labels: - app: spire-server -spec: - replicas: 1 - selector: - matchLabels: - app: spire-server - template: - metadata: - namespace: spire - labels: - app: spire-server - spec: - serviceAccountName: spire-server - shareProcessNamespace: true - containers: - - name: spire-server - image: spire-server-scratch:latest-local - imagePullPolicy: Never - args: ["-config", "/run/spire/config/server.conf"] - ports: - - containerPort: 8081 - volumeMounts: - - name: spire-config - mountPath: /run/spire/config - readOnly: true - - name: spire-server-socket - mountPath: /tmp/spire-server/private - readOnly: false - livenessProbe: - httpGet: - path: /live - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 5 - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 5 - - name: k8s-workload-registrar - image: k8s-workload-registrar-scratch:latest-local - imagePullPolicy: Never - args: ["-config", "/run/spire/k8s-workload-registrar/conf/k8s-workload-registrar.conf"] - ports: - - containerPort: 8443 - name: registrar-port - volumeMounts: - - name: spire-server-socket - mountPath: /tmp/spire-server/private - readOnly: true - - name: k8s-workload-registrar - mountPath: /run/spire/k8s-workload-registrar/conf - readOnly: true - - name: k8s-workload-registrar-certs - mountPath: /run/spire/k8s-workload-registrar/certs - readOnly: true - - name: k8s-workload-registrar-secret - mountPath: /run/spire/k8s-workload-registrar/secret - readOnly: true - volumes: - - name: spire-config - configMap: - name: spire-server - - name: spire-server-socket - hostPath: - path: /run/spire/server-sockets - type: DirectoryOrCreate - - name: k8s-workload-registrar - configMap: - name: k8s-workload-registrar - - name: k8s-workload-registrar-certs - configMap: - name: k8s-workload-registrar-certs - - name: k8s-workload-registrar-secret - secret: - secretName: k8s-workload-registrar-secret - ---- - -# Service definition for SPIRE server defining the gRPC port. 
-apiVersion: v1 -kind: Service -metadata: - name: spire-server - namespace: spire -spec: - type: NodePort - ports: - - name: grpc - port: 8081 - targetPort: 8081 - protocol: TCP - selector: - app: spire-server - ---- - -# Service definition for the admission webhook -apiVersion: v1 -kind: Service -metadata: - name: k8s-workload-registrar - namespace: spire -spec: - selector: - app: spire-server - ports: - - port: 443 - targetPort: registrar-port - - diff --git a/test/integration/suites/k8s-scratch/conf/webhook/kustomization.yaml b/test/integration/suites/k8s-scratch/conf/webhook/kustomization.yaml deleted file mode 100644 index 02462d2984..0000000000 --- a/test/integration/suites/k8s-scratch/conf/webhook/kustomization.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# kustomization.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -# list of Resource Config to be Applied -resources: - - validation-webhook.yaml - -# namespace to deploy all Resources to -namespace: spire diff --git a/test/integration/suites/k8s-scratch/conf/webhook/validation-webhook.yaml b/test/integration/suites/k8s-scratch/conf/webhook/validation-webhook.yaml deleted file mode 100644 index 5576edd029..0000000000 --- a/test/integration/suites/k8s-scratch/conf/webhook/validation-webhook.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Validating Webhook Configuration for the K8S Workload Registrar -# -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: ValidatingWebhookConfiguration -metadata: - name: k8s-workload-registrar-webhook -webhooks: - - name: k8s-workload-registrar.spire.svc - clientConfig: - service: - name: k8s-workload-registrar - namespace: spire - path: "/validate" - caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnVENDQVNpZ0F3SUJBZ0lJVkx4YkhiUXNaUU13Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF4TVoKU3poVElGZFBVa3RNVDBGRUlGSkZSMGxUVkZKQlVpQkRRVEFnRncweE9UQTFNVE14T1RFME1qTmFHQTg1T1RrNQpNVEl6TVRJek5UazFPVm93SkRFaU1DQUdBMVVFQXhNWlN6aFRJRmRQVWt0TVQwRkVJRkpGUjBsVFZGSkJVaUJEClFUQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJKTnE3SUw3N1hXaVdib2hCT3NtckNLTWorZzMKei8rVTBjNUhtWFJqN2xiU3Bqb2ZTMFkxUmtUSE1FSlN2QW9NSHpzc0NlNS9NRE1IWDVYbm40ci9MU0dqUWpCQQpNQTRHQTFVZER3RUIvd1FFQXdJQmhqQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2JyRkpQCi9ZRkxoZStGU3E2eUhZRXN2UzlFd2pBS0JnZ3Foa2pPUFFRREFnTkhBREJFQWlCYXVuOXoxV0dDU2tqeDRQK3gKbWhaa2l1MUhzT2lmVDlTR1F4M2luNDhPU2dJZ0ptMDJsdm51dUtjTy9ZVDJDR0hxWjdRakdBbkpRWTZ1TGdFUQo3Q1hMdmNJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - admissionReviewVersions: - - v1beta1 - rules: - - apiGroups: [""] - apiVersions: ["v1"] - operations: ["CREATE", "DELETE"] - resources: ["pods"] - scope: "Namespaced" diff --git a/test/integration/suites/k8s-scratch/conf/workload.yaml b/test/integration/suites/k8s-scratch/conf/workload.yaml deleted file mode 100644 index e9c7ca86d4..0000000000 --- a/test/integration/suites/k8s-scratch/conf/workload.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: example-workload - namespace: spire - labels: - app: example-workload -spec: - selector: - matchLabels: - app: example-workload - template: - metadata: - namespace: spire - labels: - app: example-workload - spire-workload: example-workload - spec: - hostPID: true - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: example-workload - image: spire-agent-scratch:latest-local - command: ["/opt/spire/bin/spire-agent", "api", "watch"] - args: ["-socketPath", "/tmp/spire-agent/public/api.sock"] - volumeMounts: - - name: spire-agent-socket - mountPath: 
/tmp/spire-agent/public - readOnly: true - volumes: - - name: spire-agent-socket - hostPath: - path: /run/spire/agent-sockets - type: Directory diff --git a/test/integration/suites/k8s-scratch/init-kubectl b/test/integration/suites/k8s-scratch/init-kubectl deleted file mode 100644 index b689f1f417..0000000000 --- a/test/integration/suites/k8s-scratch/init-kubectl +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -KUBECONFIG="${RUNDIR}/kubeconfig" -if [ ! -f "${RUNDIR}/kubeconfig" ]; then - ./bin/kind get kubeconfig --name=k8stest > "${RUNDIR}/kubeconfig" -fi -export KUBECONFIG - diff --git a/test/integration/suites/k8s-scratch/teardown b/test/integration/suites/k8s-scratch/teardown deleted file mode 100755 index d0c69ac504..0000000000 --- a/test/integration/suites/k8s-scratch/teardown +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -source init-kubectl - -if [ -z "$SUCCESS" ]; then - ./bin/kubectl -nspire logs deployment/spire-server --all-containers || true - ./bin/kubectl -nspire logs daemonset/spire-agent --all-containers || true - ./bin/kubectl -nspire logs deployment/example-workload --all-containers || true -fi - -export KUBECONFIG= -./bin/kind delete cluster --name k8stest diff --git a/test/integration/suites/k8s/00-setup b/test/integration/suites/k8s/00-setup deleted file mode 100755 index 4073455bb5..0000000000 --- a/test/integration/suites/k8s/00-setup +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -# Create a temporary path that will be added to the PATH to avoid picking up -# binaries from the environment that aren't a version match. -mkdir -p ./bin - -KIND_PATH=./bin/kind -KUBECTL_PATH=./bin/kubectl - -# Download kind at the expected version at the given path. -download-kind "${KIND_PATH}" - -# Download kubectl at the expected version. -download-kubectl "${KUBECTL_PATH}" - -# We must supply an absolute path to the configuration directory. Replace the -# CONFDIR variable in the kind configuration with the conf directory of the -# running test. -sed -i.bak "s#CONFDIR#${PWD}/conf#g" conf/kind-config.yaml -rm conf/kind-config.yaml.bak - -# Start the kind cluster. -start-kind-cluster "${KIND_PATH}" k8stest ./conf/kind-config.yaml - -# Load the given images in the cluster. -container_images=("spire-server:latest-local" "spire-agent:latest-local" "k8s-workload-registrar:latest-local") -load-images "${KIND_PATH}" k8stest "${container_images[@]}" - -# Set the kubectl context. -set-kubectl-context "${KUBECTL_PATH}" kind-k8stest diff --git a/test/integration/suites/k8s/01-apply-config b/test/integration/suites/k8s/01-apply-config deleted file mode 100755 index 4286ab9444..0000000000 --- a/test/integration/suites/k8s/01-apply-config +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -source init-kubectl - -wait-for-rollout() { - ns=$1 - obj=$2 - MAXROLLOUTCHECKS=12 - ROLLOUTCHECKINTERVAL=15s - for ((i=0; i<${MAXROLLOUTCHECKS}; i++)); do - log-info "checking rollout status for ${ns} ${obj}..." - if ./bin/kubectl "-n${ns}" rollout status "$obj" --timeout="${ROLLOUTCHECKINTERVAL}"; then - return - fi - log-warn "describing ${ns} ${obj}..." - ./bin/kubectl "-n${ns}" describe "$obj" || true - log-warn "logs for ${ns} ${obj}..." - ./bin/kubectl "-n${ns}" logs --all-containers "$obj" || true - done - fail-now "Failed waiting for ${obj} to roll out." 
-} - -./bin/kubectl create namespace spire -./bin/kubectl apply -k ./conf/server -wait-for-rollout spire deployment/spire-server -./bin/kubectl apply -k ./conf/webhook -./bin/kubectl apply -k ./conf/agent -wait-for-rollout spire daemonset/spire-agent - -# Apply this separately after all of the spire infrastructure has been rolled -# out, otherwise the k8s-workload-registrar might miss its chance to create -# an entry for it -./bin/kubectl apply -f ./conf/workload.yaml -wait-for-rollout spire deployment/example-workload diff --git a/test/integration/suites/k8s/02-check-for-workload-svid b/test/integration/suites/k8s/02-check-for-workload-svid deleted file mode 100755 index 12e62e804a..0000000000 --- a/test/integration/suites/k8s/02-check-for-workload-svid +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh - -source init-kubectl - -MAXFETCHCHECKS=60 -FETCHCHECKINTERVAL=1 -for ((i=1; i<=${MAXFETCHCHECKS}; i++)); do - EXAMPLEPOD=$(./bin/kubectl -nspire get pod -l app=example-workload -o jsonpath="{.items[0].metadata.name}") - log-info "checking for workload SPIFFE ID ($i of $MAXFETCHCHECKS max)..." - if ./bin/kubectl -nspire exec -t "${EXAMPLEPOD}" -- \ - /opt/spire/bin/spire-agent api fetch \ - | grep "SPIFFE ID:"; then - DONE=1 - break - fi - sleep "${FETCHCHECKINTERVAL}" -done - -if [ "${DONE}" -eq 1 ]; then - log-info "SPIFFE ID found." -else - fail-now "timed out waiting for workload to obtain credentials." -fi diff --git a/test/integration/suites/k8s/README.md b/test/integration/suites/k8s/README.md deleted file mode 100644 index f3385388a6..0000000000 --- a/test/integration/suites/k8s/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Kubernetes Suite - -## Description - -This suite sets up a Kubernetes cluster using [Kind](https://kind.sigs.k8s.io) and asserts the following: - -* SPIRE server attests SPIRE agents by verifying Kubernetes Projected Service - Account Tokens (i.e. `k8s_psat`) via the Token Review API. -* Workloads are registered via the K8S Workload Registrar (webhook mode) and are able to - obtain identities without the need for manually maintained registration - entries. 
diff --git a/test/integration/suites/k8s/conf/admctrl/admission-control.yaml b/test/integration/suites/k8s/conf/admctrl/admission-control.yaml deleted file mode 100644 index 05480c2df9..0000000000 --- a/test/integration/suites/k8s/conf/admctrl/admission-control.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: apiserver.k8s.io/v1alpha1 -kind: AdmissionConfiguration -plugins: -- name: ValidatingAdmissionWebhook - configuration: - apiVersion: apiserver.config.k8s.io/v1alpha1 - kind: WebhookAdmission - kubeConfigFile: /etc/kubernetes/pki/admctrl/kubeconfig.yaml diff --git a/test/integration/suites/k8s/conf/admctrl/kubeconfig.yaml b/test/integration/suites/k8s/conf/admctrl/kubeconfig.yaml deleted file mode 100644 index 72942c5ae7..0000000000 --- a/test/integration/suites/k8s/conf/admctrl/kubeconfig.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# KubeConfig with client credentials for the API Server to use to call the -# K8S Workload Registrar service -apiVersion: v1 -kind: Config -users: -- name: k8s-workload-registrar.spire.svc - user: - client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJ1VENDQVYrZ0F3SUJBZ0lJVVNIdmpGQTFxRHd3Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF4TVoKU3poVElGZFBVa3RNVDBGRUlGSkZSMGxUVkZKQlVpQkRRVEFnRncweE9UQTFNVE14T1RFME1qTmFHQTg1T1RrNQpNVEl6TVRJek5UazFPVm93S0RFbU1DUUdBMVVFQXhNZFN6aFRJRmRQVWt0TVQwRkVJRkpGUjBsVFZGSkJVaUJEClRFbEZUbFF3V1RBVEJnY3Foa2pPUFFJQkJnZ3Foa2pPUFFNQkJ3TkNBQVM3SDIrMjJOcEFhTmVRdXQvZEYwdUYKMXk0VDVKTVdBczJOYm9NOXhZdlFKb1FXTVVNNERobWZQT1hVaE5STXdkb1JzTmhSdXZsYkROY2FEU29tNE1DYQpvM1V3Y3pBT0JnTlZIUThCQWY4RUJBTUNBNmd3RXdZRFZSMGxCQXd3Q2dZSUt3WUJCUVVIQXdJd0RBWURWUjBUCkFRSC9CQUl3QURBZEJnTlZIUTRFRmdRVW9EYlBiOUpWNXhqZlZVMnBhSzd2UUNsZ2d3SXdId1lEVlIwakJCZ3cKRm9BVW02eFNULzJCUzRYdmhVcXVzaDJCTEwwdlJNSXdDZ1lJS29aSXpqMEVBd0lEU0FBd1JRSWdHNzRQeWkyZQpONlBEcVRGRnY1UDFjNFhjVVdERzMwdzJIZEU4Wm8rMStVWUNJUURUL2xMa2dUUjUzV01INVRqWkllblhmYzFjCmxkMGlqSmpvRFJIR3lIRjJxdz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K - client-key-data: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ1BhSWtTTVowUmduQllWYncKMDIrdlN5UUpDM2RtZ0VDNFBLN2svTnk4Qnh1aFJBTkNBQVM3SDIrMjJOcEFhTmVRdXQvZEYwdUYxeTRUNUpNVwpBczJOYm9NOXhZdlFKb1FXTVVNNERobWZQT1hVaE5STXdkb1JzTmhSdXZsYkROY2FEU29tNE1DYQotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/test/integration/suites/k8s/conf/agent/kustomization.yaml b/test/integration/suites/k8s/conf/agent/kustomization.yaml deleted file mode 100644 index 571ffcf771..0000000000 --- a/test/integration/suites/k8s/conf/agent/kustomization.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# kustomization.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -# list of Resource Config to be Applied -resources: - - spire-agent.yaml - -# namespace to deploy all Resources to -namespace: spire diff --git a/test/integration/suites/k8s/conf/agent/spire-agent.yaml b/test/integration/suites/k8s/conf/agent/spire-agent.yaml deleted file mode 100644 index f66526235b..0000000000 --- a/test/integration/suites/k8s/conf/agent/spire-agent.yaml +++ /dev/null @@ -1,167 +0,0 @@ -# ServiceAccount for the SPIRE agent -apiVersion: v1 -kind: ServiceAccount -metadata: - name: spire-agent - namespace: spire - ---- - -# Required cluster role to allow spire-agent to query k8s API server -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: spire-agent-cluster-role -rules: -- apiGroups: [""] - resources: ["pods","nodes","nodes/proxy"] - verbs: ["get"] - ---- - -# Binds above cluster role to spire-agent service account -kind: 
ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: spire-agent-cluster-role-binding -subjects: -- kind: ServiceAccount - name: spire-agent - namespace: spire -roleRef: - kind: ClusterRole - name: spire-agent-cluster-role - apiGroup: rbac.authorization.k8s.io - - ---- - -# ConfigMap for the SPIRE agent featuring: -# 1) PSAT node attestation -# 2) K8S Workload Attestation over the secure kubelet port -apiVersion: v1 -kind: ConfigMap -metadata: - name: spire-agent - namespace: spire -data: - agent.conf: | - agent { - data_dir = "/run/spire" - log_level = "DEBUG" - server_address = "spire-server" - server_port = "8081" - trust_bundle_path = "/run/spire/bundle/bundle.crt" - trust_domain = "example.org" - } - - plugins { - NodeAttestor "k8s_psat" { - plugin_data { - cluster = "example-cluster" - } - } - - KeyManager "memory" { - plugin_data { - } - } - - WorkloadAttestor "k8s" { - plugin_data { - # Defaults to the secure kubelet port by default. - # Minikube does not have a cert in the cluster CA bundle that - # can authenticate the kubelet cert, so skip validation. - skip_kubelet_verification = true - } - } - } - - health_checks { - listener_enabled = true - bind_address = "0.0.0.0" - bind_port = "8080" - live_path = "/live" - ready_path = "/ready" - } - ---- - -apiVersion: apps/v1 -kind: DaemonSet -metadata: - name: spire-agent - namespace: spire - labels: - app: spire-agent -spec: - selector: - matchLabels: - app: spire-agent - updateStrategy: - type: RollingUpdate - template: - metadata: - namespace: spire - labels: - app: spire-agent - spec: - # hostPID is required for K8S Workload Attestation. - hostPID: true - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - serviceAccountName: spire-agent - initContainers: - - name: init - # This is a small image with wait-for-it, choose whatever image - # you prefer that waits for a service to be up. 
This image is built - # from https://github.com/lqhl/wait-for-it - image: gcr.io/spiffe-io/wait-for-it - args: ["-t", "30", "spire-server:8081"] - containers: - - name: spire-agent - image: spire-agent:latest-local - imagePullPolicy: Never - args: ["-config", "/run/spire/config/agent.conf"] - volumeMounts: - - name: spire-config - mountPath: /run/spire/config - readOnly: true - - name: spire-bundle - mountPath: /run/spire/bundle - readOnly: true - - name: spire-agent-socket - mountPath: /tmp/spire-agent/public - readOnly: false - - name: spire-token - mountPath: /var/run/secrets/tokens - livenessProbe: - httpGet: - path: /live - port: 8080 - initialDelaySeconds: 10 - periodSeconds: 10 - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 10 - periodSeconds: 10 - volumes: - - name: spire-config - configMap: - name: spire-agent - - name: spire-bundle - configMap: - name: spire-bundle - - name: spire-agent-socket - hostPath: - path: /run/spire/agent-sockets - type: DirectoryOrCreate - - name: spire-token - projected: - sources: - - serviceAccountToken: - path: spire-agent - expirationSeconds: 7200 - audience: spire-server diff --git a/test/integration/suites/k8s/conf/kind-config.yaml b/test/integration/suites/k8s/conf/kind-config.yaml deleted file mode 100644 index 173445df06..0000000000 --- a/test/integration/suites/k8s/conf/kind-config.yaml +++ /dev/null @@ -1,20 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -kubeadmConfigPatches: -- | - apiVersion: kubeadm.k8s.io/v1beta2 - kind: ClusterConfiguration - metadata: - name: config - apiServer: - extraArgs: - "service-account-signing-key-file": "/etc/kubernetes/pki/sa.key" - "service-account-issuer": "api" - "service-account-api-audiences": "api,spire-server" - "admission-control-config-file": "/etc/kubernetes/pki/admctrl/admission-control.yaml" -nodes: -- role: control-plane - image: kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6 - extraMounts: - - containerPath: /etc/kubernetes/pki/admctrl - hostPath: CONFDIR/admctrl diff --git a/test/integration/suites/k8s/conf/server/k8s-workload-registrar-secret.yaml b/test/integration/suites/k8s/conf/server/k8s-workload-registrar-secret.yaml deleted file mode 100644 index 04e2e89756..0000000000 --- a/test/integration/suites/k8s/conf/server/k8s-workload-registrar-secret.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# Kubernetes Secret containing the K8S Workload Registrar server key -apiVersion: v1 -kind: Secret -metadata: - name: k8s-workload-registrar-secret - namespace: spire -type: Opaque -data: - server-key.pem: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JR0hBZ0VBTUJNR0J5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEJHMHdhd0lCQVFRZ3RqS0h2ckVjVWJDdWtlUG8KaXJSMDRqSnZyWW1ONlF3cHlQSlFFTWtsZ3MraFJBTkNBQVJVdzRwSG1XQ3pyZmprWHNlbjkrbVNQemlmV1Y0MwpzNlNaMUorK3h2RFhNMmpPaE04NlZwL1JkQzBtMkZOajNXWWc2c3VSbEV6dmYvRncyQ3N1WmJtbwotLS0tLUVORCBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/test/integration/suites/k8s/conf/server/kustomization.yaml b/test/integration/suites/k8s/conf/server/kustomization.yaml deleted file mode 100644 index cd3e08e8fc..0000000000 --- a/test/integration/suites/k8s/conf/server/kustomization.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# kustomization.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -# list of Resource Config to be Applied -resources: - - k8s-workload-registrar-secret.yaml - - spire-server.yaml - -# namespace to deploy all Resources to -namespace: spire diff --git 
a/test/integration/suites/k8s/conf/server/spire-server.yaml b/test/integration/suites/k8s/conf/server/spire-server.yaml deleted file mode 100644 index cecdeaebdc..0000000000 --- a/test/integration/suites/k8s/conf/server/spire-server.yaml +++ /dev/null @@ -1,332 +0,0 @@ -# ServiceAccount used by the SPIRE server. -apiVersion: v1 -kind: ServiceAccount -metadata: - name: spire-server - namespace: spire - ---- - -# Required cluster role to allow spire-server to query k8s API server -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: spire-server-cluster-role -rules: -- apiGroups: [""] - resources: ["nodes"] - verbs: ["get"] - # allow TokenReview requests (to verify service account tokens for PSAT - # attestation) -- apiGroups: ["authentication.k8s.io"] - resources: ["tokenreviews"] - verbs: ["get", "create"] - ---- - -# Binds above cluster role to spire-server service account -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: spire-server-cluster-role-binding - namespace: spire -subjects: -- kind: ServiceAccount - name: spire-server - namespace: spire -roleRef: - kind: ClusterRole - name: spire-server-cluster-role - apiGroup: rbac.authorization.k8s.io - ---- - -# Role for the SPIRE server -kind: Role -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - namespace: spire - name: spire-server-role -rules: - # allow "get" access to pods (to resolve selectors for PSAT attestation) -- apiGroups: [""] - resources: ["pods"] - verbs: ["get"] - # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE - # agent bootstrapping, see the spire-bundle ConfigMap below) -- apiGroups: [""] - resources: ["configmaps"] - resourceNames: ["spire-bundle"] - verbs: ["get", "patch"] - ---- - -# RoleBinding granting the spire-server-role to the SPIRE server -# service account. -kind: RoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: spire-server-role-binding - namespace: spire -subjects: -- kind: ServiceAccount - name: spire-server - namespace: spire -roleRef: - kind: Role - name: spire-server-role - apiGroup: rbac.authorization.k8s.io - ---- - -# ConfigMap containing the latest trust bundle for the trust domain. It is -# updated by SPIRE using the k8sbundle notifier plugin. SPIRE agents mount -# this config map and use the certificate to bootstrap trust with the SPIRE -# server during attestation. -apiVersion: v1 -kind: ConfigMap -metadata: - name: spire-bundle - namespace: spire - ---- - -# ConfigMap containing the SPIRE server configuration. -apiVersion: v1 -kind: ConfigMap -metadata: - name: spire-server - namespace: spire -data: - server.conf: | - server { - bind_address = "0.0.0.0" - bind_port = "8081" - trust_domain = "example.org" - data_dir = "/run/spire/data" - log_level = "DEBUG" - default_svid_ttl = "1h" - ca_ttl = "12h" - ca_subject { - country = ["US"] - organization = ["SPIFFE"] - common_name = "" - } - } - - plugins { - DataStore "sql" { - plugin_data { - database_type = "sqlite3" - connection_string = "/run/spire/data/datastore.sqlite3" - } - } - - NodeAttestor "k8s_psat" { - plugin_data { - clusters = { - "example-cluster" = { - service_account_allow_list = ["spire:spire-agent"] - } - } - } - } - - KeyManager "disk" { - plugin_data { - keys_path = "/run/spire/data/keys.json" - } - } - - Notifier "k8sbundle" { - plugin_data { - # This plugin updates the bundle.crt value in the spire:spire-bundle - # ConfigMap by default, so no additional configuration is necessary. 
- } - } - } - - health_checks { - listener_enabled = true - bind_address = "0.0.0.0" - bind_port = "8080" - live_path = "/live" - ready_path = "/ready" - } - ---- - -apiVersion: v1 -kind: ConfigMap -metadata: - name: k8s-workload-registrar - namespace: spire -data: - k8s-workload-registrar.conf: | - cert_path = "/run/spire/k8s-workload-registrar/certs/server-cert.pem" - key_path = "/run/spire/k8s-workload-registrar/secret/server-key.pem" - cacert_path = "/run/spire/k8s-workload-registrar/certs/cacert.pem" - trust_domain = "example.org" - cluster = "example-cluster" - server_socket_path = "/tmp/spire-server/private/api.sock" - ---- - -# ConfigMap containing the K8S Workload Registrar server certificate and -# CA bundle used to verify the client certificate presented by the API server. -# -apiVersion: v1 -kind: ConfigMap -metadata: - name: k8s-workload-registrar-certs - namespace: spire -data: - server-cert.pem: | - -----BEGIN CERTIFICATE----- - MIIB5zCCAY6gAwIBAgIIQhiO2hfTsKQwCgYIKoZIzj0EAwIwJDEiMCAGA1UEAxMZ - SzhTIFdPUktMT0FEIFJFR0lTVFJBUiBDQTAgFw0xOTA1MTMxOTE0MjNaGA85OTk5 - MTIzMTIzNTk1OVowKDEmMCQGA1UEAxMdSzhTIFdPUktMT0FEIFJFR0lTVFJBUiBT - RVJWRVIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAARUw4pHmWCzrfjkXsen9+mS - PzifWV43s6SZ1J++xvDXM2jOhM86Vp/RdC0m2FNj3WYg6suRlEzvf/Fw2CsuZbmo - o4GjMIGgMA4GA1UdDwEB/wQEAwIDqDATBgNVHSUEDDAKBggrBgEFBQcDATAMBgNV - HRMBAf8EAjAAMB0GA1UdDgQWBBS+rw+LUFZAT45Ia8SnrfdWOBtAAzAfBgNVHSME - GDAWgBSbrFJP/YFLhe+FSq6yHYEsvS9EwjArBgNVHREEJDAigiBrOHMtd29ya2xv - YWQtcmVnaXN0cmFyLnNwaXJlLnN2YzAKBggqhkjOPQQDAgNHADBEAiBSaDzjPws6 - Kt68mcJGAYBuWasdgdXJXeySzcnfieXe5AIgXwwaeq+deuF4+ckEY6WIzNWoIPOd - SDoLJWybQN17R0M= - -----END CERTIFICATE----- - - cacert.pem: | - -----BEGIN CERTIFICATE----- - MIIBgTCCASigAwIBAgIIVLxbHbQsZQMwCgYIKoZIzj0EAwIwJDEiMCAGA1UEAxMZ - SzhTIFdPUktMT0FEIFJFR0lTVFJBUiBDQTAgFw0xOTA1MTMxOTE0MjNaGA85OTk5 - MTIzMTIzNTk1OVowJDEiMCAGA1UEAxMZSzhTIFdPUktMT0FEIFJFR0lTVFJBUiBD - QTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJNq7IL77XWiWbohBOsmrCKMj+g3 - z/+U0c5HmXRj7lbSpjofS0Y1RkTHMEJSvAoMHzssCe5/MDMHX5Xnn4r/LSGjQjBA - MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSbrFJP - /YFLhe+FSq6yHYEsvS9EwjAKBggqhkjOPQQDAgNHADBEAiBaun9z1WGCSkjx4P+x - mhZkiu1HsOifT9SGQx3in48OSgIgJm02lvnuuKcO/YT2CGHqZ7QjGAnJQY6uLgEQ - 7CXLvcI= - -----END CERTIFICATE----- - ---- - -# This is the Deployment for the SPIRE server. It waits for SPIRE database to -# initialize and uses the SPIRE healthcheck command for liveness/readiness -# probes. 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: spire-server - namespace: spire - labels: - app: spire-server -spec: - replicas: 1 - selector: - matchLabels: - app: spire-server - template: - metadata: - namespace: spire - labels: - app: spire-server - spec: - serviceAccountName: spire-server - shareProcessNamespace: true - containers: - - name: spire-server - image: spire-server:latest-local - imagePullPolicy: Never - args: ["-config", "/run/spire/config/server.conf"] - ports: - - containerPort: 8081 - volumeMounts: - - name: spire-config - mountPath: /run/spire/config - readOnly: true - - name: spire-server-socket - mountPath: /tmp/spire-server/private - readOnly: false - livenessProbe: - httpGet: - path: /live - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 5 - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 5 - - name: k8s-workload-registrar - image: k8s-workload-registrar:latest-local - imagePullPolicy: Never - args: ["-config", "/run/spire/k8s-workload-registrar/conf/k8s-workload-registrar.conf"] - ports: - - containerPort: 8443 - name: registrar-port - volumeMounts: - - name: spire-server-socket - mountPath: /tmp/spire-server/private - readOnly: true - - name: k8s-workload-registrar - mountPath: /run/spire/k8s-workload-registrar/conf - readOnly: true - - name: k8s-workload-registrar-certs - mountPath: /run/spire/k8s-workload-registrar/certs - readOnly: true - - name: k8s-workload-registrar-secret - mountPath: /run/spire/k8s-workload-registrar/secret - readOnly: true - volumes: - - name: spire-config - configMap: - name: spire-server - - name: spire-server-socket - hostPath: - path: /run/spire/server-sockets - type: DirectoryOrCreate - - name: k8s-workload-registrar - configMap: - name: k8s-workload-registrar - - name: k8s-workload-registrar-certs - configMap: - name: k8s-workload-registrar-certs - - name: k8s-workload-registrar-secret - secret: - secretName: k8s-workload-registrar-secret - ---- - -# Service definition for SPIRE server defining the gRPC port. 
-apiVersion: v1 -kind: Service -metadata: - name: spire-server - namespace: spire -spec: - type: NodePort - ports: - - name: grpc - port: 8081 - targetPort: 8081 - protocol: TCP - selector: - app: spire-server - ---- - -# Service definition for the admission webhook -apiVersion: v1 -kind: Service -metadata: - name: k8s-workload-registrar - namespace: spire -spec: - selector: - app: spire-server - ports: - - port: 443 - targetPort: registrar-port diff --git a/test/integration/suites/k8s/conf/webhook/kustomization.yaml b/test/integration/suites/k8s/conf/webhook/kustomization.yaml deleted file mode 100644 index 02462d2984..0000000000 --- a/test/integration/suites/k8s/conf/webhook/kustomization.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# kustomization.yaml -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -# list of Resource Config to be Applied -resources: - - validation-webhook.yaml - -# namespace to deploy all Resources to -namespace: spire diff --git a/test/integration/suites/k8s/conf/webhook/validation-webhook.yaml b/test/integration/suites/k8s/conf/webhook/validation-webhook.yaml deleted file mode 100644 index 6d1f7acabf..0000000000 --- a/test/integration/suites/k8s/conf/webhook/validation-webhook.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# Validating Webhook Configuration for the K8S Workload Registrar -# -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - name: k8s-workload-registrar-webhook -webhooks: - - name: k8s-workload-registrar.spire.svc - clientConfig: - service: - name: k8s-workload-registrar - namespace: spire - path: "/validate" - caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJnVENDQVNpZ0F3SUJBZ0lJVkx4YkhiUXNaUU13Q2dZSUtvWkl6ajBFQXdJd0pERWlNQ0FHQTFVRUF4TVoKU3poVElGZFBVa3RNVDBGRUlGSkZSMGxUVkZKQlVpQkRRVEFnRncweE9UQTFNVE14T1RFME1qTmFHQTg1T1RrNQpNVEl6TVRJek5UazFPVm93SkRFaU1DQUdBMVVFQXhNWlN6aFRJRmRQVWt0TVQwRkVJRkpGUjBsVFZGSkJVaUJEClFUQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJKTnE3SUw3N1hXaVdib2hCT3NtckNLTWorZzMKei8rVTBjNUhtWFJqN2xiU3Bqb2ZTMFkxUmtUSE1FSlN2QW9NSHpzc0NlNS9NRE1IWDVYbm40ci9MU0dqUWpCQQpNQTRHQTFVZER3RUIvd1FFQXdJQmhqQVBCZ05WSFJNQkFmOEVCVEFEQVFIL01CMEdBMVVkRGdRV0JCU2JyRkpQCi9ZRkxoZStGU3E2eUhZRXN2UzlFd2pBS0JnZ3Foa2pPUFFRREFnTkhBREJFQWlCYXVuOXoxV0dDU2tqeDRQK3gKbWhaa2l1MUhzT2lmVDlTR1F4M2luNDhPU2dJZ0ptMDJsdm51dUtjTy9ZVDJDR0hxWjdRakdBbkpRWTZ1TGdFUQo3Q1hMdmNJPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - admissionReviewVersions: - - v1 - - v1beta1 - sideEffects: None - timeoutSeconds: 10 - rules: - - apiGroups: [""] - apiVersions: ["v1"] - operations: ["CREATE", "DELETE"] - resources: ["pods"] - scope: "Namespaced" diff --git a/test/integration/suites/k8s/conf/workload.yaml b/test/integration/suites/k8s/conf/workload.yaml deleted file mode 100644 index 59fad28183..0000000000 --- a/test/integration/suites/k8s/conf/workload.yaml +++ /dev/null @@ -1,35 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: example-workload - namespace: spire - labels: - app: example-workload -spec: - selector: - matchLabels: - app: example-workload - template: - metadata: - namespace: spire - labels: - app: example-workload - spire-workload: example-workload - spec: - hostPID: true - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - containers: - - name: example-workload - image: spire-agent:latest-local - command: ["/usr/bin/dumb-init", "/opt/spire/bin/spire-agent", "api", "watch"] - args: ["-socketPath", "/tmp/spire-agent/public/api.sock"] - volumeMounts: - - name: spire-agent-socket - mountPath: /tmp/spire-agent/public 
-          readOnly: true
-      volumes:
-      - name: spire-agent-socket
-        hostPath:
-          path: /run/spire/agent-sockets
-          type: Directory
diff --git a/test/integration/suites/k8s/init-kubectl b/test/integration/suites/k8s/init-kubectl
deleted file mode 100644
index b689f1f417..0000000000
--- a/test/integration/suites/k8s/init-kubectl
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-KUBECONFIG="${RUNDIR}/kubeconfig"
-if [ ! -f "${RUNDIR}/kubeconfig" ]; then
-    ./bin/kind get kubeconfig --name=k8stest > "${RUNDIR}/kubeconfig"
-fi
-export KUBECONFIG
-
diff --git a/test/integration/suites/k8s/teardown b/test/integration/suites/k8s/teardown
deleted file mode 100755
index d0c69ac504..0000000000
--- a/test/integration/suites/k8s/teardown
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-source init-kubectl
-
-if [ -z "$SUCCESS" ]; then
-    ./bin/kubectl -nspire logs deployment/spire-server --all-containers || true
-    ./bin/kubectl -nspire logs daemonset/spire-agent --all-containers || true
-    ./bin/kubectl -nspire logs deployment/example-workload --all-containers || true
-fi
-
-export KUBECONFIG=
-./bin/kind delete cluster --name k8stest

From e415459ca26d1a56b44c56e9ca98c00b8df9fd70 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20G=C3=B6rg?=
Date: Tue, 19 Jul 2022 10:53:25 +0200
Subject: [PATCH 06/13] fixes review finding checks cgroups against regex
 array
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Christian Görg
---
 .../plugin/workloadattestor/k8s/k8s_posix.go | 95 ++++++++++++-------
 .../plugin/workloadattestor/k8s/k8s_test.go  |  2 +-
 2 files changed, 61 insertions(+), 36 deletions(-)

diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
index 06c58cdb1f..0c4fc30ca6 100644
--- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
+++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
@@ -194,8 +194,8 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque
 	for _, item := range list.Items {
 		item := item
 
-		// podUID can be empty, when cgroup contains only containerID
-		if item.UID != podUID && podUID != "" {
+		// if podUID is not empty, skip the item when the UIDs don't match
+		if podUID != "" && item.UID != podUID {
 			continue
 		}
 
@@ -204,7 +204,7 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque
 		case containerInPod:
 			if attestResponse != nil {
 				log.Warn("Two pods found with same container Id")
-				return nil, status.Error(codes.Aborted, "Two pods found with same container Id")
+				return nil, status.Error(codes.Internal, "two pods found with same container Id")
 			}
 			attestResponse = &workloadattestorv1.AttestResponse{
 				SelectorValues: getSelectorValuesFromPodInfo(&item, lookupStatus),
 			}
 		case containerNotInPod:
 		}
@@ -575,32 +575,47 @@ func getPodUIDAndContainerIDFromCGroups(cgroups []cgroups.Cgroup) (types.UID, st
 	return podUID, containerID, nil
 }
 
-// cgroupRE is the regex used to parse out the pod UID and container ID from a
-// cgroup name. It assumes that any ".scope" suffix has been trimmed off
-// beforehand. CAUTION: we used to verify that the pod and container id were
-// descendants of a kubepods directory, however, as of Kubernetes 1.21, cgroups
-// namespaces are in use and therefore we can no longer discern if that is the
-// case from within SPIRE agent container (since the container itself is
-// namespaced). As such, the regex has been relaxed to simply find the pod UID
-// followed by the container ID with allowances for arbitrary punctuation, and
-// container runtime prefixes, etc.
-var cgroupRE = regexp.MustCompile(`` +
-	// "pod"-prefixed Pod UID (with punctuation separated groups) followed by punctuation
-	`[[:punct:]]pod([[:xdigit:]]{8}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{12})[[:punct:]]` +
-	// zero or more punctuation separated "segments" (e.g. "docker-")
-	`(?:[[:^punct:]]+[[:punct:]])*` +
-	// non-punctuation end of string, i.e., the container ID
-	`([[:^punct:]]+)$`)
-
-// cgroupNoPodUidRE is the backup regex, when cgroupRE does not match
-// This regex applies for container runtimes, that won't put the PodUID into
-// the cgroup name.
-// Currently only cri-o is known for this abnormaly.
-var cgroupNoPodUidRE = regexp.MustCompile(`` +
-	// /crio-
-	`[[:punct:]]crio[[:punct:]]` +
-	// non-punctuation end of string, i.e., the container ID
-	`([[:^punct:]]+)$`)
+var cgroupREs = []*regexp.Regexp{
+	// the regex used to parse out the pod UID and container ID from a
+	// cgroup name. It assumes that any ".scope" suffix has been trimmed off
+	// beforehand. CAUTION: we used to verify that the pod and container id were
+	// descendants of a kubepods directory, however, as of Kubernetes 1.21, cgroups
+	// namespaces are in use and therefore we can no longer discern if that is the
+	// case from within SPIRE agent container (since the container itself is
+	// namespaced). As such, the regex has been relaxed to simply find the pod UID
+	// followed by the container ID with allowances for arbitrary punctuation, and
+	// container runtime prefixes, etc.
+	regexp.MustCompile(`` +
+		// "pod"-prefixed Pod UID (with punctuation separated groups) followed by punctuation
+		`[[:punct:]]pod(?P<poduid>[[:xdigit:]]{8}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{12})[[:punct:]]` +
+		// zero or more punctuation separated "segments" (e.g. "docker-")
+		`(?:[[:^punct:]]+[[:punct:]])*` +
+		// non-punctuation end of string, i.e., the container ID
+		`(?P<containerid>[[:^punct:]]+)$`),
+
+	// This regex applies for container runtimes, that won't put the PodUID into
+	// the cgroup name.
+	// Currently only cri-o in combination with kubeedge is known for this abnormaly.
+	regexp.MustCompile(`` +
+		// /crio-
+		`(?P<poduid>)[[:punct:]]crio[[:punct:]]` +
+		// non-punctuation end of string, i.e., the container ID
+		`(?P<containerid>[[:^punct:]]+)$`),
+}
+
+func reSubMatchMap(r *regexp.Regexp, str string) map[string]string {
+	match := r.FindStringSubmatch(str)
+	var subMatchMap map[string]string = nil
+	if match != nil {
+		subMatchMap = make(map[string]string)
+		for i, name := range r.SubexpNames() {
+			if i != 0 {
+				subMatchMap[name] = match[i]
+			}
+		}
+	}
+	return subMatchMap
+}
 
 func getPodUIDAndContainerIDFromCGroupPath(cgroupPath string) (types.UID, string, bool) {
 	// We are only interested in kube pods entries, for example:
 	// - /kubepods/burstable/pod2c48913c-b29f-11e7-9350-020968147796/9bca8d63d5fa610783847915bcff0ecac1273e5b4bed3f6fa1b07350e0135961
@@ -615,14 +630,24 @@ func getPodUIDAndContainerIDFromCGroupPath(cgroupPath string) (types.UID, string
 	// is cheap.
cgroupPath = strings.TrimSuffix(cgroupPath, ".scope") - matches := cgroupRE.FindStringSubmatch(cgroupPath) - if matches != nil { - return canonicalizePodUID(matches[1]), matches[2], true - } else { - matches := cgroupNoPodUidRE.FindStringSubmatch(cgroupPath) + var matchResults map[string]string + for _, regex := range cgroupREs { + matches := reSubMatchMap(regex, cgroupPath) if matches != nil { - return "", matches[1], true + if matchResults != nil { + log.Printf("More than one regex matches for cgroup %s", cgroupPath) + return "", "", false + } + matchResults = matches + } + } + + if matchResults != nil { + var podUID types.UID = "" + if matchResults["poduid"] != "" { + podUID = canonicalizePodUID(matchResults["poduid"]) } + return podUID, matchResults["containerid"], true } return "", "", false } diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go index 15b43ff064..00beab3b54 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go @@ -841,7 +841,7 @@ func (s *Suite) requireAttestSuccessWithCrioPod(p workloadattestor.WorkloadAttes func (s *Suite) requireAttestFailWithCrioPod(p workloadattestor.WorkloadAttestor) { s.addPodListResponse(crioPodListDuplicateContainerIdFilePath) s.addCgroupsResponse(cgPidInCrioPodFilePath) - s.requireAttestFailure(p, codes.Aborted, "Two pods found with same container Id") + s.requireAttestFailure(p, codes.Internal, "two pods found with same container Id") } func (s *Suite) requireAttestSuccessWithPodSystemdCgroups(p workloadattestor.WorkloadAttestor) { From 14eb3e843fc4668c89dce2e77b58be58fd45196c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20G=C3=B6rg?= Date: Mon, 25 Jul 2022 11:33:09 +0200 Subject: [PATCH 07/13] Update pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Marcos Yacob Signed-off-by: Christian Görg --- pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go index 0c4fc30ca6..a62b4e059b 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go @@ -595,7 +595,7 @@ var cgroupREs = []*regexp.Regexp{ // This regex applies for container runtimes, that won't put the PodUID into // the cgroup name. - // Currently only cri-o in combination with kubeedge is known for this abnormaly. + // Currently only cri-o in combination with kubeedge is known for this abnormally. 
 	regexp.MustCompile(`` +
 		// /crio-
 		`(?P<poduid>)[[:punct:]]crio[[:punct:]]` +

From 37dd3bf9b92a54d786d61d3ce59ac08cc8e43dbf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20G=C3=B6rg?=
Date: Mon, 25 Jul 2022 11:33:27 +0200
Subject: [PATCH 08/13] Update pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Marcos Yacob
Signed-off-by: Christian Görg
---
 pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
index a62b4e059b..64cc66e215 100644
--- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
+++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
@@ -190,7 +190,7 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque
 		return nil, err
 	}
 
-	var attestResponse *workloadattestorv1.AttestResponse = nil
+	var attestResponse *workloadattestorv1.AttestResponse
 	for _, item := range list.Items {
 		item := item

From 82fde379fc2a626846e52c222f8943d95fbc26d1 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20G=C3=B6rg?=
Date: Mon, 25 Jul 2022 11:33:43 +0200
Subject: [PATCH 09/13] Update pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Marcos Yacob
Signed-off-by: Christian Görg
---
 pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
index 64cc66e215..290ae5cd8c 100644
--- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
+++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
@@ -643,7 +643,7 @@ func getPodUIDAndContainerIDFromCGroupPath(cgroupPath string) (types.UID, string
 	}
 
 	if matchResults != nil {
-		var podUID types.UID = ""
+		var podUID types.UID
 		if matchResults["poduid"] != "" {
 			podUID = canonicalizePodUID(matchResults["poduid"])
 		}

From 4e46a8679359494087e8d0db504f921d7f9bd1ad Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20G=C3=B6rg?=
Date: Mon, 25 Jul 2022 11:34:04 +0200
Subject: [PATCH 10/13] Update pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Marcos Yacob
Signed-off-by: Christian Görg
---
 pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
index 290ae5cd8c..91fb7149e2 100644
--- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
+++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
@@ -605,7 +605,7 @@ var cgroupREs = []*regexp.Regexp{
 
 func reSubMatchMap(r *regexp.Regexp, str string) map[string]string {
 	match := r.FindStringSubmatch(str)
-	var subMatchMap map[string]string = nil
+	var subMatchMap map[string]string
 	if match != nil {
 		subMatchMap = make(map[string]string)
 		for i, name := range r.SubexpNames() {

From 10bb062ada663e03995cda82a9cb8ab4458cdeb3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Christian=20G=C3=B6rg?=
Date: Mon, 25 Jul 2022 15:04:16 +0200
Subject: [PATCH 11/13] fixes compatibility with cri-o without kubeedge
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Christian Görg
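A note on the approach this patch takes (an illustration only, not part of the patch itself): every entry in cgroupREs is anchored with $ and must match the cgroup path exclusively, exposing the named groups "poduid" and "containerid"; reSubMatchMap then keys the submatches by group name. The following standalone Go sketch, with sample paths borrowed from the test data in this series, shows the intended extraction for a path that carries a pod UID and for a cri-o/kubeedge path that does not:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// A condensed copy of the first cgroupREs entry: "pod"-prefixed pod UID,
// optional punctuation-separated runtime segments, container ID at the end.
var podRE = regexp.MustCompile(
	`[[:punct:]]pod(?P<poduid>[[:xdigit:]]{8}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{12})[[:punct:]]` +
		`(?:[[:^punct:]]+[[:punct:]])*` +
		`(?P<containerid>[[:^punct:]]+)$`)

func main() {
	paths := []string{
		// cri-o under a regular kubelet: pod UID and container ID both present
		"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod561fd272_d131_47ef_a01b_46a997a778f3.slice/crio-030ded69d4c98fcf69c988f75a5eb3a1b4357e1432bd5510c936a40d7e9a1198.scope",
		// cri-o under kubeedge: no pod UID anywhere in the path
		"0::/../crio-45490e76e0878aaa4d9808f7d2eefba37f093c3efbba9838b6d8ab804d9bd814.scope",
	}
	for _, path := range paths {
		path = strings.TrimSuffix(path, ".scope") // same trim the plugin performs
		match := podRE.FindStringSubmatch(path)
		if match == nil {
			fmt.Println("no pod UID; only the crio fallback regex can claim this path")
			continue
		}
		// Key submatches by group name, which is what reSubMatchMap does.
		groups := make(map[string]string)
		for i, name := range podRE.SubexpNames() {
			if i != 0 {
				groups[name] = match[i]
			}
		}
		fmt.Printf("poduid=%s containerid=%s\n", groups["poduid"], groups["containerid"])
	}
}

Keeping one anchored regex per runtime layout also gives the caller a cheap ambiguity check: counting how many entries matched is all getPodUIDAndContainerIDFromCGroupPath needs in order to bail out when a path is claimed twice.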
---
 .../plugin/workloadattestor/k8s/k8s_posix.go | 35 ++++++++++++++-----
 .../plugin/workloadattestor/k8s/k8s_test.go  | 12 +++++--
 2 files changed, 35 insertions(+), 12 deletions(-)

diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
index 91fb7149e2..98f77cf78e 100644
--- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
+++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go
@@ -575,6 +575,9 @@ func getPodUIDAndContainerIDFromCGroups(cgroups []cgroups.Cgroup) (types.UID, st
 	return podUID, containerID, nil
 }
 
+// regexes listed here have to exclusively match a cgroup path
+// the regexes must include two named groups "poduid" and "containerid"
+// if the regex needs to exclude certain substrings, the "mustnotmatch" group can be used
 var cgroupREs = []*regexp.Regexp{
 	// the regex used to parse out the pod UID and container ID from a
 	// cgroup name. It assumes that any ".scope" suffix has been trimmed off
 	// beforehand. CAUTION: we used to verify that the pod and container id were
 	// descendants of a kubepods directory, however, as of Kubernetes 1.21, cgroups
 	// namespaces are in use and therefore we can no longer discern if that is the
@@ -597,26 +600,40 @@ var cgroupREs = []*regexp.Regexp{
 	// the cgroup name.
 	// Currently only cri-o in combination with kubeedge is known for this abnormally.
 	regexp.MustCompile(`` +
+		// intentionally empty poduid group
+		`(?P<poduid>)` +
+		// mustnotmatch group: cgroup path must not include a poduid
+		`(?P<mustnotmatch>pod[[:xdigit:]]{8}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{4}[[:punct:]]?[[:xdigit:]]{12}[[:punct:]])?` +
 		// /crio-
-		`(?P<poduid>)[[:punct:]]crio[[:punct:]]` +
+		`(?:[[:^punct:]]*/*)*crio[[:punct:]]` +
 		// non-punctuation end of string, i.e., the container ID
 		`(?P<containerid>[[:^punct:]]+)$`),
 }
 
 func reSubMatchMap(r *regexp.Regexp, str string) map[string]string {
 	match := r.FindStringSubmatch(str)
-	var subMatchMap map[string]string
-	if match != nil {
-		subMatchMap = make(map[string]string)
-		for i, name := range r.SubexpNames() {
-			if i != 0 {
-				subMatchMap[name] = match[i]
-			}
+	if match == nil {
+		return nil
+	}
+	subMatchMap := make(map[string]string)
+	for i, name := range r.SubexpNames() {
+		if i != 0 {
+			subMatchMap[name] = match[i]
 		}
 	}
 	return subMatchMap
 }
 
+func isValidCGroupPathMatches(matches map[string]string) bool {
+	if matches == nil {
+		return false
+	}
+	if matches["mustnotmatch"] != "" {
+		return false
+	}
+	return true
+}
+
 func getPodUIDAndContainerIDFromCGroupPath(cgroupPath string) (types.UID, string, bool) {
 	// We are only interested in kube pods entries, for example:
 	// - /kubepods/burstable/pod2c48913c-b29f-11e7-9350-020968147796/9bca8d63d5fa610783847915bcff0ecac1273e5b4bed3f6fa1b07350e0135961
@@ -633,7 +650,7 @@ func getPodUIDAndContainerIDFromCGroupPath(cgroupPath string) (types.UID, string
 	var matchResults map[string]string
 	for _, regex := range cgroupREs {
 		matches := reSubMatchMap(regex, cgroupPath)
-		if matches != nil {
+		if isValidCGroupPathMatches(matches) {
 			if matchResults != nil {
 				log.Printf("More than one regex matches for cgroup %s", cgroupPath)
 				return "", "", false
diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go
index 00beab3b54..0e955e3fce 100644
--- a/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go
+++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_test.go
@@ -221,7 +221,7 @@ func (s *Suite) TestAttestFailDuplicateContainerId() {
 	s.startInsecureKubelet()
 	p := s.loadInsecurePlugin()
 
-	s.requireAttestFailWithCrioPod(p)
+	s.requireAttestFailWithDuplicateContainerId(p)
 }
 
 func (s *Suite) TestAttestWithPidInPodSystemdCgroups() {
@@ -838,7 +838,7 @@ func (s *Suite) requireAttestSuccessWithCrioPod(p
workloadattestor.WorkloadAttes s.requireAttestSuccess(p, testCrioPodSelectors) } -func (s *Suite) requireAttestFailWithCrioPod(p workloadattestor.WorkloadAttestor) { +func (s *Suite) requireAttestFailWithDuplicateContainerId(p workloadattestor.WorkloadAttestor) { s.addPodListResponse(crioPodListDuplicateContainerIdFilePath) s.addCgroupsResponse(cgPidInCrioPodFilePath) s.requireAttestFailure(p, codes.Internal, "two pods found with same container Id") @@ -1079,11 +1079,17 @@ func TestGetPodUIDAndContainerIDFromCGroupPath(t *testing.T) { expectContainerID: "b2a102854b4969b2ce98dc329c86b4fb2b06e4ad2cc8da9d8a7578c9cd2004a2", }, { - name: "cri-o", + name: "cri-o in combination with kubeedge", cgroupPath: "0::/../crio-45490e76e0878aaa4d9808f7d2eefba37f093c3efbba9838b6d8ab804d9bd814.scope", expectPodUID: "", expectContainerID: "45490e76e0878aaa4d9808f7d2eefba37f093c3efbba9838b6d8ab804d9bd814", }, + { + name: "cri-o in combination with minikube", + cgroupPath: "9:devices:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod561fd272_d131_47ef_a01b_46a997a778f3.slice/crio-030ded69d4c98fcf69c988f75a5eb3a1b4357e1432bd5510c936a40d7e9a1198.scope", + expectPodUID: "561fd272-d131-47ef-a01b-46a997a778f3", + expectContainerID: "030ded69d4c98fcf69c988f75a5eb3a1b4357e1432bd5510c936a40d7e9a1198", + }, { name: "uid generateds by kubernetes", cgroupPath: "/kubepods/pod2732ca68f6358eba7703fb6f82a25c94", From d4eccac6de891c3becb13f95af122fc00932838c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20G=C3=B6rg?= Date: Tue, 26 Jul 2022 07:56:24 +0200 Subject: [PATCH 12/13] fixes after merge MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Christian Görg --- pkg/agent/plugin/workloadattestor/k8s/k8s.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s.go b/pkg/agent/plugin/workloadattestor/k8s/k8s.go index f79ecab8e2..2e0ec111c0 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s.go @@ -202,7 +202,7 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque continue } - status, lookup := lookUpContainerInPod(containerID, item.Status) + lookupStatus, lookup := lookUpContainerInPod(containerID, item.Status) switch lookup { case containerInPod: if attestResponse != nil { @@ -210,7 +210,7 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque return nil, status.Error(codes.Internal, "two pods found with same container Id") } attestResponse = &workloadattestorv1.AttestResponse{ - SelectorValues: getSelectorValuesFromPodInfo(&item, status), + SelectorValues: getSelectorValuesFromPodInfo(&item, lookupStatus), } case containerNotInPod: } From 2579119f4de823af54a3ebda87454980fded41c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20G=C3=B6rg?= Date: Wed, 27 Jul 2022 09:25:43 +0200 Subject: [PATCH 13/13] puts podUID!="" into IsNotPod Fixes lint MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Christian Görg --- pkg/agent/plugin/workloadattestor/k8s/k8s.go | 2 +- pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go | 2 +- pkg/agent/plugin/workloadattestor/k8s/k8s_posix_test.go | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s.go b/pkg/agent/plugin/workloadattestor/k8s/k8s.go index 2e0ec111c0..49ba919ba8 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s.go +++ 
b/pkg/agent/plugin/workloadattestor/k8s/k8s.go @@ -198,7 +198,7 @@ func (p *Plugin) Attest(ctx context.Context, req *workloadattestorv1.AttestReque var attestResponse *workloadattestorv1.AttestResponse for _, item := range list.Items { item := item - if podUID != "" && isNotPod(item.UID, podUID) { + if isNotPod(item.UID, podUID) { continue } diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go index a93a780c18..15e2fade92 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix.go @@ -177,5 +177,5 @@ func canonicalizePodUID(uid string) types.UID { } func isNotPod(itemPodUID, podUID types.UID) bool { - return itemPodUID != podUID + return podUID != "" && itemPodUID != podUID } diff --git a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix_test.go b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix_test.go index f59304c4f1..975b45fb79 100644 --- a/pkg/agent/plugin/workloadattestor/k8s/k8s_posix_test.go +++ b/pkg/agent/plugin/workloadattestor/k8s/k8s_posix_test.go @@ -22,7 +22,7 @@ import ( const ( kindPodListFilePath = "testdata/kind_pod_list.json" crioPodListFilePath = "testdata/crio_pod_list.json" - crioPodListDuplicateContainerIdFilePath = "testdata/crio_pod_list_duplicate_containerId.json" + crioPodListDuplicateContainerIDFilePath = "testdata/crio_pod_list_duplicate_containerId.json" cgPidInPodFilePath = "testdata/cgroups_pid_in_pod.txt" cgPidInKindPodFilePath = "testdata/cgroups_pid_in_kind_pod.txt" @@ -132,7 +132,7 @@ func (s *Suite) TestAttestFailDuplicateContainerId() { s.startInsecureKubelet() p := s.loadInsecurePlugin() - s.requireAttestFailWithDuplicateContainerId(p) + s.requireAttestFailWithDuplicateContainerID(p) } func (s *Suite) TestAttestWithPidInPodSystemdCgroups() { @@ -183,8 +183,8 @@ func (s *Suite) requireAttestSuccessWithCrioPod(p workloadattestor.WorkloadAttes s.requireAttestSuccess(p, testCrioPodSelectors) } -func (s *Suite) requireAttestFailWithDuplicateContainerId(p workloadattestor.WorkloadAttestor) { - s.addPodListResponse(crioPodListDuplicateContainerIdFilePath) +func (s *Suite) requireAttestFailWithDuplicateContainerID(p workloadattestor.WorkloadAttestor) { + s.addPodListResponse(crioPodListDuplicateContainerIDFilePath) s.addCgroupsResponse(cgPidInCrioPodFilePath) s.requireAttestFailure(p, codes.Internal, "two pods found with same container Id") }
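As a closing illustration of where the series lands (a standalone sketch; UID here is a local stand-in for types.UID from k8s.io/apimachinery, which is a string alias): moving the podUID != "" test into isNotPod means an empty pod UID, the cri-o/kubeedge case where the cgroup path carries only a container ID, never filters a pod out. Every pod then stays a candidate, and the duplicate-container-ID guard in Attest is what rejects ambiguous matches.

package main

import "fmt"

// UID stands in for k8s.io/apimachinery/pkg/types.UID (a string alias).
type UID string

// isNotPod mirrors the final form from PATCH 13: only a non-empty podUID
// that differs from the pod's own UID causes the pod to be skipped.
func isNotPod(itemPodUID, podUID UID) bool {
	return podUID != "" && itemPodUID != podUID
}

func main() {
	fmt.Println(isNotPod("pod-a", "pod-a")) // false: UIDs match, keep the pod
	fmt.Println(isNotPod("pod-a", "pod-b")) // true: UIDs differ, skip the pod
	fmt.Println(isNotPod("pod-a", ""))      // false: cgroup had no pod UID, keep every pod as a candidate
}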