diff --git a/.golangci.yml b/.golangci.yml index 660ca915808..f0007740cb5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -44,5 +44,6 @@ run: skip-dirs: - vendor - pkg/client + - pkg/spire/test timeout: 10m modules-download-mode: vendor diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 137f4721c39..0a34b163bb1 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -61,12 +61,20 @@ func main() { flag.StringVar(&opts.Images.ImageDigestExporterImage, "imagedigest-exporter-image", "", "The container image containing our image digest exporter binary.") flag.StringVar(&opts.Images.WorkingDirInitImage, "workingdirinit-image", "", "The container image containing our working dir init binary.") + flag.StringVar(&opts.SpireConfig.TrustDomain, "spire-trust-domain", "example.org", "Experimental: The SPIRE Trust domain to use.") + flag.StringVar(&opts.SpireConfig.SocketPath, "spire-socket-path", "unix:///spiffe-workload-api/spire-agent.sock", "Experimental: The SPIRE agent socket for SPIFFE workload API.") + flag.StringVar(&opts.SpireConfig.ServerAddr, "spire-server-addr", "spire-server.spire.svc.cluster.local:8081", "Experimental: The SPIRE server address for workload/node registration.") + flag.StringVar(&opts.SpireConfig.NodeAliasPrefix, "spire-node-alias-prefix", "/tekton-node/", "Experimental: The SPIRE node alias prefix to use.") + // This parses flags. 
cfg := injection.ParseAndGetRESTConfigOrDie() if err := opts.Images.Validate(); err != nil { log.Fatal(err) } + if err := opts.SpireConfig.Validate(); err != nil { + log.Fatal(err) + } if cfg.QPS == 0 { cfg.QPS = 2 * rest.DefaultQPS } diff --git a/cmd/entrypoint/main.go b/cmd/entrypoint/main.go index 445452f7464..f138c46e2e6 100644 --- a/cmd/entrypoint/main.go +++ b/cmd/entrypoint/main.go @@ -34,6 +34,8 @@ import ( "github.com/tektoncd/pipeline/pkg/credentials/dockercreds" "github.com/tektoncd/pipeline/pkg/credentials/gitcreds" "github.com/tektoncd/pipeline/pkg/entrypoint" + "github.com/tektoncd/pipeline/pkg/spire" + "github.com/tektoncd/pipeline/pkg/spire/config" "github.com/tektoncd/pipeline/pkg/termination" ) @@ -51,6 +53,8 @@ var ( onError = flag.String("on_error", "", "Set to \"continue\" to ignore an error and continue when a container terminates with a non-zero exit code."+ " Set to \"stopAndFail\" to declare a failure with a step error and stop executing the rest of the steps.") stepMetadataDir = flag.String("step_metadata_dir", "", "If specified, create directory to store the step metadata e.g. 
/tekton/steps//") + enableSpire = flag.Bool("enable_spire", false, "If specified by configmap, this enables spire signing and verification") + socketPath = flag.String("spire_socket_path", "unix:///spiffe-workload-api/spire-agent.sock", "Experimental: The SPIRE agent socket for SPIFFE workload API.") ) const ( @@ -131,6 +135,14 @@ func main() { } } + var spireWorkloadAPI spire.EntrypointerAPIClient + if enableSpire != nil && *enableSpire && socketPath != nil && *socketPath != "" { + spireConfig := config.SpireConfig{ + SocketPath: *socketPath, + } + spireWorkloadAPI = spire.NewEntrypointerAPIClient(&spireConfig) + } + e := entrypoint.Entrypointer{ Command: append(cmd, commandArgs...), WaitFiles: strings.Split(*waitFiles, ","), @@ -148,6 +160,7 @@ func main() { BreakpointOnFailure: *breakpointOnFailure, OnError: *onError, StepMetadataDir: *stepMetadataDir, + SpireWorkloadAPI: spireWorkloadAPI, } // Copy any creds injected by the controller into the $HOME directory of the current diff --git a/cmd/imagedigestexporter/main.go b/cmd/imagedigestexporter/main.go index 33496dab427..95309c963a0 100644 --- a/cmd/imagedigestexporter/main.go +++ b/cmd/imagedigestexporter/main.go @@ -17,9 +17,12 @@ limitations under the License. 
package main import ( + "context" "encoding/json" "flag" + "github.com/tektoncd/pipeline/pkg/spire" + "github.com/tektoncd/pipeline/pkg/spire/config" "github.com/tektoncd/pipeline/pkg/termination" "knative.dev/pkg/logging" @@ -31,6 +34,8 @@ import ( var ( images = flag.String("images", "", "List of images resources built by task in json format") terminationMessagePath = flag.String("terminationMessagePath", "/tekton/termination", "Location of file containing termination message") + enableSpire = flag.Bool("enable_spire", false, "If specified by configmap, this enables spire signing and verification") + socketPath = flag.String("spire_socket_path", "unix:///spiffe-workload-api/spire-agent.sock", "Experimental: The SPIRE agent socket for SPIFFE workload API.") ) /* The input of this go program will be a JSON string with all the output PipelineResources of type @@ -76,6 +81,21 @@ func main() { } + if enableSpire != nil && *enableSpire && socketPath != nil && *socketPath != "" { + ctx := context.Background() + spireConfig := config.SpireConfig{ + SocketPath: *socketPath, + } + + spireWorkloadAPI := spire.NewEntrypointerAPIClient(&spireConfig) + signed, err := spireWorkloadAPI.Sign(ctx, output) + if err != nil { + logger.Fatal(err) + } + + output = append(output, signed...) + } + if err := termination.WriteMessage(*terminationMessagePath, output); err != nil { logger.Fatalf("Unexpected error writing message %s to %s", *terminationMessagePath, err) } diff --git a/config/config-feature-flags.yaml b/config/config-feature-flags.yaml index 2c0619c1f4c..515ff1579cd 100644 --- a/config/config-feature-flags.yaml +++ b/config/config-feature-flags.yaml @@ -81,3 +81,7 @@ data: # Setting this flag to "true" enables CloudEvents for Runs, as long as a # CloudEvents sink is configured in the config-defaults config map send-cloudevents-for-runs: "false" + # Setting this flag to "true" enables spire integration with pipeline. 
+ # This is an experimental feature and thus should still be considered + # an alpha feature. + enable-spire: "false" diff --git a/docs/spire.md b/docs/spire.md new file mode 100644 index 00000000000..5bc4ab2a2f7 --- /dev/null +++ b/docs/spire.md @@ -0,0 +1,285 @@ + +# TaskRun Result Attestations + +TaskRun result attestations is currently an alpha experimental feature. + +The TaskRun result attestations feature provides the first part of non-falsifiable provenance to the build processes that run in the pipeline. They ensure that the results of the tekton pipeline executions originate from the build workloads themselves and that they have not been tampered with. The second part of non-falsifiable provenance is to ensure that no third party interfered with the build process. Using SPIRE, the TaskRun status is monitored for any activity or change not performed by the Tekton Pipeline Controller. If an unauthorized change is detected, it will invalidate the TaskRun. + +When the TaskRun result attestations feature is enabled, all TaskRuns will produce a signature alongside their results, which can then be used to validate their provenance. For example, a TaskRun result that creates user-specified results `commit` and `url` would look like the following. `SVID`, `RESULT_MANIFEST`, `RESULT_MANIFEST.sig`, `commit.sig` and `url.sig` are generated attestations by the integration of SPIRE and Tekton Controller. + +Parsed, the fields would be: +``` +... + +...
+πŸ“ Results + + NAME VALUE + βˆ™ RESULT_MANIFEST commit,url,SVID,commit.sig,url.sig + βˆ™ RESULT_MANIFEST.sig MEUCIQD55MMII9SEk/esQvwNLGC43y7efNGZ+7fsTdq+9vXYFAIgNoRW7cV9WKriZkcHETIaAKqfcZVJfsKbEmaDyohDSm4= + βˆ™ SVID -----BEGIN CERTIFICATE----- +MIICGzCCAcGgAwIBAgIQH9VkLxKkYMidPIsofckRQTAKBggqhkjOPQQDAjAeMQsw +CQYDVQQGEwJVUzEPMA0GA1UEChMGU1BJRkZFMB4XDTIyMDIxMTE2MzM1MFoXDTIy +MDIxMTE3MzQwMFowHTELMAkGA1UEBhMCVVMxDjAMBgNVBAoTBVNQSVJFMFkwEwYH +KoZIzj0CAQYIKoZIzj0DAQcDQgAEBRdg3LdxVAELeH+lq8wzdEJd4Gnt+m9G0Qhy +NyWoPmFUaj9vPpvOyRgzxChYnW0xpcDWihJBkq/EbusPvQB8CKOB4TCB3jAOBgNV +HQ8BAf8EBAMCA6gwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1Ud +EwEB/wQCMAAwHQYDVR0OBBYEFID7ARM5+vwzvnLPMO7Icfnj7l7hMB8GA1UdIwQY +MBaAFES3IzpGDqgV3QcQNgX8b/MBwyAtMF8GA1UdEQRYMFaGVHNwaWZmZTovL2V4 +YW1wbGUub3JnL25zL2RlZmF1bHQvdGFza3J1bi9jYWNoZS1pbWFnZS1waXBlbGlu +ZXJ1bi04ZHE5Yy1mZXRjaC1mcm9tLWdpdDAKBggqhkjOPQQDAgNIADBFAiEAi+LR +JkrZn93PZPslaFmcrQw3rVcEa4xKmPleSvQaBoACIF1QB+q1uwH6cNvWdbLK9g+W +T9Np18bK0xc6p5SuTM2C +-----END CERTIFICATE----- + βˆ™ commit aa79de59c4bae24e32f15fda467d02ae9cd94b01 + βˆ™ commit.sig MEQCIEJHk+8B+mCFozp0F52TQ1AadlhEo1lZNOiOnb/ht71aAiBCE0otKB1R0BktlPvweFPldfZfjG0F+NUSc2gPzhErzg== + βˆ™ url https://github.com/buildpacks/samples + βˆ™ url.sig MEUCIF0Fuxr6lv1MmkreqDKcPH3m+eXp+gY++VcxWgGCx7T1AiEA9U/tROrKuCGfKApLq2A9EModbdoGXyQXFOpAa0aMpOg= +``` + +However, the verification materials are removed from the final results as part of the TaskRun status. It is stored in the termination messages (more details below): + +``` +$ tkn tr describe cache-image-pipelinerun-8dq9c-fetch-from-git +... + +... +πŸ“ Results + NAME VALUE + βˆ™ commit aa79de59c4bae24e32f15fda467d02ae9cd94b01 + βˆ™ url https://github.com/buildpacks/samples +``` + +## Architecture Overview + +This feature relies on a SPIRE installation. 
This is how it integrates into the architecture of Tekton: + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” Register TaskRun Workload Identity β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β–Ίβ”‚ β”‚ +β”‚ Tekton β”‚ β”‚ SPIRE β”‚ +β”‚ Controller │◄───────────┐ β”‚ Server β”‚ +β”‚ β”‚ β”‚ Listen on TaskRun β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”˜ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β–² β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β–² + β”‚ β”‚ β”‚ Tekton TaskRun β”‚ β”‚ + β”‚ β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ + β”‚ Configureβ”‚ β–² β”‚ Attest + β”‚ Pod & β”‚ β”‚ β”‚ + + β”‚ check β”‚ β”‚ β”‚ Request + β”‚ ready β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ β”‚ SVIDs + β”‚ └────►│ TaskRun β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ + β”‚ β”‚ Pod β”‚ β”‚ + β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ TaskRun Entrypointer β”‚ + β”‚ β–² Sign Result and update β”‚ + β”‚ Get β”‚ Get SVID TaskRun status with β”‚ + β”‚ SPIRE β”‚ signature + cert β”‚ + β”‚ server β”‚ β”‚ + β”‚ Credentials β”‚ β–Ό +β”Œβ”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ β”‚ +β”‚ SPIRE Agent ( Runs as ) β”‚ +β”‚ + CSI Driver ( Daemonset ) β”‚ +β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +Initial Setup: +1. 
As part of the SPIRE deployment, the SPIRE server attests the agents running on each node in the cluster. +2. The Tekton Controller is configured to have workload identity entry creation permissions to the SPIRE server. +3. As part of the Tekton Controller operations, the Tekton Controller will retrieve an identity that it can use to talk to the SPIRE server to register TaskRun workloads. + +When a TaskRun is created: +1. The Tekton Controller creates a TaskRun pod and its associated resources +1. When the TaskRun pod is ready, the Tekton Controller registers an identity with the information of the pod to the SPIRE server. This will tell the SPIRE server the identity of the TaskRun to use as well as how to attest the workload/pod. +1. After the TaskRun steps complete, as part of the entrypointer code, it requests an SVID from SPIFFE workload API (via the SPIRE agent socket) +1. The SPIRE agent will attest the workload and request an SVID. +1. The entrypointer receives an x509 SVID, containing the x509 certificate and associated private key. +1. The entrypointer signs the results of the TaskRun and emits the signatures and x509 certificate to the TaskRun results for later verification. + +## Enabling TaskRun result attestations + +To enable TaskRun attestations: +1. Make sure `enable-spire` is set to `"true"` in the `feature-flags` configmap, see [`install.md`](./install.md#customizing-the-pipelines-controller-behavior) for details +1. Create a SPIRE deployment containing a SPIRE server, SPIRE agents and the SPIRE CSI driver, for convenience, [this sample single cluster deployment](https://github.com/spiffe/spiffe-csi/tree/main/example/config) can be used. +1. Register the SPIRE workload entry for Tekton with the "Admin" flag, which will allow the Tekton controller to communicate with the SPIRE server to manage the TaskRun identities dynamically. 
+ ``` + # This example is assuming use of the above SPIRE deployment + # Example where trust domain is "example.org" and cluster name is "example-cluster" + + # Register a node alias for all nodes of which the Tekton Controller may reside + kubectl -n spire exec -it \ + deployment/spire-server -- \ + /opt/spire/bin/spire-server entry create \ + -node \ + -spiffeID spiffe://example.org/allnodes \ + -selector k8s_psat:cluster:example-cluster + + # Register the tekton controller workload to have access to creating entries in the SPIRE server + kubectl -n spire exec -it \ + deployment/spire-server -- \ + /opt/spire/bin/spire-server entry create \ + -admin \ + -spiffeID spiffe://example.org/tekton/controller \ + -parentID spiffe://example.org/allnodes \ + -selector k8s:ns:tekton-pipelines \ + -selector k8s:pod-label:app:tekton-pipelines-controller \ + -selector k8s:sa:tekton-pipelines-controller + + ``` +1. Modify the controller (`config/controller.yaml`) to provide access to the SPIRE agent socket. + ```yaml + # Add the following to the volumeMounts of the "tekton-pipelines-controller" container + - name: spiffe-workload-api + mountPath: /spiffe-workload-api + readOnly: true + + # Add the following to the volumes of the controller pod + - name: spiffe-workload-api + csi: + driver: "csi.spiffe.io" + ``` +1. (Optional) Modify the controller (`config/controller.yaml`) to configure non-default SPIRE options by adding arguments to the CLI. + ```yaml + containers: + - name: tekton-pipelines-controller + image: ko://github.com/tektoncd/pipeline/cmd/controller + args: [ + # These images are built on-demand by `ko resolve` and are replaced + # by image references by digest. 
+ "-kubeconfig-writer-image", "ko://github.com/tektoncd/pipeline/cmd/kubeconfigwriter", + "-git-image", "ko://github.com/tektoncd/pipeline/cmd/git-init", + "-entrypoint-image", "ko://github.com/tektoncd/pipeline/cmd/entrypoint", + "-nop-image", "ko://github.com/tektoncd/pipeline/cmd/nop", + "-imagedigest-exporter-image", "ko://github.com/tektoncd/pipeline/cmd/imagedigestexporter", + "-pr-image", "ko://github.com/tektoncd/pipeline/cmd/pullrequest-init", + "-workingdirinit-image", "ko://github.com/tektoncd/pipeline/cmd/workingdirinit", + + # Configure optional SPIRE arguments + + "-spire-trust-domain", "example.org", + + "-spire-socket-path", "/spiffe-workload-api/spire-agent.sock", + + "spire-server-addr", "spire-server.spire.svc.cluster.local:8081" + + "spire-node-alias-prefix", "/tekton-node/", + + # This is gcr.io/google.com/cloudsdktool/cloud-sdk:302.0.0-slim + "-gsutil-image", "gcr.io/google.com/cloudsdktool/cloud-sdk@sha256:27b2c22bf259d9bc1a291e99c63791ba0c27a04d2db0a43241ba0f1f20f4067f", + # The shell image must be root in order to create directories and copy files to PVCs. 
+ # gcr.io/distroless/base:debug as of October 21, 2021 + # image shall not contains tag, so it will be supported on a runtime like cri-o + "-shell-image", "gcr.io/distroless/base@sha256:cfdc553400d41b47fd231b028403469811fcdbc0e69d66ea8030c5a0b5fbac2b", + # for script mode to work with windows we need a powershell image + # pinning to nanoserver tag as of July 15 2021 + "-shell-image-win", "mcr.microsoft.com/powershell:nanoserver@sha256:b6d5ff841b78bdf2dfed7550000fd4f3437385b8fa686ec0f010be24777654d6", + ] + ``` + +## Sample TaskRun attestation + +The following example shows how this feature works: + +```yaml +kind: TaskRun +apiVersion: tekton.dev/v1beta1 +metadata: + name: non-falsifiable-provenance +spec: + timeout: 60s + taskSpec: + steps: + - name: non-falsifiable + image: ubuntu + script: | + #!/usr/bin/env bash + printf "%s" "hello" > "$(results.foo.path)" + printf "%s" "world" > "$(results.bar.path)" + results: + - name: foo + - name: bar +``` + + +The termination message is: +``` +message: '[{"key":"RESULT_MANIFEST","value":"foo,bar","type":1},{"key":"RESULT_MANIFEST.sig","value":"MEQCIB4grfqBkcsGuVyoQd9KUVzNZaFGN6jQOKK90p5HWHqeAiB7yZerDA+YE3Af/ALG43DQzygiBpKhTt8gzWGmpvXJFw==","type":1},{"key":"SVID","value":"-----BEGIN + 
CERTIFICATE-----\nMIICCjCCAbCgAwIBAgIRALH94zAZZXdtPg97O5vG5M0wCgYIKoZIzj0EAwIwHjEL\nMAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTAeFw0yMjAzMTQxNTUzNTlaFw0y\nMjAzMTQxNjU0MDlaMB0xCzAJBgNVBAYTAlVTMQ4wDAYDVQQKEwVTUElSRTBZMBMG\nByqGSM49AgEGCCqGSM49AwEHA0IABPLzFTDY0RDpjKb+eZCIWgUw9DViu8/pM8q7\nHMTKCzlyGqhaU80sASZfpkZvmi72w+gLszzwVI1ZNU5e7aCzbtSjgc8wgcwwDgYD\nVR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV\nHRMBAf8EAjAAMB0GA1UdDgQWBBSsUvspy+/Dl24pA1f+JuNVJrjgmTAfBgNVHSME\nGDAWgBSOMyOHnyLLGxPSD9RRFL+Yhm/6qzBNBgNVHREERjBEhkJzcGlmZmU6Ly9l\neGFtcGxlLm9yZy9ucy9kZWZhdWx0L3Rhc2tydW4vbm9uLWZhbHNpZmlhYmxlLXBy\nb3ZlbmFuY2UwCgYIKoZIzj0EAwIDSAAwRQIhAM4/bPAH9dyhBEj3DbwtJKMyEI56\n4DVrP97ps9QYQb23AiBiXWrQkvRYl0h4CX0lveND2yfqLrGdVL405O5NzCcUrA==\n-----END + CERTIFICATE-----\n","type":1},{"key":"bar","value":"world","type":1},{"key":"bar.sig","value":"MEUCIQDOtg+aEP1FCr6/FsHX+bY1d5abSQn2kTiUMg4Uic2lVQIgTVF5bbT/O77VxESSMtQlpBreMyw2GmKX2hYJlaOEH1M=","type":1},{"key":"foo","value":"hello","type":1},{"key":"foo.sig","value":"MEQCIBr+k0i7SRSyb4h96vQE9hhxBZiZb/2PXQqReOKJDl/rAiBrjgSsalwOvN0zgQay0xQ7PRbm5YSmI8tvKseLR8Ryww==","type":1}]' +``` + +Parsed, the fields are: +- `RESULT_MANIFEST`: List of results that should be present, to prevent pick and choose attacks +- `RESULT_MANIFEST.sig`: The signature of the result manifest +- `SVID`: The x509 certificate that will be used to verify the signature trust chain to the authority +- `*.sig`: The signature of each individual result output +``` + βˆ™ RESULT_MANIFEST foo,bar + βˆ™ RESULT_MANIFEST.sig MEQCIB4grfqBkcsGuVyoQd9KUVzNZaFGN6jQOKK90p5HWHqeAiB7yZerDA+YE3Af/ALG43DQzygiBpKhTt8gzWGmpvXJFw== + βˆ™ SVID -----BEGIN CERTIFICATE----- +MIICCjCCAbCgAwIBAgIRALH94zAZZXdtPg97O5vG5M0wCgYIKoZIzj0EAwIwHjEL +MAkGA1UEBhMCVVMxDzANBgNVBAoTBlNQSUZGRTAeFw0yMjAzMTQxNTUzNTlaFw0y +MjAzMTQxNjU0MDlaMB0xCzAJBgNVBAYTAlVTMQ4wDAYDVQQKEwVTUElSRTBZMBMG +ByqGSM49AgEGCCqGSM49AwEHA0IABPLzFTDY0RDpjKb+eZCIWgUw9DViu8/pM8q7 
+HMTKCzlyGqhaU80sASZfpkZvmi72w+gLszzwVI1ZNU5e7aCzbtSjgc8wgcwwDgYD +VR0PAQH/BAQDAgOoMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNV +HRMBAf8EAjAAMB0GA1UdDgQWBBSsUvspy+/Dl24pA1f+JuNVJrjgmTAfBgNVHSME +GDAWgBSOMyOHnyLLGxPSD9RRFL+Yhm/6qzBNBgNVHREERjBEhkJzcGlmZmU6Ly9l +eGFtcGxlLm9yZy9ucy9kZWZhdWx0L3Rhc2tydW4vbm9uLWZhbHNpZmlhYmxlLXBy +b3ZlbmFuY2UwCgYIKoZIzj0EAwIDSAAwRQIhAM4/bPAH9dyhBEj3DbwtJKMyEI56 +4DVrP97ps9QYQb23AiBiXWrQkvRYl0h4CX0lveND2yfqLrGdVL405O5NzCcUrA== +-----END CERTIFICATE----- + βˆ™ bar world + βˆ™ bar.sig MEUCIQDOtg+aEP1FCr6/FsHX+bY1d5abSQn2kTiUMg4Uic2lVQIgTVF5bbT/O77VxESSMtQlpBreMyw2GmKX2hYJlaOEH1M= + βˆ™ foo hello + βˆ™ foo.sig MEQCIBr+k0i7SRSyb4h96vQE9hhxBZiZb/2PXQqReOKJDl/rAiBrjgSsalwOvN0zgQay0xQ7PRbm5YSmI8tvKseLR8Ryww== +``` + + +However, the verification materials are removed from the results as part of the TaskRun status: +```console +$ tkn tr describe non-falsifiable-provenance +Name: non-falsifiable-provenance +Namespace: default +Service Account: default +Timeout: 1m0s +Labels: + app.kubernetes.io/managed-by=tekton-pipelines + +🌑️ Status + +STARTED DURATION STATUS +38 seconds ago 36 seconds Succeeded + +πŸ“ Results + + NAME VALUE + βˆ™ bar world + βˆ™ foo hello + +🦢 Steps + + NAME STATUS + βˆ™ non-falsifiable Completed +``` + +## How is the result being verified + +The signatures are being verified by the Tekton controller, the process of verification is as follows: + +- Verifying the SVID + - Obtain the trust bundle from the SPIRE server + - Verify the SVID with the trust bundle + - Verify that the SVID spiffe ID is for the correct TaskRun +- Verifying the result manifest + - Verify the content of `RESULT_MANIFEST` with the field `RESULT_MANIFEST.sig` with the SVID public key + - Verify that there is a corresponding field for all items listed in `RESULT_MANIFEST` (besides SVID and `*.sig` fields) +- Verify individual result fields + - For each of the items in the results, verify its content against its associated `.sig` field + + +## Further 
Details + +To learn more about SPIRE TaskRun attestations, check out the [TEP](https://github.com/tektoncd/community/blob/main/teps/0089-nonfalsifiable-provenance-support.md). \ No newline at end of file diff --git a/examples/v1beta1/pipelineruns/4808-regression.yaml b/examples/v1beta1/pipelineruns/4808-regression.yaml index df4502a8a88..019c98002c3 100644 --- a/examples/v1beta1/pipelineruns/4808-regression.yaml +++ b/examples/v1beta1/pipelineruns/4808-regression.yaml @@ -92,4 +92,4 @@ spec: name: result-test params: - name: RESULT_STRING_LENGTH - value: "3000" + value: "2800" diff --git a/go.mod b/go.mod index dbaf2b032b2..a9864feae69 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/tektoncd/pipeline go 1.17 require ( + github.com/Microsoft/go-winio v0.5.2 github.com/cloudevents/sdk-go/v2 v2.10.1 github.com/containerd/containerd v1.5.13 github.com/google/go-cmp v0.5.8 @@ -16,11 +17,14 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/opencontainers/image-spec v1.0.3-0.20220114050600-8b9d41f48198 github.com/pkg/errors v0.9.1 + github.com/spiffe/go-spiffe/v2 v2.1.1 + github.com/spiffe/spire-api-sdk v1.3.2 github.com/tektoncd/plumbing v0.0.0-20220329085922-d765a5cba75f go.opencensus.io v0.23.0 go.uber.org/zap v1.21.0 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b gomodules.xyz/jsonpatch/v2 v2.2.0 + gopkg.in/square/go-jose.v2 v2.5.1 k8s.io/api v0.23.5 k8s.io/apimachinery v0.23.5 k8s.io/client-go v0.23.5 @@ -33,6 +37,8 @@ require ( require github.com/benbjohnson/clock v1.1.0 // indirect +require github.com/zeebo/errs v1.2.2 // indirect + require ( cloud.google.com/go/compute v1.5.0 // indirect contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect @@ -117,7 +123,7 @@ require ( github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/testify v1.8.0 // indirect + 
github.com/stretchr/testify v1.8.0 github.com/tektoncd/resolution v0.0.0-20220331203013-e4203c70c5eb github.com/vbatts/tar-split v0.11.2 // indirect go.uber.org/atomic v1.9.0 // indirect @@ -137,8 +143,8 @@ require ( google.golang.org/api v0.70.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220303160752-862486edd9cc // indirect - google.golang.org/grpc v1.44.0 // indirect - google.golang.org/protobuf v1.27.1 // indirect + google.golang.org/grpc v1.46.0 + google.golang.org/protobuf v1.28.0 gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 73a4f22ff35..76f244ca054 100644 --- a/go.sum +++ b/go.sum @@ -130,6 +130,8 @@ github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JP github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= @@ -490,6 +492,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= 
github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= @@ -1282,6 +1285,10 @@ github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5q github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/spiffe/go-spiffe/v2 v2.1.1 h1:RT9kM8MZLZIsPTH+HKQEP5yaAk3yd/VBzlINaRjXs8k= +github.com/spiffe/go-spiffe/v2 v2.1.1/go.mod h1:5qg6rpqlwIub0JAiF1UK9IMD6BpPTmvG6yfSgDBs5lg= +github.com/spiffe/spire-api-sdk v1.3.2 h1:8F5HQGm3jDL6amuxxeQcH8Rqs6/WOwaLt6h0LTU6uYA= +github.com/spiffe/spire-api-sdk v1.3.2/go.mod h1:73BC0cOGkqRQrqoB1Djk7etxN+bE1ypmzZMkhCQs6kY= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= @@ -1386,6 +1393,8 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go 
v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g= +github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -1984,6 +1993,7 @@ google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2068,9 +2078,11 @@ google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.46.0 
h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/examples v0.0.0-20201130180447-c456688b1860/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2083,8 +2095,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2114,6 +2127,8 @@ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod 
h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= diff --git a/pkg/apis/config/feature_flags.go b/pkg/apis/config/feature_flags.go index 555b28e5f3e..c294daa608b 100644 --- a/pkg/apis/config/feature_flags.go +++ b/pkg/apis/config/feature_flags.go @@ -60,6 +60,8 @@ const ( DefaultSendCloudEventsForRuns = false // DefaultEmbeddedStatus is the default value for "embedded-status". DefaultEmbeddedStatus = FullEmbeddedStatus + // DefaultEnableSpire is the default value for "enable-spire". 
+ DefaultEnableSpire = false disableAffinityAssistantKey = "disable-affinity-assistant" disableCredsInitKey = "disable-creds-init" @@ -71,6 +73,7 @@ const ( enableAPIFields = "enable-api-fields" sendCloudEventsForRuns = "send-cloudevents-for-runs" embeddedStatus = "embedded-status" + enableSpire = "enable-spire" ) // FeatureFlags holds the features configurations @@ -87,6 +90,7 @@ type FeatureFlags struct { SendCloudEventsForRuns bool AwaitSidecarReadiness bool EmbeddedStatus string + EnableSpire bool } // GetFeatureFlagsConfigName returns the name of the configmap containing all @@ -148,6 +152,7 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if tc.EnableAPIFields == AlphaAPIFields { tc.EnableTektonOCIBundles = true tc.EnableCustomTasks = true + tc.EnableSpire = true } else { if err := setFeature(enableTektonOCIBundles, DefaultEnableTektonOciBundles, &tc.EnableTektonOCIBundles); err != nil { return nil, err @@ -155,6 +160,9 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setFeature(enableCustomTasks, DefaultEnableCustomTasks, &tc.EnableCustomTasks); err != nil { return nil, err } + if err := setFeature(enableSpire, DefaultEnableSpire, &tc.EnableSpire); err != nil { + return nil, err + } } return &tc, nil } diff --git a/pkg/apis/config/feature_flags_test.go b/pkg/apis/config/feature_flags_test.go index 40104cc76a1..9c685eb904b 100644 --- a/pkg/apis/config/feature_flags_test.go +++ b/pkg/apis/config/feature_flags_test.go @@ -60,6 +60,7 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) { EnableAPIFields: "alpha", SendCloudEventsForRuns: true, EmbeddedStatus: "both", + EnableSpire: true, }, fileName: "feature-flags-all-flags-set", }, @@ -70,6 +71,7 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) { // if the submitted text value is "false". 
EnableTektonOCIBundles: true, EnableCustomTasks: true, + EnableSpire: true, DisableAffinityAssistant: config.DefaultDisableAffinityAssistant, DisableCredsInit: config.DefaultDisableCredsInit, @@ -97,6 +99,17 @@ func TestNewFeatureFlagsFromConfigMap(t *testing.T) { }, fileName: "feature-flags-bundles-and-custom-tasks", }, + { + expectedConfig: &config.FeatureFlags{ + EnableAPIFields: "stable", + EmbeddedStatus: "full", + EnableSpire: true, + + RunningInEnvWithInjectedSidecars: config.DefaultRunningInEnvWithInjectedSidecars, + AwaitSidecarReadiness: config.DefaultAwaitSidecarReadiness, + }, + fileName: "feature-flags-enable-spire", + }, } for _, tc := range testCases { @@ -121,6 +134,7 @@ func TestNewFeatureFlagsFromEmptyConfigMap(t *testing.T) { EnableAPIFields: config.DefaultEnableAPIFields, SendCloudEventsForRuns: config.DefaultSendCloudEventsForRuns, EmbeddedStatus: config.DefaultEmbeddedStatus, + EnableSpire: config.DefaultEnableSpire, } verifyConfigFileWithExpectedFeatureFlagsConfig(t, FeatureFlagsConfigEmptyName, expectedConfig) } diff --git a/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml b/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml index 26631b8802c..751d6670185 100644 --- a/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml +++ b/pkg/apis/config/testdata/feature-flags-all-flags-set.yaml @@ -27,3 +27,4 @@ data: enable-api-fields: "alpha" send-cloudevents-for-runs: "true" embedded-status: "both" + enable-spire: "true" diff --git a/pkg/apis/config/testdata/feature-flags-enable-api-fields-overrides-bundles-and-custom-tasks.yaml b/pkg/apis/config/testdata/feature-flags-enable-api-fields-overrides-bundles-and-custom-tasks.yaml index 545ba7fe21d..cea578c238d 100644 --- a/pkg/apis/config/testdata/feature-flags-enable-api-fields-overrides-bundles-and-custom-tasks.yaml +++ b/pkg/apis/config/testdata/feature-flags-enable-api-fields-overrides-bundles-and-custom-tasks.yaml @@ -6,4 +6,5 @@ metadata: data: enable-tekton-oci-bundles: 
"false" enable-custom-tasks: "false" + enable-spire: "false" enable-api-fields: "alpha" diff --git a/pkg/apis/config/testdata/feature-flags-enable-spire.yaml b/pkg/apis/config/testdata/feature-flags-enable-spire.yaml new file mode 100644 index 00000000000..ae4e99a93c6 --- /dev/null +++ b/pkg/apis/config/testdata/feature-flags-enable-spire.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: feature-flags + namespace: tekton-pipelines +data: + enable-spire: "true" diff --git a/pkg/apis/pipeline/options.go b/pkg/apis/pipeline/options.go index 2e75adca4c1..6c15c86f365 100644 --- a/pkg/apis/pipeline/options.go +++ b/pkg/apis/pipeline/options.go @@ -16,8 +16,13 @@ limitations under the License. package pipeline +import ( + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" +) + // Options holds options passed to the Tekton Pipeline controllers // typically via command-line flags. type Options struct { - Images Images + Images Images + SpireConfig spireconfig.SpireConfig } diff --git a/pkg/apis/pipeline/v1beta1/taskrun_types.go b/pkg/apis/pipeline/v1beta1/taskrun_types.go index 44a0f02b816..767ce332fee 100644 --- a/pkg/apis/pipeline/v1beta1/taskrun_types.go +++ b/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -124,6 +124,19 @@ type TaskRunStatus struct { TaskRunStatusFields `json:",inline"` } +// TaskRunConditionType is an enum used to store TaskRun custom conditions +// conditions such as one used in spire results verification +type TaskRunConditionType string + +const ( + // TaskRunConditionResultsVerified is a Condition Type that indicates that the results were verified by spire + TaskRunConditionResultsVerified TaskRunConditionType = "SignedResultsVerified" +) + +func (t TaskRunConditionType) String() string { + return string(t) +} + // TaskRunReason is an enum used to store all TaskRun reason for // the Succeeded condition that are controlled by the TaskRun itself. 
Failure // reasons that emerge from underlying resources are not included here @@ -147,6 +160,12 @@ const ( TaskRunReasonResolvingTaskRef = "ResolvingTaskRef" // TaskRunReasonImagePullFailed is the reason set when the step of a task fails due to image not being pulled TaskRunReasonImagePullFailed TaskRunReason = "TaskRunImagePullFailed" + // TaskRunReasonResultsVerified is the reason set when the TaskRun results are verified by spire + TaskRunReasonResultsVerified TaskRunReason = "TaskRunResultsVerified" + // TaskRunReasonsResultsVerificationFailed is the reason set when the TaskRun results failed to be verified by spire + TaskRunReasonsResultsVerificationFailed TaskRunReason = "TaskRunResultsVerificationFailed" + // AwaitingTaskRunResults is the reason set when waiting upon `TaskRun` results and signatures to verify + AwaitingTaskRunResults TaskRunReason = "AwaitingTaskRunResults" ) func (t TaskRunReason) String() string { @@ -424,6 +443,16 @@ func (tr *TaskRun) IsCancelled() bool { return tr.Spec.Status == TaskRunSpecStatusCancelled } +// IsTaskRunResultVerified returns true if the TaskRun's results have been validated by spire. 
+func (tr *TaskRun) IsTaskRunResultVerified() bool { + return tr.Status.GetCondition(apis.ConditionType(TaskRunConditionResultsVerified.String())).IsTrue() +} + +// IsTaskRunResultDone returns true if the TaskRun's results are available for verification +func (tr *TaskRun) IsTaskRunResultDone() bool { + return !tr.Status.GetCondition(apis.ConditionType(TaskRunConditionResultsVerified.String())).IsUnknown() +} + // HasTimedOut returns true if the TaskRun runtime is beyond the allowed timeout func (tr *TaskRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool { if tr.Status.StartTime.IsZero() { diff --git a/pkg/apis/pipeline/v1beta1/taskrun_types_test.go b/pkg/apis/pipeline/v1beta1/taskrun_types_test.go index eb80af8cb65..cce99b9ff1f 100644 --- a/pkg/apis/pipeline/v1beta1/taskrun_types_test.go +++ b/pkg/apis/pipeline/v1beta1/taskrun_types_test.go @@ -135,6 +135,70 @@ func TestTaskRunIsCancelled(t *testing.T) { } } +func TestTaskRunIsTaskRunResultVerified(t *testing.T) { + tr := &v1beta1.TaskRun{ + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionTrue, + Reason: v1beta1.TaskRunReasonResultsVerified.String(), + Message: "Successfully verified all spire signed taskrun results", + }}, + }, + }, + } + if !tr.IsTaskRunResultVerified() { + t.Fatal("Expected pipelinerun status to be results verified") + } + if tr.Status.GetCondition(apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String())).Reason != v1beta1.TaskRunReasonResultsVerified.String() { + t.Fatal("Expected pipelinerun status reason to be TaskRunResultsVerified") + } +} + +func TestTaskRunEmptyIsTaskRunResultVerified(t *testing.T) { + tr := &v1beta1.TaskRun{ + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{}, + }, + } + if tr.IsTaskRunResultVerified() { + t.Fatal("Expected false as no condition exists for SignedResultsVerified") + } 
+} + +func TestTaskRunIsTaskRunResultDone(t *testing.T) { + tr := &v1beta1.TaskRun{ + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: []apis.Condition{{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionUnknown, + Reason: v1beta1.AwaitingTaskRunResults.String(), + Message: "Waiting upon TaskRun results and signatures to verify", + }}, + }, + }, + } + if tr.IsTaskRunResultDone() { + t.Fatal("Expected pipelinerun status to be unknown and waiting") + } + if tr.Status.GetCondition(apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String())).Reason != v1beta1.AwaitingTaskRunResults.String() { + t.Fatal("Expected pipelinerun status reason to be AwaitingTaskRunResults") + } +} + +func TestTaskRunEmptyIsTaskRunResultDone(t *testing.T) { + tr := &v1beta1.TaskRun{ + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{}, + }, + } + if tr.IsTaskRunResultDone() { + t.Fatal("Expected false as no condition exists for SignedResultsVerified") + } +} + func TestTaskRunHasVolumeClaimTemplate(t *testing.T) { tr := &v1beta1.TaskRun{ Spec: v1beta1.TaskRunSpec{ diff --git a/pkg/entrypoint/entrypointer.go b/pkg/entrypoint/entrypointer.go index 92a13219a21..5fc8c59f429 100644 --- a/pkg/entrypoint/entrypointer.go +++ b/pkg/entrypoint/entrypointer.go @@ -31,6 +31,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/termination" "go.uber.org/zap" ) @@ -80,6 +81,10 @@ type Entrypointer struct { OnError string // StepMetadataDir is the directory for a step where the step related metadata can be stored StepMetadataDir string + // SpireWorkloadAPI connects to spire and obtains an SVID based on the taskrun + SpireWorkloadAPI spire.EntrypointerAPIClient + // ResultsDirectory is the directory to find results, defaults to pipeline.DefaultResultPath + 
ResultsDirectory string } // Waiter encapsulates waiting for files to exist. @@ -136,13 +141,14 @@ func (e Entrypointer) Go() error { ResultType: v1beta1.InternalTektonResultType, }) + ctx := context.Background() var err error + if e.Timeout != nil && *e.Timeout < time.Duration(0) { err = fmt.Errorf("negative timeout specified") } if err == nil { - ctx := context.Background() var cancel context.CancelFunc if e.Timeout != nil && *e.Timeout != time.Duration(0) { ctx, cancel = context.WithTimeout(ctx, *e.Timeout) @@ -184,7 +190,11 @@ func (e Entrypointer) Go() error { // strings.Split(..) with an empty string returns an array that contains one element, an empty string. // This creates an error when trying to open the result folder as a file. if len(e.Results) >= 1 && e.Results[0] != "" { - if err := e.readResultsFromDisk(pipeline.DefaultResultPath); err != nil { + resultPath := pipeline.DefaultResultPath + if e.ResultsDirectory != "" { + resultPath = e.ResultsDirectory + } + if err := e.readResultsFromDisk(ctx, resultPath); err != nil { logger.Fatalf("Error while handling results: %s", err) } } @@ -192,7 +202,7 @@ func (e Entrypointer) Go() error { return err } -func (e Entrypointer) readResultsFromDisk(resultDir string) error { +func (e Entrypointer) readResultsFromDisk(ctx context.Context, resultDir string) error { output := []v1beta1.PipelineResourceResult{} for _, resultFile := range e.Results { if resultFile == "" { @@ -211,6 +221,15 @@ func (e Entrypointer) readResultsFromDisk(resultDir string) error { ResultType: v1beta1.TaskRunResultType, }) } + + if e.SpireWorkloadAPI != nil { + signed, err := e.SpireWorkloadAPI.Sign(ctx, output) + if err != nil { + return err + } + output = append(output, signed...) 
+ } + // push output to termination path if len(output) != 0 { if err := termination.WriteMessage(e.TerminationPath, output); err != nil { diff --git a/pkg/entrypoint/entrypointer_test.go b/pkg/entrypoint/entrypointer_test.go index cb9abe44979..e6082c67539 100644 --- a/pkg/entrypoint/entrypointer_test.go +++ b/pkg/entrypoint/entrypointer_test.go @@ -24,6 +24,7 @@ import ( "io/ioutil" "os" "os/exec" + "path" "path/filepath" "reflect" "testing" @@ -31,8 +32,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/termination" "github.com/tektoncd/pipeline/test/diff" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/logging" ) @@ -284,6 +287,7 @@ func TestReadResultsFromDisk(t *testing.T) { }, } { t.Run(c.desc, func(t *testing.T) { + ctx := context.Background() terminationPath := "termination" if terminationFile, err := ioutil.TempFile("", "termination"); err != nil { t.Fatalf("unexpected error creating temporary termination file: %v", err) @@ -314,7 +318,7 @@ func TestReadResultsFromDisk(t *testing.T) { Results: resultsFilePath, TerminationPath: terminationPath, } - if err := e.readResultsFromDisk(""); err != nil { + if err := e.readResultsFromDisk(ctx, ""); err != nil { t.Fatal(err) } msg, err := ioutil.ReadFile(terminationPath) @@ -434,6 +438,167 @@ func TestEntrypointer_OnError(t *testing.T) { } } +func TestEntrypointerResults(t *testing.T) { + for _, c := range []struct { + desc, entrypoint, postFile, stepDir, stepDirLink string + waitFiles, args []string + resultsToWrite map[string]string + resultsOverride []string + breakpointOnFailure bool + sign bool + signVerify bool + }{{ + desc: "do nothing", + }, { + desc: "no results", + entrypoint: "echo", + }, { + desc: "write single result", + entrypoint: "echo", + resultsToWrite: map[string]string{ + "foo": "abc", + }, + }, { + desc: "write multiple result", + entrypoint: "echo", + 
resultsToWrite: map[string]string{ + "foo": "abc", + "bar": "def", + }, + }, { + // These next two tests show that if no results are defined in the entrypointer, then no signature is produced, + // indicating that no signature was created. However, it is important to note that if results were defined + // but no results were created, a signature is still produced. + desc: "no results signed", + entrypoint: "echo", + sign: true, + signVerify: false, + }, { + desc: "defined results but no results produced signed", + entrypoint: "echo", + resultsOverride: []string{"foo"}, + sign: true, + signVerify: true, + }, { + desc: "write single result", + entrypoint: "echo", + resultsToWrite: map[string]string{ + "foo": "abc", + }, + sign: true, + signVerify: true, + }, { + desc: "write multiple result", + entrypoint: "echo", + resultsToWrite: map[string]string{ + "foo": "abc", + "bar": "def", + }, + sign: true, + signVerify: true, + }, { + desc: "write n/m results", + entrypoint: "echo", + resultsToWrite: map[string]string{ + "foo": "abc", + }, + resultsOverride: []string{"foo", "bar"}, + sign: true, + signVerify: true, + }} { + t.Run(c.desc, func(t *testing.T) { + ctx := context.Background() + fw, fpw := &fakeWaiter{}, &fakePostWriter{} + var fr Runner = &fakeRunner{} + timeout := time.Duration(0) + terminationPath := "termination" + if terminationFile, err := ioutil.TempFile("", "termination"); err != nil { + t.Fatalf("unexpected error creating temporary termination file: %v", err) + } else { + terminationPath = terminationFile.Name() + defer os.Remove(terminationFile.Name()) + } + + resultsDir := createTmpDir(t, "results") + var results []string + if c.resultsToWrite != nil { + tmpResultsToWrite := map[string]string{} + for k, v := range c.resultsToWrite { + resultFile := path.Join(resultsDir, k) + tmpResultsToWrite[resultFile] = v + results = append(results, k) + } + + fr = &fakeResultsWriter{ + resultsToWrite: tmpResultsToWrite, + } + } + + signClient, verifyClient, tr := 
getMockSpireClient(ctx) + if !c.sign { + signClient = nil + } + + if c.resultsOverride != nil { + results = c.resultsOverride + } + + err := Entrypointer{ + Command: append([]string{c.entrypoint}, c.args...), + WaitFiles: c.waitFiles, + PostFile: c.postFile, + Waiter: fw, + Runner: fr, + PostWriter: fpw, + Results: results, + ResultsDirectory: resultsDir, + TerminationPath: terminationPath, + Timeout: &timeout, + BreakpointOnFailure: c.breakpointOnFailure, + StepMetadataDir: c.stepDir, + SpireWorkloadAPI: signClient, + }.Go() + if err != nil { + t.Fatalf("Entrypointer failed: %v", err) + } + + fileContents, err := ioutil.ReadFile(terminationPath) + if err == nil { + resultCheck := map[string]bool{} + var entries []v1beta1.PipelineResourceResult + if err := json.Unmarshal(fileContents, &entries); err != nil { + t.Fatalf("failed to unmarshal results: %v", err) + } + + for _, result := range entries { + if _, ok := c.resultsToWrite[result.Key]; ok { + if c.resultsToWrite[result.Key] == result.Value { + resultCheck[result.Key] = true + } else { + t.Errorf("expected result (%v) to have value %v, got %v", result.Key, result.Value, c.resultsToWrite[result.Key]) + } + } + } + + if len(resultCheck) != len(c.resultsToWrite) { + t.Error("number of results matching did not add up") + } + + // Check signature + verified := verifyClient.VerifyTaskRunResults(ctx, entries, tr) == nil + if verified != c.signVerify { + t.Errorf("expected signature verify result %v, got %v", c.signVerify, verified) + } + } else if !os.IsNotExist(err) { + t.Error("Wanted termination file written, got nil") + } + if err := os.Remove(terminationPath); err != nil { + t.Errorf("Could not remove termination path: %s", err) + } + }) + } +} + type fakeWaiter struct{ waited []string } func (f *fakeWaiter) Wait(file string, _ bool, _ bool) error { @@ -503,3 +668,55 @@ func (f *fakeExitErrorRunner) Run(ctx context.Context, args ...string) error { f.args = &args return exec.Command("ls", "/bogus/path").Run() } + 
+type fakeResultsWriter struct { + args *[]string + resultsToWrite map[string]string +} + +func (f *fakeResultsWriter) Run(ctx context.Context, args ...string) error { + f.args = &args + for k, v := range f.resultsToWrite { + err := ioutil.WriteFile(k, []byte(v), 0666) + if err != nil { + return err + } + } + return nil +} + +func createTmpDir(t *testing.T, name string) string { + tmpDir, err := ioutil.TempDir("", name) + if err != nil { + t.Fatalf("unexpected error creating temporary dir: %v", err) + } + return tmpDir +} + +func getMockSpireClient(ctx context.Context) (spire.EntrypointerAPIClient, spire.ControllerAPIClient, *v1beta1.TaskRun) { + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "taskrun-example", + Namespace: "foo", + }, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: "taskname", + APIVersion: "a1", + }, + ServiceAccountName: "test-sa", + }, + } + + sc := &spire.MockClient{} + + _ = sc.CreateEntries(ctx, tr, nil, 10000) + + // bootstrap with about 20 calls to sign which should be enough for testing + id := sc.GetIdentity(tr) + for i := 0; i < 20; i++ { + sc.SignIdentities = append(sc.SignIdentities, id) + } + + return sc, sc, tr +} diff --git a/pkg/pod/pod.go b/pkg/pod/pod.go index 087f6013352..28638aaebf0 100644 --- a/pkg/pod/pod.go +++ b/pkg/pod/pod.go @@ -29,6 +29,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/names" + "github.com/tektoncd/pipeline/pkg/spire" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -119,6 +120,12 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec featureFlags := config.FromContextOrDefaults(ctx).FeatureFlags alphaAPIEnabled := featureFlags.EnableAPIFields == config.AlphaAPIFields + // Entrypoint arg to enable or disable spire + var commonExtraEntrypointArgs []string + if 
config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire { + commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-enable_spire") + } + // Add our implicit volumes first, so they can be overridden by the user if they prefer. volumes = append(volumes, implicitVolumes...) volumeMounts = append(volumeMounts, implicitVolumeMounts...) @@ -186,11 +193,13 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec } readyImmediately := isPodReadyImmediately(*featureFlags, taskSpec.Sidecars) + // append credEntrypointArgs with entrypoint arg that contains if spire is enabled by configmap + commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, credEntrypointArgs...) if alphaAPIEnabled { - stepContainers, err = orderContainers(credEntrypointArgs, stepContainers, &taskSpec, taskRun.Spec.Debug, !readyImmediately) + stepContainers, err = orderContainers(commonExtraEntrypointArgs, stepContainers, &taskSpec, taskRun.Spec.Debug, !readyImmediately) } else { - stepContainers, err = orderContainers(credEntrypointArgs, stepContainers, &taskSpec, nil, !readyImmediately) + stepContainers, err = orderContainers(commonExtraEntrypointArgs, stepContainers, &taskSpec, nil, !readyImmediately) } if err != nil { return nil, err @@ -266,11 +275,37 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec // Add podTemplate Volumes to the explicitly declared use volumes volumes = append(volumes, taskSpec.Volumes...) volumes = append(volumes, podTemplate.Volumes...) 
- if err := v1beta1.ValidateVolumes(volumes); err != nil { return nil, err } + podAnnotations := kmeta.CopyMap(taskRun.Annotations) + if config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire { + volumes = append(volumes, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + }, + }, + }) + + for i := range stepContainers { + c := &stepContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + }) + } + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + }) + } + } + mergedPodContainers := stepContainers // Merge sidecar containers with step containers. @@ -289,7 +324,6 @@ func (b *Builder) Build(ctx context.Context, taskRun *v1beta1.TaskRun, taskSpec priorityClassName = *podTemplate.PriorityClassName } - podAnnotations := kmeta.CopyMap(taskRun.Annotations) version, err := changeset.Get() if err != nil { return nil, err diff --git a/pkg/pod/pod_test.go b/pkg/pod/pod_test.go index 058346606b9..f079a4bbd40 100644 --- a/pkg/pod/pod_test.go +++ b/pkg/pod/pod_test.go @@ -35,6 +35,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/test/diff" "github.com/tektoncd/pipeline/test/names" corev1 "k8s.io/api/core/v1" @@ -87,6 +88,15 @@ func TestPodBuild(t *testing.T) { priorityClassName := "system-cluster-critical" taskRunName := "taskrun-name" + initContainers := []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})} + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: 
spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + }) + } + for _, c := range []struct { desc string trs v1beta1.TaskRunSpec @@ -1522,7 +1532,7 @@ _EOF_ }, want: &corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, - InitContainers: []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})}, + InitContainers: initContainers, Containers: []corev1.Container{{ Name: "step-name", Image: "image", @@ -1537,6 +1547,7 @@ _EOF_ "/tekton/termination", "-step_metadata_dir", "/tekton/run/0/status", + "-enable_spire", "-entrypoint", "cmd", "--", @@ -1544,6 +1555,9 @@ _EOF_ VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { Name: "tekton-creds-init-home-0", MountPath: "/tekton/creds", + }, { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, }}, implicitVolumeMounts...), TerminationMessagePath: "/tekton/termination", Env: []corev1.EnvVar{ @@ -1553,6 +1567,13 @@ _EOF_ Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-0", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, + }, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + }, + }, }), ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, }, @@ -1572,7 +1593,7 @@ _EOF_ }, want: &corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, - InitContainers: []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})}, + InitContainers: initContainers, Containers: []corev1.Container{{ Name: "step-name", Image: "image", @@ -1587,6 +1608,7 @@ _EOF_ "/tekton/termination", "-step_metadata_dir", "/tekton/run/0/status", + "-enable_spire", "-entrypoint", "cmd", "--", @@ -1594,6 +1616,9 @@ _EOF_ VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { 
Name: "tekton-creds-init-home-0", MountPath: "/tekton/creds", + }, { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, }}, implicitVolumeMounts...), TerminationMessagePath: "/tekton/termination", Env: []corev1.EnvVar{ @@ -1605,6 +1630,13 @@ _EOF_ Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-0", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, + }, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + }, + }, }), ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, }, @@ -1858,9 +1890,21 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7 `}, } + initContainers := []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}}), placeScriptsContainer} + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + }) + } + containersVolumeMounts := append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { Name: "tekton-creds-init-home-0", MountPath: "/tekton/creds", + }, { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, }}, implicitVolumeMounts...) 
containersVolumeMounts = append(containersVolumeMounts, debugScriptsVolumeMount) containersVolumeMounts = append(containersVolumeMounts, corev1.VolumeMount{ @@ -1891,7 +1935,7 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7 }, want: &corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyNever, - InitContainers: []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}}), placeScriptsContainer}, + InitContainers: initContainers, Containers: []corev1.Container{{ Name: "step-name", Image: "image", @@ -1906,6 +1950,7 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7 "/tekton/termination", "-step_metadata_dir", "/tekton/run/0/status", + "-enable_spire", "-breakpoint_on_failure", "-entrypoint", "cmd", @@ -1917,6 +1962,13 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7 Volumes: append(implicitVolumes, debugScriptsVolume, debugInfoVolume, binVolume, scriptsVolume, runVolume(0), downwardVolume, corev1.Volume{ Name: "tekton-creds-init-home-0", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, + }, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + }, + }, }), ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, }, @@ -2006,6 +2058,167 @@ debug-fail-continue-heredoc-randomly-generated-mz4c7 } } +func TestPodBuildwithSpireEnabled(t *testing.T) { + initContainers := []corev1.Container{entrypointInitContainer(images.EntrypointImage, []v1beta1.Step{{Name: "name"}})} + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + }) + } + + for _, c := range []struct { + desc string + trs v1beta1.TaskRunSpec + trAnnotation map[string]string + ts v1beta1.TaskSpec + want *corev1.PodSpec + wantAnnotations map[string]string + }{{ + desc: "simple with debug 
breakpoint onFailure", + trs: v1beta1.TaskRunSpec{ + Debug: &v1beta1.TaskRunDebug{ + Breakpoint: []string{breakpointOnFailure}, + }, + }, + ts: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Name: "name", + Image: "image", + Command: []string{"cmd"}, // avoid entrypoint lookup. + }}, + }, + want: &corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyNever, + InitContainers: initContainers, + Containers: []corev1.Container{{ + Name: "step-name", + Image: "image", + Command: []string{"/tekton/bin/entrypoint"}, + Args: []string{ + "-wait_file", + "/tekton/downward/ready", + "-wait_file_content", + "-post_file", + "/tekton/run/0/out", + "-termination_path", + "/tekton/termination", + "-step_metadata_dir", + "/tekton/run/0/status", + "-enable_spire", + "-entrypoint", + "cmd", + "--", + }, + VolumeMounts: append([]corev1.VolumeMount{binROMount, runMount(0, false), downwardMount, { + Name: "tekton-creds-init-home-0", + MountPath: "/tekton/creds", + }, { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + }}, implicitVolumeMounts...), + TerminationMessagePath: "/tekton/termination", + }}, + Volumes: append(implicitVolumes, binVolume, runVolume(0), downwardVolume, corev1.Volume{ + Name: "tekton-creds-init-home-0", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumMemory}}, + }, corev1.Volume{ + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + }, + }, + }), + ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, + }, + }} { + t.Run(c.desc, func(t *testing.T) { + featureFlags := map[string]string{ + "enable-spire": "true", + } + names.TestingSeed() + store := config.NewStore(logtesting.TestLogger(t)) + store.OnConfigChanged( + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{Name: config.GetFeatureFlagsConfigName(), Namespace: system.Namespace()}, + Data: featureFlags, + }, + ) + kubeclient := fakek8s.NewSimpleClientset( + 
&corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "default"}}, + &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "service-account", Namespace: "default"}, + Secrets: []corev1.ObjectReference{{ + Name: "multi-creds", + }}, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-creds", + Namespace: "default", + Annotations: map[string]string{ + "tekton.dev/docker-0": "https://us.gcr.io", + "tekton.dev/docker-1": "https://docker.io", + "tekton.dev/git-0": "github.com", + "tekton.dev/git-1": "gitlab.com", + }}, + Type: "kubernetes.io/basic-auth", + Data: map[string][]byte{ + "username": []byte("foo"), + "password": []byte("BestEver"), + }, + }, + ) + var trAnnotations map[string]string + if c.trAnnotation == nil { + trAnnotations = map[string]string{ + ReleaseAnnotation: fakeVersion, + } + } else { + trAnnotations = c.trAnnotation + trAnnotations[ReleaseAnnotation] = fakeVersion + } + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "taskrun-name", + Namespace: "default", + Annotations: trAnnotations, + }, + Spec: c.trs, + } + + // No entrypoints should be looked up. 
+ entrypointCache := fakeCache{} + builder := Builder{ + Images: images, + KubeClient: kubeclient, + EntrypointCache: entrypointCache, + } + + got, err := builder.Build(store.ToContext(context.Background()), tr, c.ts) + if err != nil { + t.Fatalf("builder.Build: %v", err) + } + + expectedName := kmeta.ChildName(tr.Name, "-pod") + if d := cmp.Diff(expectedName, got.Name); d != "" { + t.Errorf("Pod name does not match: %q", d) + } + + if d := cmp.Diff(c.want, &got.Spec, resourceQuantityCmp, volumeSort, volumeMountSort); d != "" { + t.Errorf("Diff %s", diff.PrintWantGot(d)) + } + + if c.wantAnnotations != nil { + if d := cmp.Diff(c.wantAnnotations, got.ObjectMeta.Annotations, cmpopts.IgnoreMapEntries(ignoreReleaseAnnotation)); d != "" { + t.Errorf("Annotation Diff(-want, +got):\n%s", d) + } + } + }) + } +} + func TestMakeLabels(t *testing.T) { taskRunName := "task-run-name" want := map[string]string{ diff --git a/pkg/pod/status.go b/pkg/pod/status.go index 5cddaffb546..7f35c47393a 100644 --- a/pkg/pod/status.go +++ b/pkg/pod/status.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "context" "encoding/json" "fmt" "strconv" @@ -25,6 +26,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/termination" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" @@ -100,11 +102,16 @@ func SidecarsReady(podStatus corev1.PodStatus) bool { } // MakeTaskRunStatus returns a TaskRunStatus based on the Pod's status. 
-func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod) (v1beta1.TaskRunStatus, error) { +func MakeTaskRunStatus(ctx context.Context, logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev1.Pod, spireEnabled bool, + spireAPI spire.ControllerAPIClient) (v1beta1.TaskRunStatus, error) { trs := &tr.Status if trs.GetCondition(apis.ConditionSucceeded) == nil || trs.GetCondition(apis.ConditionSucceeded).Status == corev1.ConditionUnknown { // If the taskRunStatus doesn't exist yet, it's because we just started running markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") + + if spireEnabled { + markStatusSignedResultsRunning(trs) + } } sortPodContainerStatuses(pod.Status.ContainerStatuses, pod.Spec.Containers) @@ -114,7 +121,7 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev if complete { updateCompletedTaskRunStatus(logger, trs, pod) } else { - updateIncompleteTaskRunStatus(trs, pod) + updateIncompleteTaskRunStatus(trs, pod, spireEnabled) } trs.PodName = pod.Name @@ -132,7 +139,7 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev } var merr *multierror.Error - if err := setTaskRunStatusBasedOnStepStatus(logger, stepStatuses, &tr); err != nil { + if err := setTaskRunStatusBasedOnStepStatus(ctx, logger, stepStatuses, &tr, spireEnabled, spireAPI); err != nil { merr = multierror.Append(merr, err) } @@ -143,7 +150,30 @@ func MakeTaskRunStatus(logger *zap.SugaredLogger, tr v1beta1.TaskRun, pod *corev return *trs, merr.ErrorOrNil() } -func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun) *multierror.Error { +func setTaskRunStatusBasedOnSpireVerification(ctx context.Context, logger *zap.SugaredLogger, tr *v1beta1.TaskRun, trs *v1beta1.TaskRunStatus, + filteredResults []v1beta1.PipelineResourceResult, spireAPI spire.ControllerAPIClient) { + + if 
tr.IsSuccessful() && spireAPI != nil && + ((tr.Status.TaskSpec != nil && len(tr.Status.TaskSpec.Results) >= 1) || len(filteredResults) >= 1) { + logger.Info("validating signed results with spire: ", trs.TaskRunResults) + if err := spireAPI.VerifyTaskRunResults(ctx, filteredResults, tr); err != nil { + logger.Errorf("failed to verify signed results with spire: %w", err) + markStatusSignedResultsFailure(trs, err.Error()) + } else { + logger.Info("successfully validated signed results with spire") + markStatusSignedResultsVerified(trs) + } + } + + // If no results and no results requested, set verified unless results were specified as part of task spec + if len(filteredResults) == 0 && (tr.Status.TaskSpec == nil || len(tr.Status.TaskSpec.Results) == 0) { + markStatusSignedResultsVerified(trs) + } +} + +func setTaskRunStatusBasedOnStepStatus(ctx context.Context, logger *zap.SugaredLogger, stepStatuses []corev1.ContainerStatus, tr *v1beta1.TaskRun, + spireEnabled bool, spireAPI spire.ControllerAPIClient) *multierror.Error { + trs := &tr.Status var merr *multierror.Error @@ -155,7 +185,10 @@ func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses [ if err != nil { logger.Errorf("termination message could not be parsed as JSON: %v", err) merr = multierror.Append(merr, err) + } else { + logger.Info("Results: ", results) + time, err := extractStartedAtTimeFromResults(results) if err != nil { logger.Errorf("error setting the start time of step %q in taskrun %q: %v", s.Name, tr.Name, err) @@ -166,10 +199,13 @@ func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses [ logger.Errorf("error extracting the exit code of step %q in taskrun %q: %v", s.Name, tr.Name, err) merr = multierror.Append(merr, err) } - taskResults, pipelineResourceResults, filteredResults := filterResultsAndResources(results) + taskResults, pipelineResourceResults, filteredResults := filterResultsAndResources(results, spireEnabled) if tr.IsSuccessful() { 
trs.TaskRunResults = append(trs.TaskRunResults, taskResults...) trs.ResourcesResult = append(trs.ResourcesResult, pipelineResourceResults...) + if spireEnabled { + setTaskRunStatusBasedOnSpireVerification(ctx, logger, tr, trs, filteredResults, spireAPI) + } } msg, err = createMessageFromResults(filteredResults) if err != nil { @@ -193,7 +229,6 @@ func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses [ ImageID: s.ImageID, }) } - return merr } @@ -220,7 +255,8 @@ func createMessageFromResults(results []v1beta1.PipelineResourceResult) (string, return string(bytes), nil } -func filterResultsAndResources(results []v1beta1.PipelineResourceResult) ([]v1beta1.TaskRunResult, []v1beta1.PipelineResourceResult, []v1beta1.PipelineResourceResult) { +func filterResultsAndResources(results []v1beta1.PipelineResourceResult, spireEnabled bool) ([]v1beta1.TaskRunResult, []v1beta1.PipelineResourceResult, []v1beta1.PipelineResourceResult) { + var taskResults []v1beta1.TaskRunResult var pipelineResourceResults []v1beta1.PipelineResourceResult var filteredResults []v1beta1.PipelineResourceResult @@ -235,6 +271,12 @@ func filterResultsAndResources(results []v1beta1.PipelineResourceResult) ([]v1be // TODO(#4723): Validate that the type we inferred from aos is matching the // TaskResult Type before setting it to the taskRunResult. 
// TODO(#4723): Validate the taskrun results against taskresults for object val + if spireEnabled { + if r.Key == spire.KeySVID || r.Key == spire.KeyResultManifest || strings.HasSuffix(r.Key, spire.KeySignatureSuffix) { + filteredResults = append(filteredResults, r) + continue + } + } taskRunResult := v1beta1.TaskRunResult{ Name: r.Key, Type: v1beta1.ResultsType(aos.Type), @@ -316,10 +358,13 @@ func updateCompletedTaskRunStatus(logger *zap.SugaredLogger, trs *v1beta1.TaskRu trs.CompletionTime = &metav1.Time{Time: time.Now()} } -func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) { +func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod, spireEnabled bool) { switch pod.Status.Phase { case corev1.PodRunning: markStatusRunning(trs, v1beta1.TaskRunReasonRunning.String(), "Not all Steps in the Task have finished executing") + if spireEnabled { + markStatusSignedResultsRunning(trs) + } case corev1.PodPending: switch { case IsPodExceedingNodeResources(pod): @@ -330,6 +375,9 @@ func updateIncompleteTaskRunStatus(trs *v1beta1.TaskRunStatus, pod *corev1.Pod) markStatusRunning(trs, ReasonPullImageFailed, getWaitingMessage(pod)) default: markStatusRunning(trs, ReasonPending, getWaitingMessage(pod)) + if spireEnabled { + markStatusSignedResultsRunning(trs) + } } } } @@ -507,6 +555,36 @@ func markStatusSuccess(trs *v1beta1.TaskRunStatus) { }) } +// markStatusResultsVerified sets taskrun status to +func markStatusSignedResultsVerified(trs *v1beta1.TaskRunStatus) { + trs.SetCondition(&apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionTrue, + Reason: v1beta1.TaskRunReasonResultsVerified.String(), + Message: "Successfully verified all spire signed taskrun results", + }) +} + +// markStatusFailure sets taskrun status to failure with specified reason +func markStatusSignedResultsFailure(trs *v1beta1.TaskRunStatus, message string) { + 
trs.SetCondition(&apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionFalse, + Reason: v1beta1.TaskRunReasonsResultsVerificationFailed.String(), + Message: message, + }) +} + +// markStatusRunning sets taskrun status to running +func markStatusSignedResultsRunning(trs *v1beta1.TaskRunStatus) { + trs.SetCondition(&apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionUnknown, + Reason: v1beta1.AwaitingTaskRunResults.String(), + Message: "Waiting upon TaskRun results and signatures to verify", + }) +} + // sortPodContainerStatuses reorders a pod's container statuses so that // they're in the same order as the step containers from the TaskSpec. func sortPodContainerStatuses(podContainerStatuses []corev1.ContainerStatus, podSpecContainers []corev1.Container) { diff --git a/pkg/pod/status_test.go b/pkg/pod/status_test.go index e2c8f4e9dee..fd7c44880bd 100644 --- a/pkg/pod/status_test.go +++ b/pkg/pod/status_test.go @@ -17,6 +17,9 @@ limitations under the License. 
package pod import ( + "context" + "encoding/json" + "sort" "strings" "testing" "time" @@ -24,6 +27,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" + "github.com/tektoncd/pipeline/pkg/termination" "github.com/tektoncd/pipeline/test/diff" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -66,6 +71,7 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) { }}, }} { t.Run(c.desc, func(t *testing.T) { + ctx := context.Background() startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC) tr := v1beta1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ @@ -80,7 +86,7 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) { } logger, _ := logging.NewLogger("", "status") - merr := setTaskRunStatusBasedOnStepStatus(logger, c.ContainerStatuses, &tr) + merr := setTaskRunStatusBasedOnStepStatus(ctx, logger, c.ContainerStatuses, &tr, false, nil) if merr != nil { t.Errorf("setTaskRunStatusBasedOnStepStatus: %s", merr) } @@ -89,6 +95,396 @@ func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) { } } +func TestMakeTaskRunStatusVerify(t *testing.T) { + sc := &spire.MockClient{} + processConditions := cmp.Transformer("sortConditionsAndFilterMessages", func(in []apis.Condition) []apis.Condition { + for i := range in { + in[i].Message = "" + } + sort.Slice(in, func(i, j int) bool { + return in[i].Type < in[j].Type + }) + return in + }) + + terminationMessageTrans := cmp.Transformer("sortAndPrint", func(in *corev1.ContainerStateTerminated) *corev1.ContainerStateTerminated { + prs, err := termination.ParseMessage(nil, in.Message) + if err != nil { + return in + } + sort.Slice(prs, func(i, j int) bool { + return prs[i].Key < prs[j].Key + }) + + b, _ := json.Marshal(prs) + in.Message = string(b) + + return in + }) + + // test awaiting results - OK + // results + test signed termination message - OK + // results + test unsigned 
termination message - OK + + // no task results, no result + test signed termiantion message + // no task results, no result + test unsigned termiantion message + // force task result, no result + test unsigned termiantion message + + statusSRVUnknown := func() duckv1beta1.Status { + status := statusRunning() + status.Conditions = append(status.Conditions, apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionUnknown, + Reason: v1beta1.AwaitingTaskRunResults.String(), + Message: "Waiting upon TaskRun results and signatures to verify", + }) + return status + } + + statusSRVVerified := func() duckv1beta1.Status { + status := statusSuccess() + status.Conditions = append(status.Conditions, apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionTrue, + Reason: v1beta1.TaskRunReasonResultsVerified.String(), + Message: "Successfully verified all spire signed taskrun results", + }) + return status + } + + statusSRVUnverified := func() duckv1beta1.Status { + status := statusSuccess() + status.Conditions = append(status.Conditions, apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionFalse, + Reason: v1beta1.TaskRunReasonsResultsVerificationFailed.String(), + Message: "", + }) + return status + } + + for _, c := range []struct { + desc string + specifyTaskRunResult bool + resultOut []v1beta1.PipelineResourceResult + podStatus corev1.PodStatus + pod corev1.Pod + want v1beta1.TaskRunStatus + }{{ + // test awaiting results + desc: "running pod awaiting results", + podStatus: corev1.PodStatus{}, + + want: v1beta1.TaskRunStatus{ + Status: statusSRVUnknown(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{}, + Sidecars: []v1beta1.SidecarState{}, + }, + }, + }, { + desc: "test result with pipeline result without signed termination message", + podStatus: 
corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234","resourceName":"source-image"}]`, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVUnverified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"digest","value":"sha256:1234","resourceName":"source-image"},{"key":"resultName","value":"resultValue","type":1}]`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + ResourcesResult: []v1beta1.PipelineResourceResult{{ + Key: "digest", + Value: "sha256:1234", + ResourceName: "source-image", + }}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultName", + Type: v1beta1.ResultsTypeString, + Value: *v1beta1.NewArrayOrString("resultValue"), + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result with pipeline result with signed termination message", + resultOut: []v1beta1.PipelineResourceResult{ + { + Key: "resultName", + Value: "resultValue", + ResultType: v1beta1.TaskRunResultType, + }, + }, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: ``, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }}, + 
Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultName", + Type: v1beta1.ResultsTypeString, + Value: *v1beta1.NewArrayOrString("resultValue"), + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test array result with signed termination message", + resultOut: []v1beta1.PipelineResourceResult{ + { + Key: "resultName", + Value: "[\"hello\",\"world\"]", + ResultType: v1beta1.TaskRunResultType, + }, + }, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: ``, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + TaskRunResults: []v1beta1.TaskRunResult{{ + Name: "resultName", + Type: v1beta1.ResultsTypeArray, + Value: *v1beta1.NewArrayOrString("hello", "world"), + }}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result with no result with signed termination message", + resultOut: []v1beta1.PipelineResourceResult{}, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: 
v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `to be overridden by signing`, + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result with no result without signed termination message", + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVVerified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }, { + desc: "test result (with task run result defined) with no result without signed termination message", + specifyTaskRunResult: true, + podStatus: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }, + }, + }}, + }, + want: v1beta1.TaskRunStatus{ + Status: statusSRVUnverified(), + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: "[]", + }}, + Name: "bar", + ContainerName: "step-bar", + }}, + Sidecars: []v1beta1.SidecarState{}, + // We don't actually care about the time, just that 
it's not nil + CompletionTime: &metav1.Time{Time: time.Now()}, + }, + }, + }} { + t.Run(c.desc, func(t *testing.T) { + now := metav1.Now() + ctx := context.Background() + if cmp.Diff(c.pod, corev1.Pod{}) == "" { + c.pod = corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod", + Namespace: "foo", + CreationTimestamp: now, + }, + Status: c.podStatus, + } + } + + startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC) + tr := v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task-run", + Namespace: "foo", + }, + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + StartTime: &metav1.Time{Time: startTime}, + }, + }, + } + + if c.specifyTaskRunResult { + // Specify result + tr.Status.TaskSpec = &v1beta1.TaskSpec{ + Results: []v1beta1.TaskResult{{ + Name: "some-task-result", + }}, + } + + c.want.TaskSpec = tr.Status.TaskSpec + } + + if err := sc.CreateEntries(ctx, &tr, &c.pod, 10000); err != nil { + t.Fatalf("unable to create entry for tr: %v", tr.Name) + } + + if c.resultOut != nil { + id := sc.GetIdentity(&tr) + for i := 0; i < 20; i++ { + sc.SignIdentities = append(sc.SignIdentities, id) + } + sigs, err := sc.Sign(ctx, c.resultOut) + if err != nil { + t.Fatalf("failed to sign: %v", err) + } + c.resultOut = append(c.resultOut, sigs...) + s, err := createMessageFromResults(c.resultOut) + if err != nil { + t.Fatalf("failed to create message from result: %v", err) + } + + c.podStatus.ContainerStatuses[0].State.Terminated.Message = s + c.want.TaskRunStatusFields.Steps[0].ContainerState.Terminated.Message = s + } + + logger, _ := logging.NewLogger("", "status") + got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, true, sc) + if err != nil { + t.Errorf("MakeTaskRunResult: %s", err) + } + + // Common traits, set for test case brevity. 
+ c.want.PodName = "pod" + c.want.StartTime = &metav1.Time{Time: startTime} + + ensureTimeNotNil := cmp.Comparer(func(x, y *metav1.Time) bool { + if x == nil { + return y == nil + } + return y != nil + }) + if d := cmp.Diff(c.want, got, ignoreVolatileTime, ensureTimeNotNil, processConditions, terminationMessageTrans); d != "" { + t.Errorf("Diff %s", diff.PrintWantGot(d)) + } + if tr.Status.StartTime.Time != c.want.StartTime.Time { + t.Errorf("Expected TaskRun startTime to be unchanged but was %s", tr.Status.StartTime) + } + + if err := sc.DeleteEntry(ctx, &tr, &c.pod); err != nil { + t.Fatalf("unable to create entry for tr: %v", tr.Name) + } + + }) + } +} + func TestMakeTaskRunStatus(t *testing.T) { for _, c := range []struct { desc string @@ -1061,7 +1457,7 @@ func TestMakeTaskRunStatus(t *testing.T) { }, } logger, _ := logging.NewLogger("", "status") - got, err := MakeTaskRunStatus(logger, tr, &c.pod) + got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, false, nil) if err != nil { t.Errorf("MakeTaskRunResult: %s", err) } @@ -1275,7 +1671,7 @@ func TestMakeTaskRunStatusAlpha(t *testing.T) { }, } logger, _ := logging.NewLogger("", "status") - got, err := MakeTaskRunStatus(logger, tr, &c.pod) + got, err := MakeTaskRunStatus(context.Background(), logger, tr, &c.pod, false, nil) if err != nil { t.Errorf("MakeTaskRunResult: %s", err) } @@ -1396,7 +1792,7 @@ func TestMakeRunStatusJSONError(t *testing.T) { } logger, _ := logging.NewLogger("", "status") - gotTr, err := MakeTaskRunStatus(logger, tr, pod) + gotTr, err := MakeTaskRunStatus(context.Background(), logger, tr, pod, false, nil) if err == nil { t.Error("Expected error, got nil") } diff --git a/pkg/reconciler/taskrun/controller.go b/pkg/reconciler/taskrun/controller.go index 293b24a2141..3be6f1d3deb 100644 --- a/pkg/reconciler/taskrun/controller.go +++ b/pkg/reconciler/taskrun/controller.go @@ -29,6 +29,7 @@ import ( "github.com/tektoncd/pipeline/pkg/pod" cloudeventclient 
"github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent" "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/taskrunmetrics" resolutionclient "github.com/tektoncd/resolution/pkg/client/injection/client" resolutioninformer "github.com/tektoncd/resolution/pkg/client/injection/informers/resolution/v1alpha1/resolutionrequest" @@ -54,6 +55,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex resourceInformer := resourceinformer.Get(ctx) limitrangeInformer := limitrangeinformer.Get(ctx) resolutionInformer := resolutioninformer.Get(ctx) + spireControllerAPI := spire.GetControllerAPIClient(ctx) configStore := config.NewStore(logger.Named("config-store"), taskrunmetrics.MetricsOnStore(logger)) configStore.WatchConfigs(cmw) @@ -66,6 +68,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex KubeClientSet: kubeclientset, PipelineClientSet: pipelineclientset, Images: opts.Images, + SpireClient: spireControllerAPI, Clock: clock, taskRunLister: taskRunInformer.Lister(), resourceLister: resourceInformer.Lister(), @@ -77,6 +80,7 @@ func NewController(opts *pipeline.Options, clock clock.PassiveClock) func(contex pvcHandler: volumeclaim.NewPVCHandler(kubeclientset, logger), resolutionRequester: resolution.NewCRDRequester(resolutionclient.Get(ctx), resolutionInformer.Lister()), } + c.SpireClient.SetConfig(opts.SpireConfig) impl := taskrunreconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options { return controller.Options{ AgentName: pipeline.TaskRunControllerName, diff --git a/pkg/reconciler/taskrun/resources/image_exporter.go b/pkg/reconciler/taskrun/resources/image_exporter.go index b03b98a277d..ee80870beb7 100644 --- a/pkg/reconciler/taskrun/resources/image_exporter.go +++ b/pkg/reconciler/taskrun/resources/image_exporter.go @@ -33,7 +33,7 @@ func AddOutputImageDigestExporter( imageDigestExporterImage string, tr 
*v1beta1.TaskRun, taskSpec *v1beta1.TaskSpec, - gr GetResource, + gr GetResource, spireEnabled bool, ) error { output := []*image.Resource{} @@ -80,7 +80,7 @@ func AddOutputImageDigestExporter( } augmentedSteps = append(augmentedSteps, taskSpec.Steps...) - augmentedSteps = append(augmentedSteps, imageDigestExporterStep(imageDigestExporterImage, imagesJSON)) + augmentedSteps = append(augmentedSteps, imageDigestExporterStep(imageDigestExporterImage, imagesJSON, spireEnabled)) taskSpec.Steps = augmentedSteps } @@ -89,13 +89,19 @@ func AddOutputImageDigestExporter( return nil } -func imageDigestExporterStep(imageDigestExporterImage string, imagesJSON []byte) v1beta1.Step { +func imageDigestExporterStep(imageDigestExporterImage string, imagesJSON []byte, spireEnabled bool) v1beta1.Step { + // Add extra entrypoint arg to enable or disable spire + commonExtraEntrypointArgs := []string{ + "-images", string(imagesJSON), + } + if spireEnabled { + commonExtraEntrypointArgs = append(commonExtraEntrypointArgs, "-enable_spire") + } + return v1beta1.Step{ Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(imageDigestExporterContainerName), Image: imageDigestExporterImage, Command: []string{"/ko-app/imagedigestexporter"}, - Args: []string{ - "-images", string(imagesJSON), - }, + Args: commonExtraEntrypointArgs, } } diff --git a/pkg/reconciler/taskrun/resources/image_exporter_test.go b/pkg/reconciler/taskrun/resources/image_exporter_test.go index aba62bf4ab3..66b752edbb1 100644 --- a/pkg/reconciler/taskrun/resources/image_exporter_test.go +++ b/pkg/reconciler/taskrun/resources/image_exporter_test.go @@ -183,7 +183,175 @@ func TestAddOutputImageDigestExporter(t *testing.T) { }, }, nil } - err := AddOutputImageDigestExporter("override-with-imagedigest-exporter-image:latest", c.taskRun, &c.task.Spec, gr) + err := AddOutputImageDigestExporter("override-with-imagedigest-exporter-image:latest", c.taskRun, &c.task.Spec, gr, false) + if err != nil { + t.Fatalf("Failed to 
declare output resources for test %q: error %v", c.desc, err) + } + + if d := cmp.Diff(c.task.Spec.Steps, c.wantSteps); d != "" { + t.Fatalf("post build steps mismatch %s", diff.PrintWantGot(d)) + } + }) + } +} + +func TestAddOutputImageDigestExporterWithSpire(t *testing.T) { + for _, c := range []struct { + desc string + task *v1beta1.Task + taskRun *v1beta1.TaskRun + wantSteps []v1beta1.Step + }{{ + desc: "image resource declared as both input and output", + task: &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task1", + Namespace: "marshmallow", + }, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Name: "step1", + }}, + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ + ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "source-image", + Type: "image", + }, + }}, + Outputs: []v1beta1.TaskResource{{ + ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "source-image", + Type: "image", + }, + }}, + }, + }, + }, + taskRun: &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-taskrun-run-output-steps", + Namespace: "marshmallow", + }, + Spec: v1beta1.TaskRunSpec{ + Resources: &v1beta1.TaskRunResources{ + Inputs: []v1beta1.TaskResourceBinding{{ + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + Name: "source-image", + ResourceRef: &v1beta1.PipelineResourceRef{ + Name: "source-image-1", + }, + }, + }}, + Outputs: []v1beta1.TaskResourceBinding{{ + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + Name: "source-image", + ResourceRef: &v1beta1.PipelineResourceRef{ + Name: "source-image-1", + }, + }, + }}, + }, + }, + }, + wantSteps: []v1beta1.Step{{ + Name: "step1", + }, { + Name: "image-digest-exporter-9l9zj", + Image: "override-with-imagedigest-exporter-image:latest", + Command: []string{"/ko-app/imagedigestexporter"}, + Args: []string{"-images", 
"[{\"name\":\"source-image\",\"type\":\"image\",\"url\":\"gcr.io/some-image-1\",\"digest\":\"\",\"OutputImageDir\":\"/workspace/output/source-image\"}]", "-enable_spire"}, + }}, + }, { + desc: "image resource in task with multiple steps", + task: &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task1", + Namespace: "marshmallow", + }, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Name: "step1", + }, { + Name: "step2", + }}, + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ + ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "source-image", + Type: "image", + }, + }}, + Outputs: []v1beta1.TaskResource{{ + ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "source-image", + Type: "image", + }, + }}, + }, + }, + }, + taskRun: &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-taskrun-run-output-steps", + Namespace: "marshmallow", + }, + Spec: v1beta1.TaskRunSpec{ + Resources: &v1beta1.TaskRunResources{ + Inputs: []v1beta1.TaskResourceBinding{{ + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + Name: "source-image", + ResourceRef: &v1beta1.PipelineResourceRef{ + Name: "source-image-1", + }, + }, + }}, + Outputs: []v1beta1.TaskResourceBinding{{ + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + Name: "source-image", + ResourceRef: &v1beta1.PipelineResourceRef{ + Name: "source-image-1", + }, + }, + }}, + }, + }, + }, + wantSteps: []v1beta1.Step{{ + Name: "step1", + }, { + Name: "step2", + }, { + Name: "image-digest-exporter-9l9zj", + Image: "override-with-imagedigest-exporter-image:latest", + Command: []string{"/ko-app/imagedigestexporter"}, + Args: []string{"-images", "[{\"name\":\"source-image\",\"type\":\"image\",\"url\":\"gcr.io/some-image-1\",\"digest\":\"\",\"OutputImageDir\":\"/workspace/output/source-image\"}]", "-enable_spire"}, + }}, + }} { + t.Run(c.desc, func(t *testing.T) { + names.TestingSeed() + gr := func(n string) (*resourcev1alpha1.PipelineResource, error) { + 
return &resourcev1alpha1.PipelineResource{ + ObjectMeta: metav1.ObjectMeta{ + Name: "source-image-1", + Namespace: "marshmallow", + }, + Spec: resourcev1alpha1.PipelineResourceSpec{ + Type: "image", + Params: []v1beta1.ResourceParam{{ + Name: "url", + Value: "gcr.io/some-image-1", + }, { + Name: "digest", + Value: "", + }, { + Name: "OutputImageDir", + Value: "/workspace/source-image-1/index.json", + }}, + }, + }, nil + } + err := AddOutputImageDigestExporter("override-with-imagedigest-exporter-image:latest", c.taskRun, &c.task.Spec, gr, true) if err != nil { t.Fatalf("Failed to declare output resources for test %q: error %v", c.desc, err) } diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index b927e84d5e3..c4f6533b7c4 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -44,6 +44,7 @@ import ( "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources" "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" "github.com/tektoncd/pipeline/pkg/remote" + "github.com/tektoncd/pipeline/pkg/spire" "github.com/tektoncd/pipeline/pkg/taskrunmetrics" _ "github.com/tektoncd/pipeline/pkg/taskrunmetrics/fake" // Make sure the taskrunmetrics are setup "github.com/tektoncd/pipeline/pkg/workspace" @@ -70,6 +71,7 @@ type Reconciler struct { KubeClientSet kubernetes.Interface PipelineClientSet clientset.Interface Images pipeline.Images + SpireClient spire.ControllerAPIClient Clock clock.PassiveClock // listers index properties about resources @@ -286,13 +288,17 @@ func (c *Reconciler) finishReconcileUpdateEmitEvents(ctx context.Context, tr *v1 // Send k8s events and cloud events (when configured) events.Emit(ctx, beforeCondition, afterCondition, tr) - _, err := c.updateLabelsAndAnnotations(ctx, tr) + var err error + + merr := multierror.Append(previousError, err).ErrorOrNil() + + _, err = c.updateLabelsAndAnnotations(ctx, tr) if err != nil { logger.Warn("Failed to update TaskRun labels/annotations", 
zap.Error(err)) events.EmitError(controller.GetEventRecorder(ctx), err, tr) } - merr := multierror.Append(previousError, err).ErrorOrNil() + merr = multierror.Append(previousError, err).ErrorOrNil() if controller.IsPermanentError(previousError) { return controller.NewPermanentError(merr) } @@ -430,6 +436,7 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re // Get the TaskRun's Pod if it should have one. Otherwise, create the Pod. var pod *corev1.Pod var err error + spireEnabled := config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire if tr.Status.PodName != "" { pod, err = c.podLister.Pods(tr.Namespace).Get(tr.Status.PodName) @@ -488,16 +495,27 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re } if podconvert.SidecarsReady(pod.Status) { + if spireEnabled { + // TTL for the entry is in seconds + ttl := time.Duration(config.FromContextOrDefaults(ctx).Defaults.DefaultTimeoutMinutes) * time.Minute + if err = c.SpireClient.CreateEntries(ctx, tr, pod, ttl); err != nil { + logger.Errorf("Failed to create workload SPIFFE entry for taskrun %v: %v", tr.Name, err) + return err + } + logger.Infof("Created SPIFFE workload entry for %v/%v", tr.Namespace, tr.Name) + } + if err := podconvert.UpdateReady(ctx, c.KubeClientSet, *pod); err != nil { return err } if err := c.metrics.RecordPodLatency(ctx, pod, tr); err != nil { logger.Warnf("Failed to log the metrics : %v", err) } + } // Convert the Pod's status to the equivalent TaskRun Status. 
- tr.Status, err = podconvert.MakeTaskRunStatus(logger, *tr, pod) + tr.Status, err = podconvert.MakeTaskRunStatus(ctx, logger, *tr, pod, spireEnabled, c.SpireClient) if err != nil { return err } @@ -507,6 +525,14 @@ func (c *Reconciler) reconcile(ctx context.Context, tr *v1beta1.TaskRun, rtr *re return err } + if spireEnabled && tr.IsDone() { + if err := c.SpireClient.DeleteEntry(ctx, tr, pod); err != nil { + logger.Infof("Failed to remove workload SPIFFE entry for taskrun %v: %v", tr.Name, err) + return err + } + logger.Infof("Deleted SPIFFE workload entry for %v/%v", tr.Namespace, tr.Name) + } + logger.Infof("Successfully reconciled taskrun %s/%s with status: %#v", tr.Name, tr.Namespace, tr.Status.GetCondition(apis.ConditionSucceeded)) return nil } @@ -671,8 +697,11 @@ func (c *Reconciler) createPod(ctx context.Context, ts *v1beta1.TaskSpec, tr *v1 return nil, err } + // check if spire is enabled to pass to ImageDigestExporter + spireEnabled := config.FromContextOrDefaults(ctx).FeatureFlags.EnableSpire + // Get actual resource - err = resources.AddOutputImageDigestExporter(c.Images.ImageDigestExporterImage, tr, ts, c.resourceLister.PipelineResources(tr.Namespace).Get) + err = resources.AddOutputImageDigestExporter(c.Images.ImageDigestExporterImage, tr, ts, c.resourceLister.PipelineResources(tr.Namespace).Get, spireEnabled) if err != nil { logger.Errorf("Failed to create a pod for taskrun: %s due to output image resource error %v", tr.Name, err) return nil, err diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index 8d4e63970de..04e84d8e206 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -43,6 +43,8 @@ import ( "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/pkg/reconciler/volumeclaim" + "github.com/tektoncd/pipeline/pkg/spire" + spireconfig 
"github.com/tektoncd/pipeline/pkg/spire/config" "github.com/tektoncd/pipeline/test" "github.com/tektoncd/pipeline/test/diff" eventstest "github.com/tektoncd/pipeline/test/events" @@ -91,6 +93,7 @@ var ( PRImage: "override-with-pr:latest", ImageDigestExporterImage: "override-with-imagedigest-exporter-image:latest", } + spireConfig = spireconfig.SpireConfig{MockSpire: true} now = time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC) ignoreLastTransitionTime = cmpopts.IgnoreFields(apis.Condition{}, "LastTransitionTime.Inner.Time") // Pods are created with a random 5-character suffix that we want to @@ -522,7 +525,7 @@ func ensureConfigurationConfigMapsExist(d *test.Data) { func getTaskRunController(t *testing.T, d test.Data) (test.Assets, func()) { t.Helper() names.TestingSeed() - return initializeTaskRunControllerAssets(t, d, pipeline.Options{Images: images}) + return initializeTaskRunControllerAssets(t, d, pipeline.Options{Images: images, SpireConfig: spireConfig}) } func initializeTaskRunControllerAssets(t *testing.T, d test.Data, opts pipeline.Options) (test.Assets, func()) { @@ -597,7 +600,7 @@ spec: image: "foo", name: "simple-step", cmd: "/mycmd", - }}), + }}, false), }, { name: "serviceaccount", taskRun: taskRunWithSaSuccess, @@ -605,7 +608,7 @@ spec: image: "foo", name: "sa-step", cmd: "/mycmd", - }}), + }}, false), }} { t.Run(tc.name, func(t *testing.T) { saName := tc.taskRun.Spec.ServiceAccountName @@ -1005,7 +1008,7 @@ spec: image: "foo", name: "simple-step", cmd: "/mycmd", - }}), + }}, false), }, { name: "serviceaccount", taskRun: taskRunWithSaSuccess, @@ -1017,7 +1020,7 @@ spec: image: "foo", name: "sa-step", cmd: "/mycmd", - }}), + }}, false), }, { name: "params", taskRun: taskRunSubstitution, @@ -1082,7 +1085,7 @@ spec: "[{\"name\":\"myimage\",\"type\":\"image\",\"url\":\"gcr.io/kristoff/sven\",\"digest\":\"\",\"OutputImageDir\":\"/workspace/output/myimage\"}]", }, }, - }), + }, false), }, { name: "taskrun-with-taskspec", taskRun: taskRunWithTaskSpec, 
@@ -1112,7 +1115,7 @@ spec: "--my-arg=foo", }, }, - }), + }, false), }, { name: "success-with-cluster-task", taskRun: taskRunWithClusterTask, @@ -1124,7 +1127,7 @@ spec: name: "simple-step", image: "foo", cmd: "/mycmd", - }}), + }}, false), }, { name: "taskrun-with-resource-spec-task-spec", taskRun: taskRunWithResourceSpecAndTaskSpec, @@ -1153,7 +1156,7 @@ spec: image: "ubuntu", cmd: "/mycmd", }, - }), + }, false), }, { name: "taskrun-with-pod", taskRun: taskRunWithPod, @@ -1165,7 +1168,7 @@ spec: name: "simple-step", image: "foo", cmd: "/mycmd", - }}), + }}, false), }, { name: "taskrun-with-credentials-variable-default-tekton-creds", taskRun: taskRunWithCredentialsVariable, @@ -1177,7 +1180,7 @@ spec: name: "mycontainer", image: "myimage", cmd: "/mycmd /tekton/creds", - }}), + }}, false), }, { name: "remote-task", taskRun: taskRunBundle, @@ -1189,7 +1192,7 @@ spec: name: "simple-step", image: "foo", cmd: "/mycmd", - }}), + }}, false), }} { t.Run(tc.name, func(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) @@ -1315,12 +1318,28 @@ spec: "Normal Started ", "Normal Running Not all Steps", }, - wantPod: expectedPod("test-taskrun-with-output-config-pod", "", "test-taskrun-with-output-config", "foo", config.DefaultServiceAccountValue, false, nil, []stepForExpectedPod{{ - name: "mycontainer", - image: "myimage", - stdoutPath: "stdout.txt", - cmd: "/mycmd", - }}), + wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-pod", "", "test-taskrun-with-output-config", "foo", config.DefaultServiceAccountValue, false, + []corev1.Volume{ + { + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + }, + }, + }}, []stepForExpectedPod{{ + name: "mycontainer", + image: "myimage", + stdoutPath: "stdout.txt", + cmd: "/mycmd", + }}, true), + []corev1.VolumeMount{ + { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + }, + }, + ), }, { name: "taskrun-with-output-config-ws", 
taskRun: taskRunWithOutputConfigAndWorkspace, @@ -1329,22 +1348,38 @@ spec: "Normal Running Not all Steps", }, wantPod: addVolumeMounts(expectedPod("test-taskrun-with-output-config-ws-pod", "", "test-taskrun-with-output-config-ws", "foo", config.DefaultServiceAccountValue, false, - []corev1.Volume{{ - Name: "ws-9l9zj", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{}, + []corev1.Volume{ + { + Name: "ws-9l9zj", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, { + Name: spire.WorkloadAPI, + VolumeSource: corev1.VolumeSource{ + CSI: &corev1.CSIVolumeSource{ + Driver: "csi.spiffe.io", + }, + }, }, - }}, + }, []stepForExpectedPod{{ name: "mycontainer", image: "myimage", stdoutPath: "stdout.txt", cmd: "/mycmd", - }}), - []corev1.VolumeMount{{ - Name: "ws-9l9zj", - MountPath: "/workspace/data", - }}), + }}, true), + []corev1.VolumeMount{ + { + Name: "ws-9l9zj", + MountPath: "/workspace/data", + }, + { + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + }, + }, + ), }} { t.Run(tc.name, func(t *testing.T) { testAssets, cancel := getTaskRunController(t, d) @@ -1405,8 +1440,8 @@ spec: } func addVolumeMounts(p *corev1.Pod, vms []corev1.VolumeMount) *corev1.Pod { - for i, vm := range vms { - p.Spec.Containers[i].VolumeMounts = append(p.Spec.Containers[i].VolumeMounts, vm) + for i := range p.Spec.Containers { + p.Spec.Containers[i].VolumeMounts = append(p.Spec.Containers[i].VolumeMounts, vms...) 
} return p } @@ -4516,7 +4551,7 @@ func podVolumeMounts(idx, totalSteps int) []corev1.VolumeMount { return mnts } -func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs []string, idx int) []string { +func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs []string, idx int, alpha bool) []string { args := []string{ "-wait_file", } @@ -4525,6 +4560,7 @@ func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs [] } else { args = append(args, fmt.Sprintf("/tekton/run/%d/out", idx-1)) } + args = append(args, "-post_file", fmt.Sprintf("/tekton/run/%d/out", idx), @@ -4533,6 +4569,9 @@ func podArgs(cmd string, stdoutPath string, stderrPath string, additionalArgs [] "-step_metadata_dir", fmt.Sprintf("/tekton/run/%d/status", idx), ) + if alpha { + args = append(args, "-enable_spire") + } if stdoutPath != "" { args = append(args, "-stdout_path", stdoutPath) } @@ -4594,11 +4633,23 @@ type stepForExpectedPod struct { stderrPath string } -func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTask bool, extraVolumes []corev1.Volume, steps []stepForExpectedPod) *corev1.Pod { +func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTask bool, extraVolumes []corev1.Volume, steps []stepForExpectedPod, alpha bool) *corev1.Pod { stepNames := make([]string, 0, len(steps)) for _, s := range steps { stepNames = append(stepNames, fmt.Sprintf("step-%s", s.name)) } + + initContainers := []corev1.Container{placeToolsInitContainer(stepNames)} + if alpha { + for i := range initContainers { + c := &initContainers[i] + c.VolumeMounts = append(c.VolumeMounts, corev1.VolumeMount{ + Name: spire.WorkloadAPI, + MountPath: spire.VolumeMountPath, + }) + } + } + p := &corev1.Pod{ ObjectMeta: podObjectMeta(podName, taskName, taskRunName, ns, isClusterTask), Spec: corev1.PodSpec{ @@ -4610,7 +4661,7 @@ func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTas binVolume, 
downwardVolume, }, - InitContainers: []corev1.Container{placeToolsInitContainer(stepNames)}, + InitContainers: initContainers, RestartPolicy: corev1.RestartPolicyNever, ActiveDeadlineSeconds: &defaultActiveDeadlineSeconds, ServiceAccountName: saName, @@ -4631,7 +4682,7 @@ func expectedPod(podName, taskName, taskRunName, ns, saName string, isClusterTas VolumeMounts: podVolumeMounts(idx, len(steps)), TerminationMessagePath: "/tekton/termination", } - stepContainer.Args = podArgs(s.cmd, s.stdoutPath, s.stderrPath, s.args, idx) + stepContainer.Args = podArgs(s.cmd, s.stdoutPath, s.stderrPath, s.args, idx, alpha) for k, v := range s.envVars { stepContainer.Env = append(stepContainer.Env, corev1.EnvVar{ diff --git a/pkg/reconciler/testing/logger.go b/pkg/reconciler/testing/logger.go index 386e10837a7..e53ee569dcc 100644 --- a/pkg/reconciler/testing/logger.go +++ b/pkg/reconciler/testing/logger.go @@ -31,6 +31,16 @@ func SetupFakeContext(t *testing.T) (context.Context, []controller.Informer) { return WithLogger(ctx, t), informer } +// SetupDefaultContext sets up the the Context and the default filtered informers for the tests. +func SetupDefaultContext(t *testing.T) (context.Context, []controller.Informer) { + ctx, _, informer := setupDefaultContextWithLabelKey(t) + cloudEventClientBehaviour := cloudevent.FakeClientBehaviour{ + SendSuccessfully: true, + } + ctx = cloudevent.WithClient(ctx, &cloudEventClientBehaviour) + return WithLogger(ctx, t), informer +} + // WithLogger returns the the Logger func WithLogger(ctx context.Context, t *testing.T) context.Context { return logging.WithLogger(ctx, TestLogger(t)) @@ -51,3 +61,13 @@ func setupFakeContextWithLabelKey(t zaptest.TestingT) (context.Context, context. ctx, is := injection.Fake.SetupInformers(ctx, &rest.Config{}) return ctx, c, is } + +// setupDefaultContextWithLabelKey sets up the the Context and the default informers for the tests +// The provided context includes the FilteredInformerFactory LabelKey. 
+func setupDefaultContextWithLabelKey(t zaptest.TestingT) (context.Context, context.CancelFunc, []controller.Informer) {
+	ctx, c := context.WithCancel(logtesting.TestContextWithLogger(t))
+	ctx = controller.WithEventRecorder(ctx, record.NewFakeRecorder(1000))
+	ctx = filteredinformerfactory.WithSelectors(ctx, v1beta1.ManagedByLabelKey)
+	ctx, is := injection.Default.SetupInformers(ctx, &rest.Config{})
+	return ctx, c, is
+}
diff --git a/pkg/spire/config/config.go b/pkg/spire/config/config.go
new file mode 100644
index 00000000000..fe08f2cf485
--- /dev/null
+++ b/pkg/spire/config/config.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2022 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// SpireConfig holds the SPIRE server and agent connection settings used
+// across tektoncd pipelines.
+type SpireConfig struct {
+	// The trust domain corresponds to the trust root of a SPIFFE identity provider.
+	TrustDomain string
+	// Path to the spire agent socket defined by the CSI driver
+	SocketPath string
+	// Spire server address
+	ServerAddr string
+	// Prefix to attach to the node name when registering it with the spire server
+	NodeAliasPrefix string
+
+	// MockSpire only to be used for testing the controller, will not exhibit
+	// all characteristics of spire since it is only being used in the context
+	// of process memory.
+	MockSpire bool
+}
+
+// Validate returns an error if any required spire configuration field is not set.
+func (c SpireConfig) Validate() error {
+	var unset []string
+	for _, f := range []struct {
+		v, name string
+	}{
+		{c.TrustDomain, "spire-trust-domain"},
+		{c.SocketPath, "spire-socket-path"},
+		{c.ServerAddr, "spire-server-addr"},
+		{c.NodeAliasPrefix, "spire-node-alias-prefix"},
+	} {
+		if f.v == "" {
+			unset = append(unset, f.name)
+		}
+	}
+	if len(unset) > 0 {
+		sort.Strings(unset)
+		return fmt.Errorf("found unset spire flags: %s", unset)
+	}
+
+	if !strings.HasPrefix(c.NodeAliasPrefix, "/") {
+		return fmt.Errorf("spire node alias should start with a /")
+	}
+
+	return nil
+}
diff --git a/pkg/spire/controller.go b/pkg/spire/controller.go
new file mode 100644
index 00000000000..410c9c2ad63
--- /dev/null
+++ b/pkg/spire/controller.go
@@ -0,0 +1,314 @@
+/*
+Copyright 2022 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package spire + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/spiffe/go-spiffe/v2/workloadapi" + entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1" + spiffetypes "github.com/spiffe/spire-api-sdk/proto/spire/api/types" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "knative.dev/pkg/injection" + "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterClient(withControllerClient) +} + +// controllerKey is a way to associate the ControllerAPIClient from inside the context.Context +type controllerKey struct{} + +// GetControllerAPIClient extracts the ControllerAPIClient from the context. 
+func GetControllerAPIClient(ctx context.Context) ControllerAPIClient { + untyped := ctx.Value(controllerKey{}) + if untyped == nil { + logging.FromContext(ctx).Errorf("Unable to fetch client from context.") + return nil + } + return untyped.(*spireControllerAPIClient) +} + +func withControllerClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, controllerKey{}, &spireControllerAPIClient{}) +} + +type spireControllerAPIClient struct { + config *spireconfig.SpireConfig + serverConn *grpc.ClientConn + workloadConn *workloadapi.X509Source + entryClient entryv1.EntryClient + workloadAPI *workloadapi.Client +} + +func (sc *spireControllerAPIClient) setupClient(ctx context.Context) error { + if sc.config == nil { + return errors.New("config has not been set yet") + } + if sc.entryClient == nil || sc.workloadConn == nil || sc.workloadAPI == nil || sc.serverConn == nil { + return sc.dial(ctx) + } + return nil +} + +func (sc *spireControllerAPIClient) dial(ctx context.Context) error { + if sc.workloadConn == nil { + // Create X509Source - https://github.com/spiffe/go-spiffe/blob/main/v2/workloadapi/client.go + source, err := workloadapi.NewX509Source(ctx, workloadapi.WithClientOptions(workloadapi.WithAddr(sc.config.SocketPath))) + if err != nil { + return fmt.Errorf("unable to create X509Source for SPIFFE client: %w", err) + } + sc.workloadConn = source + } + + if sc.workloadAPI == nil { + // spire workloadapi client for controller - https://github.com/spiffe/go-spiffe/blob/main/v2/workloadapi/client.go + client, err := workloadapi.New(ctx, workloadapi.WithAddr(sc.config.SocketPath)) + if err != nil { + return fmt.Errorf("spire workload API not initialized due to error: %w", err) + } + sc.workloadAPI = client + } + + if sc.serverConn == nil { + // Create connection to spire server + tlsConfig := tlsconfig.MTLSClientConfig(sc.workloadConn, sc.workloadConn, tlsconfig.AuthorizeAny()) + conn, err := grpc.DialContext(ctx, 
sc.config.ServerAddr, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + if err != nil { + sc.workloadConn.Close() + sc.workloadConn = nil + return fmt.Errorf("unable to dial SPIRE server: %w", err) + } + sc.serverConn = conn + } + + if sc.entryClient == nil { + sc.entryClient = entryv1.NewEntryClient(sc.serverConn) + } + + return nil +} + +// SetConfig sets the spire configuration for ControllerAPIClient +func (sc *spireControllerAPIClient) SetConfig(c spireconfig.SpireConfig) { + sc.config = &c +} + +func (sc *spireControllerAPIClient) fetchControllerSVID(ctx context.Context) (*x509svid.SVID, error) { + xsvid, err := sc.workloadAPI.FetchX509SVID(ctx) + if err != nil { + return nil, fmt.Errorf("failed to fetch controller SVID: %w", err) + } + return xsvid, nil +} + +func (sc *spireControllerAPIClient) nodeEntry(nodeName string) *spiffetypes.Entry { + selectors := []*spiffetypes.Selector{ + { + Type: "k8s_psat", + Value: "agent_ns:spire", + }, + { + Type: "k8s_psat", + Value: "agent_node_name:" + nodeName, + }, + } + + return &spiffetypes.Entry{ + SpiffeId: &spiffetypes.SPIFFEID{ + TrustDomain: sc.config.TrustDomain, + Path: fmt.Sprintf("%v%v", sc.config.NodeAliasPrefix, nodeName), + }, + ParentId: &spiffetypes.SPIFFEID{ + TrustDomain: sc.config.TrustDomain, + Path: "/spire/server", + }, + Selectors: selectors, + } +} + +func (sc *spireControllerAPIClient) workloadEntry(tr *v1beta1.TaskRun, pod *corev1.Pod, expiry int64) *spiffetypes.Entry { + // Note: We can potentially add attestation on the container images as well since + // the information is available here. 
+ selectors := []*spiffetypes.Selector{ + { + Type: "k8s", + Value: "pod-uid:" + string(pod.UID), + }, + { + Type: "k8s", + Value: "pod-name:" + pod.Name, + }, + } + + return &spiffetypes.Entry{ + SpiffeId: &spiffetypes.SPIFFEID{ + TrustDomain: sc.config.TrustDomain, + Path: fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name), + }, + ParentId: &spiffetypes.SPIFFEID{ + TrustDomain: sc.config.TrustDomain, + Path: fmt.Sprintf("%v%v", sc.config.NodeAliasPrefix, pod.Spec.NodeName), + }, + Selectors: selectors, + ExpiresAt: expiry, + } +} + +// ttl is the TTL for the SPIRE entry in seconds, not the SVID TTL +func (sc *spireControllerAPIClient) CreateEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod, ttl time.Duration) error { + err := sc.setupClient(ctx) + if err != nil { + return err + } + + expiryTime := time.Now().Unix() + int64(ttl) + entries := []*spiffetypes.Entry{ + sc.nodeEntry(pod.Spec.NodeName), + sc.workloadEntry(tr, pod, expiryTime), + } + + req := entryv1.BatchCreateEntryRequest{ + Entries: entries, + } + + resp, err := sc.entryClient.BatchCreateEntry(ctx, &req) + if err != nil { + return err + } + + if len(resp.Results) != len(entries) { + return fmt.Errorf("batch create entry failed, malformed response expected %v result", len(entries)) + } + + var errPaths []string + var errCodes []int32 + + for _, r := range resp.Results { + if codes.Code(r.Status.Code) != codes.AlreadyExists && + codes.Code(r.Status.Code) != codes.OK { + errPaths = append(errPaths, r.Entry.SpiffeId.Path) + errCodes = append(errCodes, r.Status.Code) + } + } + + if len(errPaths) != 0 { + return fmt.Errorf("batch create entry failed for entries %+v with codes %+v", errPaths, errCodes) + } + return nil +} + +func (sc *spireControllerAPIClient) getEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) ([]*spiffetypes.Entry, error) { + req := &entryv1.ListEntriesRequest{ + Filter: &entryv1.ListEntriesRequest_Filter{ + BySpiffeId: &spiffetypes.SPIFFEID{ + 
TrustDomain: sc.config.TrustDomain, + Path: fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name), + }, + }, + } + + entries := []*spiffetypes.Entry{} + for { + resp, err := sc.entryClient.ListEntries(ctx, req) + if err != nil { + return nil, err + } + + entries = append(entries, resp.Entries...) + + if resp.NextPageToken == "" { + break + } + + req.PageToken = resp.NextPageToken + } + + return entries, nil +} + +func (sc *spireControllerAPIClient) DeleteEntry(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) error { + entries, err := sc.getEntries(ctx, tr, pod) + if err != nil { + return err + } + + var ids []string + for _, e := range entries { + ids = append(ids, e.Id) + } + + req := &entryv1.BatchDeleteEntryRequest{ + Ids: ids, + } + resp, err := sc.entryClient.BatchDeleteEntry(ctx, req) + if err != nil { + return err + } + + var errIds []string + var errCodes []int32 + + for _, r := range resp.Results { + if codes.Code(r.Status.Code) != codes.NotFound && + codes.Code(r.Status.Code) != codes.OK { + errIds = append(errIds, r.Id) + errCodes = append(errCodes, r.Status.Code) + } + } + + if len(errIds) != 0 { + return fmt.Errorf("batch delete entry failed for ids %+v with codes %+v", errIds, errCodes) + } + + return nil +} + +func (sc *spireControllerAPIClient) Close() error { + var err error + if sc.serverConn != nil { + err = sc.serverConn.Close() + if err != nil { + return err + } + } + if sc.workloadAPI != nil { + err = sc.workloadAPI.Close() + if err != nil { + return err + } + } + if sc.workloadConn != nil { + err = sc.workloadConn.Close() + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/spire/entrypointer.go b/pkg/spire/entrypointer.go new file mode 100644 index 00000000000..33b529568f0 --- /dev/null +++ b/pkg/spire/entrypointer.go @@ -0,0 +1,86 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spire + +import ( + "context" + "time" + + "github.com/pkg/errors" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/spiffe/go-spiffe/v2/workloadapi" + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" +) + +// NewEntrypointerAPIClient creates the EntrypointerAPIClient +func NewEntrypointerAPIClient(c *spireconfig.SpireConfig) EntrypointerAPIClient { + return &spireEntrypointerAPIClient{ + config: c, + } +} + +type spireEntrypointerAPIClient struct { + config *spireconfig.SpireConfig + client *workloadapi.Client +} + +func (w *spireEntrypointerAPIClient) setupClient(ctx context.Context) error { + if w.config == nil { + return errors.New("config has not been set yet") + } + if w.client == nil { + return w.dial(ctx) + } + return nil +} + +func (w *spireEntrypointerAPIClient) dial(ctx context.Context) error { + // spire workloadapi client for entrypoint - https://github.com/spiffe/go-spiffe/blob/main/v2/workloadapi/client.go + client, err := workloadapi.New(ctx, workloadapi.WithAddr(w.config.SocketPath)) + if err != nil { + return errors.Wrap(err, "spire workload API not initialized due to error") + } + w.client = client + return nil +} + +func (w *spireEntrypointerAPIClient) getWorkloadSVID(ctx context.Context) (*x509svid.SVID, error) { + backOff := 2 + var xsvid *x509svid.SVID + var err error + for i := 0; i < 20; i += backOff { + xsvid, err = w.client.FetchX509SVID(ctx) + if err == nil { + break + } + time.Sleep(time.Duration(backOff) * time.Second) + } + if xsvid != nil && len(xsvid.Certificates) > 0 { + return xsvid, nil + } + 
return nil, errors.Wrap(err, "requested SVID failed to get fetched and timed out") +} + +func (w *spireEntrypointerAPIClient) Close() error { + if w.client != nil { + err := w.client.Close() + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/spire/sign.go b/pkg/spire/sign.go new file mode 100644 index 00000000000..730d0c4178c --- /dev/null +++ b/pkg/spire/sign.go @@ -0,0 +1,148 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spire + +import ( + "context" + "crypto" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/pem" + "strings" + + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" +) + +// Signs the TaskRun results with the TaskRun spire SVID and appends the results to PipelineResourceResult +func (w *spireEntrypointerAPIClient) Sign(ctx context.Context, results []v1beta1.PipelineResourceResult) ([]v1beta1.PipelineResourceResult, error) { + err := w.setupClient(ctx) + if err != nil { + return nil, err + } + + xsvid, err := w.getWorkloadSVID(ctx) + if err != nil { + return nil, err + } + + output := []v1beta1.PipelineResourceResult{} + p := pem.EncodeToMemory(&pem.Block{ + Bytes: xsvid.Certificates[0].Raw, + Type: "CERTIFICATE", + }) + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeySVID, + Value: string(p), + ResultType: v1beta1.TaskRunResultType, + }) + + for _, r := range results { + if r.ResultType == 
v1beta1.TaskRunResultType { + resultValue, err := getResultValue(r) + if err != nil { + return nil, err + } + s, err := signWithKey(xsvid, resultValue) + if err != nil { + return nil, err + } + output = append(output, v1beta1.PipelineResourceResult{ + Key: r.Key + KeySignatureSuffix, + Value: base64.StdEncoding.EncodeToString(s), + ResultType: v1beta1.TaskRunResultType, + }) + } + } + // get complete manifest of keys such that it can be verified + manifest := getManifest(results) + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeyResultManifest, + Value: manifest, + ResultType: v1beta1.TaskRunResultType, + }) + manifestSig, err := signWithKey(xsvid, manifest) + if err != nil { + return nil, err + } + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeyResultManifest + KeySignatureSuffix, + Value: base64.StdEncoding.EncodeToString(manifestSig), + ResultType: v1beta1.TaskRunResultType, + }) + + return output, nil +} + +func signWithKey(xsvid *x509svid.SVID, value string) ([]byte, error) { + dgst := sha256.Sum256([]byte(value)) + s, err := xsvid.PrivateKey.Sign(rand.Reader, dgst[:], crypto.SHA256) + if err != nil { + return nil, err + } + return s, nil +} + +func getManifest(results []v1beta1.PipelineResourceResult) string { + keys := []string{} + for _, r := range results { + if strings.HasSuffix(r.Key, KeySignatureSuffix) || r.Key == KeySVID || r.ResultType != v1beta1.TaskRunResultType { + continue + } + keys = append(keys, r.Key) + } + return strings.Join(keys, ",") +} + +// AppendStatusInternalAnnotation creates the status annotations which are used by the controller to verify the status hash +func (sc *spireControllerAPIClient) AppendStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun) error { + err := sc.setupClient(ctx) + if err != nil { + return err + } + + // Add status hash + currentHash, err := hashTaskrunStatusInternal(tr) + if err != nil { + return err + } + + // Sign with controller private key + xsvid, err := 
sc.fetchControllerSVID(ctx) + if err != nil { + return err + } + + sig, err := signWithKey(xsvid, currentHash) + if err != nil { + return err + } + + // Store Controller SVID + p := pem.EncodeToMemory(&pem.Block{ + Bytes: xsvid.Certificates[0].Raw, + Type: "CERTIFICATE", + }) + if tr.Status.Annotations == nil { + tr.Status.Annotations = map[string]string{} + } + tr.Status.Annotations[controllerSvidAnnotation] = string(p) + tr.Status.Annotations[TaskRunStatusHashAnnotation] = currentHash + tr.Status.Annotations[taskRunStatusHashSigAnnotation] = base64.StdEncoding.EncodeToString(sig) + return nil +} diff --git a/pkg/spire/spire.go b/pkg/spire/spire.go new file mode 100644 index 00000000000..708ada1685b --- /dev/null +++ b/pkg/spire/spire.go @@ -0,0 +1,75 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The spire package is used to interact with the Spire server and Spire agent respectively. +// The pipeline controller (once registered) is able to create and delete entries in the Spire server +// for the various TaskRuns that it instantiates. The TaskRun is able to attest to the Spire agent +// and obtains the valid SVID (SPIFFE Verifiable Identity Document) to sign the TaskRun results. +// Separately, the pipeline controller SVID is used to sign the TaskRun Status to validate no modification +// during the TaskRun execution. Each TaskRun result and status is verified and validated once the +// TaskRun execution is completed. 
Tekton Chains will also validate the results and status before +// signing and creating attestation for the TaskRun. +package spire + +import ( + "context" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +const ( + // TaskRunStatusHashAnnotation TaskRun status annotation Hash Key + TaskRunStatusHashAnnotation = "tekton.dev/status-hash" + // taskRunStatusHashSigAnnotation TaskRun status annotation hash signature Key + taskRunStatusHashSigAnnotation = "tekton.dev/status-hash-sig" + // controllerSvidAnnotation TaskRun status annotation controller SVID Key + controllerSvidAnnotation = "tekton.dev/controller-svid" + // NotVerifiedAnnotation TaskRun status annotation not verified by spire key that gets set when status match fails + NotVerifiedAnnotation = "tekton.dev/not-verified" + // KeySVID key used by TaskRun SVID + KeySVID = "SVID" + // KeySignatureSuffix is the suffix of the keys that contain signatures + KeySignatureSuffix = ".sig" + // KeyResultManifest key used to get the result manifest from the results + KeyResultManifest = "RESULT_MANIFEST" + // WorkloadAPI is the name of the SPIFFE/SPIRE CSI Driver volume + WorkloadAPI = "spiffe-workload-api" + // VolumeMountPath is the volume mount in the pods to access the SPIFFE/SPIRE agent workload API + VolumeMountPath = "/spiffe-workload-api" +) + +// ControllerAPIClient interface maps to the spire controller API to interact with spire +type ControllerAPIClient interface { + AppendStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun) error + CheckSpireVerifiedFlag(tr *v1beta1.TaskRun) bool + Close() error + CreateEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod, ttl time.Duration) error + DeleteEntry(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) error + VerifyStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun, logger 
*zap.SugaredLogger) error + VerifyTaskRunResults(ctx context.Context, prs []v1beta1.PipelineResourceResult, tr *v1beta1.TaskRun) error + SetConfig(c spireconfig.SpireConfig) +} + +// EntrypointerAPIClient interface maps to the spire entrypointer API to interact with spire +type EntrypointerAPIClient interface { + Close() error + // Sign returns the signature material to be put in the PipelineResourceResult to append to the output results + Sign(ctx context.Context, results []v1beta1.PipelineResourceResult) ([]v1beta1.PipelineResourceResult, error) +} diff --git a/pkg/spire/spire_mock.go b/pkg/spire/spire_mock.go new file mode 100644 index 00000000000..bc878ff34e3 --- /dev/null +++ b/pkg/spire/spire_mock.go @@ -0,0 +1,314 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spire + +import ( + "context" + "crypto/sha256" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "knative.dev/pkg/injection" +) + +func init() { + injection.Fake.RegisterClient(withFakeControllerClient) +} + +func withFakeControllerClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, controllerKey{}, &spireControllerAPIClient{}) +} + +// MockClient is a client used for mocking this package for unit testing +// other tekton components that use the spire entrypointer or controller client. +// +// The MockClient implements both SpireControllerApiClient and SpireEntrypointerApiClient +// and in addition to that provides the helper functions to define and query internal state. +type MockClient struct { + // Entries is a dictionary of entries that mock the SPIRE server datastore (for function Sign only) + Entries map[string]bool + + // SignIdentities represents the list of identities to use to sign (providing context of a caller to Sign) + // when Sign is called, the identity is dequeued from the slice. A signature will only be provided if the + // corresponding entry is in Entries. This only takes effect if SignOverride is nil. + SignIdentities []string + + // VerifyAlwaysReturns defines whether to always verify successfully or to always fail verification if non-nil. 
+ // This only takes effect on Verify functions: + // - VerifyStatusInternalAnnotationOverride + // - VerifyTaskRunResultsOverride + VerifyAlwaysReturns *bool + + // VerifyStatusInternalAnnotationOverride contains the function to overwrite a call to VerifyStatusInternalAnnotation + VerifyStatusInternalAnnotationOverride func(ctx context.Context, tr *v1beta1.TaskRun, logger *zap.SugaredLogger) error + + // VerifyTaskRunResultsOverride contains the function to overwrite a call to VerifyTaskRunResults + VerifyTaskRunResultsOverride func(ctx context.Context, prs []v1beta1.PipelineResourceResult, tr *v1beta1.TaskRun) error + + // AppendStatusInternalAnnotationOverride contains the function to overwrite a call to AppendStatusInternalAnnotation + AppendStatusInternalAnnotationOverride func(ctx context.Context, tr *v1beta1.TaskRun) error + + // CheckSpireVerifiedFlagOverride contains the function to overwrite a call to CheckSpireVerifiedFlag + CheckSpireVerifiedFlagOverride func(tr *v1beta1.TaskRun) bool + + // SignOverride contains the function to overwrite a call to Sign + SignOverride func(ctx context.Context, results []v1beta1.PipelineResourceResult) ([]v1beta1.PipelineResourceResult, error) +} + +const ( + controllerSvid = "CONTROLLER_SVID_DATA" +) + +func (*MockClient) mockSign(content, signedBy string) string { + return fmt.Sprintf("signed-by-%s:%x", signedBy, sha256.Sum256([]byte(content))) +} + +func (sc *MockClient) mockVerify(content, sig, signedBy string) bool { + return sig == sc.mockSign(content, signedBy) +} + +// GetIdentity gets the taskrun namespace and taskrun name that is used for signing and verifying in mocked spire +func (*MockClient) GetIdentity(tr *v1beta1.TaskRun) string { + return fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name) +} + +// AppendStatusInternalAnnotation creates the status annotations which are used by the controller to verify the status hash +func (sc *MockClient) AppendStatusInternalAnnotation(ctx context.Context, tr 
*v1beta1.TaskRun) error { + if sc.AppendStatusInternalAnnotationOverride != nil { + return sc.AppendStatusInternalAnnotationOverride(ctx, tr) + } + // Add status hash + currentHash, err := hashTaskrunStatusInternal(tr) + if err != nil { + return err + } + + if tr.Status.Annotations == nil { + tr.Status.Annotations = map[string]string{} + } + tr.Status.Annotations[controllerSvidAnnotation] = controllerSvid + tr.Status.Annotations[TaskRunStatusHashAnnotation] = currentHash + tr.Status.Annotations[taskRunStatusHashSigAnnotation] = sc.mockSign(currentHash, "controller") + return nil +} + +// CheckSpireVerifiedFlag checks if the not-verified status annotation is set which would result in spire verification failed +func (sc *MockClient) CheckSpireVerifiedFlag(tr *v1beta1.TaskRun) bool { + if sc.CheckSpireVerifiedFlagOverride != nil { + return sc.CheckSpireVerifiedFlagOverride(tr) + } + + if _, notVerified := tr.Status.Annotations[NotVerifiedAnnotation]; !notVerified { + return true + } + return false +} + +// CreateEntries adds entries to the dictionary of entries that mock the SPIRE server datastore +func (sc *MockClient) CreateEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod, ttl time.Duration) error { + id := fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name) + if sc.Entries == nil { + sc.Entries = map[string]bool{} + } + sc.Entries[id] = true + return nil +} + +// DeleteEntry removes the entry from the dictionary of entries that mock the SPIRE server datastore +func (sc *MockClient) DeleteEntry(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) error { + id := fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name) + if sc.Entries != nil { + delete(sc.Entries, id) + } + return nil +} + +// VerifyStatusInternalAnnotation checks that the internal status annotations are valid by the mocked spire client +func (sc *MockClient) VerifyStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun, logger *zap.SugaredLogger) error { + if 
sc.VerifyStatusInternalAnnotationOverride != nil { + return sc.VerifyStatusInternalAnnotationOverride(ctx, tr, logger) + } + + if sc.VerifyAlwaysReturns != nil { + if *sc.VerifyAlwaysReturns { + return nil + } + return errors.New("failed to verify from mock VerifyAlwaysReturns") + } + + if !sc.CheckSpireVerifiedFlag(tr) { + return errors.New("annotation tekton.dev/not-verified = yes failed spire verification") + } + + annotations := tr.Status.Annotations + + // Verify annotations are there + if annotations[controllerSvidAnnotation] != controllerSvid { + return errors.New("svid annotation missing") + } + + // Check signature + currentHash, err := hashTaskrunStatusInternal(tr) + if err != nil { + return err + } + if !sc.mockVerify(currentHash, annotations[taskRunStatusHashSigAnnotation], "controller") { + return errors.New("signature was not able to be verified") + } + + // check current status hash vs annotation status hash by controller + if err := CheckStatusInternalAnnotation(tr); err != nil { + return err + } + + return nil +} + +// VerifyTaskRunResults checks that all the TaskRun results are valid by the mocked spire client +func (sc *MockClient) VerifyTaskRunResults(ctx context.Context, prs []v1beta1.PipelineResourceResult, tr *v1beta1.TaskRun) error { + if sc.VerifyTaskRunResultsOverride != nil { + return sc.VerifyTaskRunResultsOverride(ctx, prs, tr) + } + + if sc.VerifyAlwaysReturns != nil { + if *sc.VerifyAlwaysReturns { + return nil + } + return errors.New("failed to verify from mock VerifyAlwaysReturns") + } + + resultMap := map[string]v1beta1.PipelineResourceResult{} + for _, r := range prs { + if r.ResultType == v1beta1.TaskRunResultType { + resultMap[r.Key] = r + } + } + + var identity string + // Get SVID identity + for k, p := range resultMap { + if k == KeySVID { + identity = p.Value + break + } + } + + // Verify manifest + if err := verifyManifest(resultMap); err != nil { + return err + } + + if identity != sc.GetIdentity(tr) { + return 
errors.New("mock identity did not match") + } + + for key, r := range resultMap { + if strings.HasSuffix(key, KeySignatureSuffix) { + continue + } + if key == KeySVID { + continue + } + + sigEntry, ok := resultMap[key+KeySignatureSuffix] + sigValue, err := getResultValue(sigEntry) + if err != nil { + return err + } + resultValue, err := getResultValue(r) + if err != nil { + return err + } + if !ok || !sc.mockVerify(resultValue, sigValue, identity) { + return errors.Errorf("failed to verify field: %v", key) + } + } + + return nil +} + +// Sign signs and appends signatures to the PipelineResourceResult based on the mocked spire client +func (sc *MockClient) Sign(ctx context.Context, results []v1beta1.PipelineResourceResult) ([]v1beta1.PipelineResourceResult, error) { + if sc.SignOverride != nil { + return sc.SignOverride(ctx, results) + } + + if len(sc.SignIdentities) == 0 { + return nil, errors.New("signIdentities empty, please provide identities to sign with the MockClient.GetIdentity function") + } + + identity := sc.SignIdentities[0] + sc.SignIdentities = sc.SignIdentities[1:] + + if !sc.Entries[identity] { + return nil, errors.Errorf("entry doesn't exist for identity: %v", identity) + } + + output := []v1beta1.PipelineResourceResult{} + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeySVID, + Value: identity, + ResultType: v1beta1.TaskRunResultType, + }) + + for _, r := range results { + if r.ResultType == v1beta1.TaskRunResultType { + resultValue, err := getResultValue(r) + if err != nil { + return nil, err + } + s := sc.mockSign(resultValue, identity) + output = append(output, v1beta1.PipelineResourceResult{ + Key: r.Key + KeySignatureSuffix, + Value: s, + ResultType: v1beta1.TaskRunResultType, + }) + } + } + // get complete manifest of keys such that it can be verified + manifest := getManifest(results) + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeyResultManifest, + Value: manifest, + ResultType: 
v1beta1.TaskRunResultType, + }) + manifestSig := sc.mockSign(manifest, identity) + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeyResultManifest + KeySignatureSuffix, + Value: manifestSig, + ResultType: v1beta1.TaskRunResultType, + }) + + return output, nil +} + +// Close mock closing the spire client connection +func (sc *MockClient) Close() error { + return nil +} + +// SetConfig sets the spire configuration for MockClient +func (sc *MockClient) SetConfig(c spireconfig.SpireConfig) { + return +} diff --git a/pkg/spire/spire_mock_test.go b/pkg/spire/spire_mock_test.go new file mode 100644 index 00000000000..b4719e2412d --- /dev/null +++ b/pkg/spire/spire_mock_test.go @@ -0,0 +1,755 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spire + +import ( + "context" + "testing" + "time" + + "github.com/google/uuid" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "knative.dev/pkg/apis" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" +) + +// Simple task run sign/verify +func TestSpireMock_TaskRunSign(t *testing.T) { + spireMockClient := &MockClient{} + var ( + cc ControllerAPIClient = spireMockClient + ) + + ctx := context.Background() + var err error + + for _, tr := range testTaskRuns() { + err = cc.AppendStatusInternalAnnotation(ctx, tr) + if err != nil { + t.Fatalf("failed to sign TaskRun: %v", err) + } + + err = cc.VerifyStatusInternalAnnotation(ctx, tr, nil) + if err != nil { + t.Fatalf("failed to verify TaskRun: %v", err) + } + } +} + +// test CheckSpireVerifiedFlag(tr *v1beta1.TaskRun) bool +func TestSpireMock_CheckSpireVerifiedFlag(t *testing.T) { + spireMockClient := &MockClient{} + var ( + cc ControllerAPIClient = spireMockClient + ) + + trs := testTaskRuns() + tr := trs[0] + + if !cc.CheckSpireVerifiedFlag(tr) { + t.Fatalf("verified flag should be unset") + } + + if tr.Status.Status.Annotations == nil { + tr.Status.Status.Annotations = map[string]string{} + } + tr.Status.Status.Annotations[NotVerifiedAnnotation] = "yes" + + if cc.CheckSpireVerifiedFlag(tr) { + t.Fatalf("verified flag should be unset") + } +} + +// Task run check signed status is not the same with two taskruns +func TestSpireMock_CheckHashSimilarities(t *testing.T) { + spireMockClient := &MockClient{} + var ( + cc ControllerAPIClient = spireMockClient + ) + + ctx := context.Background() + trs := testTaskRuns() + tr1, tr2 := trs[0], trs[1] + + trs = testTaskRuns() + tr1c, tr2c := trs[0], trs[1] + + tr2c.Status.Status.Annotations = map[string]string{"new": "value"} + + signTrs := []*v1beta1.TaskRun{tr1, tr1c, tr2, tr2c} + + for _, tr 
:= range signTrs { + err := cc.AppendStatusInternalAnnotation(ctx, tr) + if err != nil { + t.Fatalf("failed to sign TaskRun: %v", err) + } + } + + if getHash(tr1) != getHash(tr1c) { + t.Fatalf("2 hashes of the same status should be same") + } + + if getHash(tr1) == getHash(tr2) { + t.Fatalf("2 hashes of different status should not be the same") + } + + if getHash(tr2) != getHash(tr2c) { + t.Fatalf("2 hashes of the same status should be same (ignoring Status.Status)") + } +} + +// Task run sign, modify signature/hash/svid/content and verify +func TestSpireMock_CheckTamper(t *testing.T) { + + tests := []struct { + // description of test case + desc string + // annotations to set + setAnnotations map[string]string + // modify the status + modifyStatus bool + // modify the hash to match the new status but not the signature + modifyHashToMatch bool + // if test should pass + verify bool + }{ + { + desc: "tamper nothing", + verify: true, + }, + { + desc: "tamper unrelated hash", + setAnnotations: map[string]string{ + "unrelated-hash": "change", + }, + verify: true, + }, + { + desc: "tamper status hash", + setAnnotations: map[string]string{ + TaskRunStatusHashAnnotation: "change-hash", + }, + verify: false, + }, + { + desc: "tamper sig", + setAnnotations: map[string]string{ + taskRunStatusHashSigAnnotation: "change-sig", + }, + verify: false, + }, + { + desc: "tamper SVID", + setAnnotations: map[string]string{ + controllerSvidAnnotation: "change-svid", + }, + verify: false, + }, + { + desc: "delete status hash", + setAnnotations: map[string]string{ + TaskRunStatusHashAnnotation: "", + }, + verify: false, + }, + { + desc: "delete sig", + setAnnotations: map[string]string{ + taskRunStatusHashSigAnnotation: "", + }, + verify: false, + }, + { + desc: "delete SVID", + setAnnotations: map[string]string{ + controllerSvidAnnotation: "", + }, + verify: false, + }, + { + desc: "tamper status", + modifyStatus: true, + verify: false, + }, + { + desc: "tamper status and status hash", 
+ modifyStatus: true, + modifyHashToMatch: true, + verify: false, + }, + } + for _, tt := range tests { + spireMockClient := &MockClient{} + var ( + cc ControllerAPIClient = spireMockClient + ) + + ctx := context.Background() + for _, tr := range testTaskRuns() { + err := cc.AppendStatusInternalAnnotation(ctx, tr) + if err != nil { + t.Fatalf("failed to sign TaskRun: %v", err) + } + + if tr.Status.Status.Annotations == nil { + tr.Status.Status.Annotations = map[string]string{} + } + + if tt.setAnnotations != nil { + for k, v := range tt.setAnnotations { + tr.Status.Status.Annotations[k] = v + } + } + + if tt.modifyStatus { + tr.Status.TaskRunStatusFields.Steps = append(tr.Status.TaskRunStatusFields.Steps, v1beta1.StepState{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: int32(54321)}, + }}) + } + + if tt.modifyHashToMatch { + h, _ := hashTaskrunStatusInternal(tr) + tr.Status.Status.Annotations[TaskRunStatusHashAnnotation] = h + } + + verified := cc.VerifyStatusInternalAnnotation(ctx, tr, nil) == nil + if verified != tt.verify { + t.Fatalf("test %v expected verify %v, got %v", tt.desc, tt.verify, verified) + } + } + + } + +} + +// Task result sign and verify +func TestSpireMock_TaskRunResultsSign(t *testing.T) { + spireMockClient := &MockClient{} + var ( + cc ControllerAPIClient = spireMockClient + ec EntrypointerAPIClient = spireMockClient + ) + + testCases := []struct { + // description of test + desc string + // skip entry creation of pod identity + skipEntryCreate bool + // set wrong pod identity for signer + wrongPodIdentity bool + // whether sign/verify procedure should succeed + success bool + }{ + { + desc: "regular sign/verify result", + success: true, + }, + { + desc: "sign/verify result when entry isn't created", + skipEntryCreate: true, + success: false, + }, + { + desc: "sign/verify result when signing with wrong pod identity", + wrongPodIdentity: true, + success: false, + }, + } + + for _, tt := range 
testCases { + ctx := context.Background() + for _, tr := range testTaskRuns() { + + var err error + if !tt.skipEntryCreate { + // Pod should not be nil, but it isn't used in mocking + // implementation so should not matter + err = cc.CreateEntries(ctx, tr, genPodObj(tr, ""), 10000) + if err != nil { + t.Fatalf("unable to create entry") + } + } + + for _, results := range testPipelineResourceResults() { + success := func() bool { + spireMockClient.SignIdentities = []string{spireMockClient.GetIdentity(tr)} + if tt.wrongPodIdentity { + spireMockClient.SignIdentities = []string{"wrong-identity"} + } + + sigResults, err := ec.Sign(ctx, results) + if err != nil { + return false + } + + results = append(results, sigResults...) + + err = cc.VerifyTaskRunResults(ctx, results, tr) + if err != nil { + return false + } + + return true + }() + + if success != tt.success { + t.Fatalf("test %v expected verify %v, got %v", tt.desc, tt.success, success) + } + } + + err = cc.DeleteEntry(ctx, tr, genPodObj(tr, "")) + if err != nil { + t.Fatalf("unable to delete entry: %v", err) + } + } + } +} + +// Task result sign, modify signature/content and verify +func TestSpireMock_TaskRunResultsSignTamper(t *testing.T) { + spireMockClient := &MockClient{} + var ( + cc ControllerAPIClient = spireMockClient + ec EntrypointerAPIClient = spireMockClient + ) + + genPr := func() []v1beta1.PipelineResourceResult { + return []v1beta1.PipelineResourceResult{ + { + Key: "foo", + Value: "foo-value", + ResourceName: "source-image", + ResultType: v1beta1.TaskRunResultType, + }, + { + Key: "bar", + Value: "bar-value", + ResourceName: "source-image2", + ResultType: v1beta1.TaskRunResultType, + }, + } + } + + testCases := []struct { + // description of test + desc string + // function to tamper + tamperFn func([]v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult + // whether sign/verify procedure should succeed + success bool + }{ + { + desc: "no tamper", + success: true, + }, + { + desc: 
"non-intrusive tamper", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + prs = append(prs, v1beta1.PipelineResourceResult{ + Key: "not-taskrun-result-type-add", + Value: "abc:12345", + }) + return prs + }, + success: true, + }, + { + desc: "tamper SVID", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeySVID { + prs[i].Value = "tamper-value" + } + } + return prs + }, + success: false, + }, + { + desc: "tamper result manifest", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeyResultManifest { + prs[i].Value = "tamper-value" + } + } + return prs + }, + success: false, + }, + { + desc: "tamper result manifest signature", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeyResultManifest+KeySignatureSuffix { + prs[i].Value = "tamper-value" + } + } + return prs + }, + success: false, + }, + { + desc: "tamper result field", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == "foo" { + prs[i].Value = "tamper-value" + } + } + return prs + }, + success: false, + }, + { + desc: "tamper result field signature", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == "foo"+KeySignatureSuffix { + prs[i].Value = "tamper-value" + } + } + return prs + }, + success: false, + }, + { + desc: "delete SVID", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeySVID { + return append(prs[:i], prs[i+1:]...) 
+ } + } + return prs + }, + success: false, + }, + { + desc: "delete result manifest", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeyResultManifest { + return append(prs[:i], prs[i+1:]...) + } + } + return prs + }, + success: false, + }, + { + desc: "delete result manifest signature", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeyResultManifest+KeySignatureSuffix { + return append(prs[:i], prs[i+1:]...) + } + } + return prs + }, + success: false, + }, + { + desc: "delete result field", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == "foo" { + return append(prs[:i], prs[i+1:]...) + } + } + return prs + }, + success: false, + }, + { + desc: "delete result field signature", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == "foo"+KeySignatureSuffix { + return append(prs[:i], prs[i+1:]...) 
+ } + } + return prs + }, + success: false, + }, + { + desc: "add to result manifest", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeyResultManifest { + prs[i].Value += ",xyz" + } + } + return prs + }, + success: false, + }, + } + + for _, tt := range testCases { + ctx := context.Background() + for _, tr := range testTaskRuns() { + + var err error + // Pod should not be nil, but it isn't used in mocking + // implementation so should not matter + err = cc.CreateEntries(ctx, tr, genPodObj(tr, ""), 10000) + if err != nil { + t.Fatalf("unable to create entry") + } + + results := genPr() + success := func() bool { + spireMockClient.SignIdentities = []string{spireMockClient.GetIdentity(tr)} + + sigResults, err := ec.Sign(ctx, results) + if err != nil { + return false + } + + results = append(results, sigResults...) + if tt.tamperFn != nil { + results = tt.tamperFn(results) + } + + err = cc.VerifyTaskRunResults(ctx, results, tr) + if err != nil { + return false + } + + return true + }() + + if success != tt.success { + t.Fatalf("test %v expected verify %v, got %v", tt.desc, tt.success, success) + } + + err = cc.DeleteEntry(ctx, tr, genPodObj(tr, "")) + if err != nil { + t.Fatalf("unable to delete entry: %v", err) + } + } + } +} + +func objectMeta(name, ns string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Labels: map[string]string{}, + Annotations: map[string]string{}, + } +} + +func testTaskRuns() []*v1beta1.TaskRun { + return []*v1beta1.TaskRun{ + // taskRun 1 + { + ObjectMeta: objectMeta("taskrun-example", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: "taskname", + APIVersion: "a1", + }, + ServiceAccountName: "test-sa", + }, + }, + // taskRun 2 + { + ObjectMeta: objectMeta("taskrun-example-populated", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "unit-test-task"}, + ServiceAccountName: 
"test-sa", + Resources: &v1beta1.TaskRunResources{}, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + }, + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: int32(0)}, + }, + }}, + }, + }, + }, + // taskRun 3 + { + ObjectMeta: objectMeta("taskrun-example-with-objmeta", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "unit-test-task"}, + ServiceAccountName: "test-sa", + Resources: &v1beta1.TaskRunResources{}, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + apis.Condition{ + Type: apis.ConditionSucceeded, + }, + }, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: int32(0)}, + }, + }}, + }, + }, + }, + { + ObjectMeta: objectMeta("taskrun-example-with-objmeta-annotations", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "unit-test-task"}, + ServiceAccountName: "test-sa", + Resources: &v1beta1.TaskRunResources{}, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + apis.Condition{ + Type: apis.ConditionSucceeded, + }, + }, + Annotations: map[string]string{ + "annotation1": "a1value", + "annotation2": "a2value", + }, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: int32(0)}, + }, + }}, + }, + }, + }, + } +} + +func testPipelineResourceResults() [][]v1beta1.PipelineResourceResult { + 
return [][]v1beta1.PipelineResourceResult{ + // Single result + { + { + Key: "digest", + Value: "sha256:12345", + ResourceName: "source-image", + ResultType: v1beta1.TaskRunResultType, + }, + }, + // array result + { + { + Key: "resultName", + Value: "[\"hello\",\"world\"]", + ResourceName: "source-image", + ResultType: v1beta1.TaskRunResultType, + }, + }, + // array result + { + { + Key: "resultArray", + Value: "{\"key1\":\"var1\",\"key2\":\"var2\"}", + ResourceName: "source-image", + ResultType: v1beta1.TaskRunResultType, + }, + }, + // multi result + { + { + Key: "foo", + Value: "abc", + ResourceName: "source-image", + ResultType: v1beta1.TaskRunResultType, + }, + { + Key: "bar", + Value: "xyz", + ResourceName: "source-image2", + ResultType: v1beta1.TaskRunResultType, + }, + }, + // mix result type + { + { + Key: "foo", + Value: "abc", + ResourceName: "source-image", + ResultType: v1beta1.TaskRunResultType, + }, + { + Key: "bar", + Value: "xyz", + ResourceName: "source-image2", + ResultType: v1beta1.TaskRunResultType, + }, + { + Key: "resultName", + Value: "[\"hello\",\"world\"]", + ResourceName: "source-image3", + ResultType: v1beta1.TaskRunResultType, + }, + { + Key: "resultName2", + Value: "{\"key1\":\"var1\",\"key2\":\"var2\"}", + ResourceName: "source-image4", + ResultType: v1beta1.TaskRunResultType, + }, + }, + // not TaskRunResultType + { + { + Key: "not-taskrun-result-type", + Value: "sha256:12345", + ResourceName: "source-image", + }, + }, + // empty result + {}, + } +} + +func getHash(tr *v1beta1.TaskRun) string { + return tr.Status.Status.Annotations[TaskRunStatusHashAnnotation] +} + +func genPodObj(tr *v1beta1.TaskRun, uid string) *corev1.Pod { + if uid == "" { + uid = uuid.NewString() + } + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: tr.ObjectMeta.Namespace, + Name: "pod-" + tr.ObjectMeta.Name, + UID: types.UID(uid), + }, + } + + return pod +} diff --git a/pkg/spire/spire_test.go b/pkg/spire/spire_test.go new file mode 100644 
index 00000000000..b7ac0e3d13e --- /dev/null +++ b/pkg/spire/spire_test.go @@ -0,0 +1,731 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spire + +import ( + "context" + "fmt" + "testing" + + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" + "github.com/tektoncd/pipeline/pkg/spire/config" + "github.com/tektoncd/pipeline/pkg/spire/test" + "github.com/tektoncd/pipeline/pkg/spire/test/fakeworkloadapi" + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/logging" +) + +var ( + trustDomain = "example.org" + td = spiffeid.RequireTrustDomainFromString(trustDomain) + fooID = spiffeid.RequireFromPath(td, "/foo") + controllerID = spiffeid.RequireFromPath(td, "/controller") +) + +func TestSpire_TaskRunSign(t *testing.T) { + ctx, _ := ttesting.SetupDefaultContext(t) + + ca := test.NewCA(t, td) + wl := fakeworkloadapi.New(t) + defer wl.Stop() + + wl.SetX509Bundles(ca.X509Bundle()) + + resp := &fakeworkloadapi.X509SVIDResponse{ + Bundle: ca.X509Bundle(), + SVIDs: makeX509SVIDs(ca, controllerID), + } + wl.SetX509SVIDResponse(resp) + + cfg := &config.SpireConfig{} + cfg.SocketPath = wl.Addr() + cfg.TrustDomain = trustDomain + spireControllerClient := GetControllerAPIClient(ctx) + spireControllerClient.SetConfig(*cfg) + + logger := logging.FromContext(ctx) + + var ( + cc = 
spireControllerClient + ) + defer cc.Close() + + var err error + + for _, tr := range testTaskRuns() { + err = cc.AppendStatusInternalAnnotation(ctx, tr) + if err != nil { + t.Fatalf("failed to sign TaskRun: %v", err) + } + + err = cc.VerifyStatusInternalAnnotation(ctx, tr, logger) + if err != nil { + t.Fatalf("failed to verify TaskRun: %v", err) + } + } +} + +func TestSpire_CheckSpireVerifiedFlag(t *testing.T) { + ctx, _ := ttesting.SetupDefaultContext(t) + + ca := test.NewCA(t, td) + wl := fakeworkloadapi.New(t) + defer wl.Stop() + + wl.SetX509Bundles(ca.X509Bundle()) + + resp := &fakeworkloadapi.X509SVIDResponse{ + Bundle: ca.X509Bundle(), + SVIDs: makeX509SVIDs(ca, controllerID), + } + wl.SetX509SVIDResponse(resp) + + cfg := &config.SpireConfig{} + cfg.SocketPath = wl.Addr() + cfg.TrustDomain = trustDomain + spireControllerClient := GetControllerAPIClient(ctx) + spireControllerClient.SetConfig(*cfg) + + var ( + cc = spireControllerClient + ) + defer cc.Close() + + trs := testTaskRuns() + tr := trs[0] + + if !cc.CheckSpireVerifiedFlag(tr) { + t.Fatalf("verified flag should be unset") + } + + if tr.Status.Status.Annotations == nil { + tr.Status.Status.Annotations = map[string]string{} + } + tr.Status.Status.Annotations[NotVerifiedAnnotation] = "yes" + + if cc.CheckSpireVerifiedFlag(tr) { + t.Fatalf("verified flag should be unset") + } +} + +func TestSpire_CheckHashSimilarities(t *testing.T) { + ctx, _ := ttesting.SetupDefaultContext(t) + + ca := test.NewCA(t, td) + wl := fakeworkloadapi.New(t) + defer wl.Stop() + + wl.SetX509Bundles(ca.X509Bundle()) + + resp := &fakeworkloadapi.X509SVIDResponse{ + Bundle: ca.X509Bundle(), + SVIDs: makeX509SVIDs(ca, controllerID), + } + wl.SetX509SVIDResponse(resp) + + cfg := &config.SpireConfig{} + cfg.SocketPath = wl.Addr() + cfg.TrustDomain = trustDomain + spireControllerClient := GetControllerAPIClient(ctx) + spireControllerClient.SetConfig(*cfg) + + var ( + cc = spireControllerClient + ) + defer cc.Close() + + trs := 
testTaskRuns() + tr1, tr2 := trs[0], trs[1] + + trs = testTaskRuns() + tr1c, tr2c := trs[0], trs[1] + + tr2c.Status.Status.Annotations = map[string]string{"new": "value"} + + signTrs := []*v1beta1.TaskRun{tr1, tr1c, tr2, tr2c} + + for _, tr := range signTrs { + err := cc.AppendStatusInternalAnnotation(ctx, tr) + if err != nil { + t.Fatalf("failed to sign TaskRun: %v", err) + } + } + + if getHash(tr1) != getHash(tr1c) { + t.Fatalf("2 hashes of the same status should be same") + } + + if getHash(tr1) == getHash(tr2) { + t.Fatalf("2 hashes of different status should not be the same") + } + + if getHash(tr2) != getHash(tr2c) { + t.Fatalf("2 hashes of the same status should be same (ignoring Status.Status)") + } +} + +// Task run sign, modify signature/hash/svid/content and verify +func TestSpire_CheckTamper(t *testing.T) { + ctx, _ := ttesting.SetupDefaultContext(t) + + ca := test.NewCA(t, td) + wl := fakeworkloadapi.New(t) + defer wl.Stop() + + wl.SetX509Bundles(ca.X509Bundle()) + + resp := &fakeworkloadapi.X509SVIDResponse{ + Bundle: ca.X509Bundle(), + SVIDs: makeX509SVIDs(ca, controllerID), + } + wl.SetX509SVIDResponse(resp) + + cfg := &config.SpireConfig{} + cfg.SocketPath = wl.Addr() + cfg.TrustDomain = trustDomain + spireControllerClient := GetControllerAPIClient(ctx) + spireControllerClient.SetConfig(*cfg) + + logger := logging.FromContext(ctx) + + var ( + cc = spireControllerClient + ) + defer cc.Close() + + tests := []struct { + // description of test case + desc string + // annotations to set + setAnnotations map[string]string + // skip annotation set + skipAnnotation bool + // modify the status + modifyStatus bool + // modify the hash to match the new status but not the signature + modifyHashToMatch bool + // if test should pass + verify bool + }{ + { + desc: "tamper nothing", + verify: true, + }, + { + desc: "tamper unrelated hash", + setAnnotations: map[string]string{ + "unrelated-hash": "change", + }, + verify: true, + }, + { + desc: "tamper status hash", 
+ setAnnotations: map[string]string{ + TaskRunStatusHashAnnotation: "change-hash", + }, + verify: false, + }, + { + desc: "tamper sig", + setAnnotations: map[string]string{ + taskRunStatusHashSigAnnotation: "change-sig", + }, + verify: false, + }, + { + desc: "tamper SVID", + setAnnotations: map[string]string{ + controllerSvidAnnotation: "change-svid", + }, + verify: false, + }, + { + desc: "delete status hash", + setAnnotations: map[string]string{ + TaskRunStatusHashAnnotation: "", + }, + verify: false, + }, + { + desc: "delete sig", + setAnnotations: map[string]string{ + taskRunStatusHashSigAnnotation: "", + }, + verify: false, + }, + { + desc: "delete SVID", + setAnnotations: map[string]string{ + controllerSvidAnnotation: "", + }, + verify: false, + }, + { + desc: "set temper flag", + setAnnotations: map[string]string{ + NotVerifiedAnnotation: "true", + }, + verify: false, + }, + { + desc: "tamper status", + modifyStatus: true, + verify: false, + }, + { + desc: "tamper status and status hash", + modifyStatus: true, + modifyHashToMatch: true, + verify: false, + }, + { + desc: "tamper status and status hash", + skipAnnotation: true, + verify: false, + }, + } + for _, tt := range tests { + + for _, tr := range testTaskRuns() { + if !tt.skipAnnotation { + err := cc.AppendStatusInternalAnnotation(ctx, tr) + if err != nil { + t.Fatalf("failed to sign TaskRun: %v", err) + } + } + + if tr.Status.Status.Annotations == nil { + tr.Status.Status.Annotations = map[string]string{} + } + + if tt.setAnnotations != nil { + for k, v := range tt.setAnnotations { + tr.Status.Status.Annotations[k] = v + } + } + + if tt.modifyStatus { + tr.Status.TaskRunStatusFields.Steps = append(tr.Status.TaskRunStatusFields.Steps, v1beta1.StepState{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: int32(54321)}, + }}) + } + + if tt.modifyHashToMatch { + h, _ := hashTaskrunStatusInternal(tr) + tr.Status.Status.Annotations[TaskRunStatusHashAnnotation] 
= h + } + + verified := cc.VerifyStatusInternalAnnotation(ctx, tr, logger) == nil + if verified != tt.verify { + t.Fatalf("test %v expected verify %v, got %v", tt.desc, tt.verify, verified) + } + } + + } + +} + +func TestSpire_TaskRunResultsSign(t *testing.T) { + ctx, _ := ttesting.SetupDefaultContext(t) + + ca := test.NewCA(t, td) + + wl := fakeworkloadapi.New(t) + defer wl.Stop() + + wl.SetX509Bundles(ca.X509Bundle()) + + cfg := &config.SpireConfig{} + cfg.SocketPath = wl.Addr() + cfg.TrustDomain = trustDomain + spireEntryPointerClient := NewEntrypointerAPIClient(cfg) + spireControllerClient := GetControllerAPIClient(ctx) + spireControllerClient.SetConfig(*cfg) + + var ( + cc = spireControllerClient + ec = spireEntryPointerClient + ) + defer cc.Close() + defer ec.Close() + + testCases := []struct { + // description of test + desc string + // skip entry creation of pod identity + skipEntryCreate bool + // set wrong pod identity for signer + wrongPodIdentity bool + // whether sign/verify procedure should succeed + success bool + // List of taskruns to test against + taskRunList []*v1beta1.TaskRun + // List of PipelineResourceResult to test against + pipelineResourceResults [][]v1beta1.PipelineResourceResult + }{ + { + desc: "sign/verify result when entry isn't created", + skipEntryCreate: true, + success: false, + // Using single taskrun and pipelineResourceResults as the unit test + // times out with the default 30 second. 
getWorkloadSVID has a 20 second + // time out before it throws an error for no svid found + taskRunList: testSingleTaskRun(), + pipelineResourceResults: testSinglePipelineResourceResults(), + }, + { + desc: "regular sign/verify result", + success: true, + taskRunList: testTaskRuns(), + pipelineResourceResults: testPipelineResourceResults(), + }, + { + desc: "sign/verify result when signing with wrong pod identity", + wrongPodIdentity: true, + success: false, + taskRunList: testTaskRuns(), + pipelineResourceResults: testPipelineResourceResults(), + }, + } + + for _, tt := range testCases { + ctx := context.Background() + for _, tr := range tt.taskRunList { + if !tt.skipEntryCreate { + if !tt.wrongPodIdentity { + resp := &fakeworkloadapi.X509SVIDResponse{ + Bundle: ca.X509Bundle(), + SVIDs: makeX509SVIDs(ca, spiffeid.RequireFromPath(td, getTaskrunPath(tr))), + } + wl.SetX509SVIDResponse(resp) + } else { + resp := &fakeworkloadapi.X509SVIDResponse{ + Bundle: ca.X509Bundle(), + SVIDs: makeX509SVIDs(ca, fooID), + } + wl.SetX509SVIDResponse(resp) + } + } + + for _, results := range tt.pipelineResourceResults { + success := func() bool { + + sigResults, err := ec.Sign(ctx, results) + if err != nil { + return false + } + + results = append(results, sigResults...) 
+ + err = cc.VerifyTaskRunResults(ctx, results, tr) + if err != nil { + return false + } + + return true + }() + + if success != tt.success { + t.Fatalf("test %v expected verify %v, got %v", tt.desc, tt.success, success) + } + } + } + } +} + +// Task result sign, modify signature/content and verify +func TestSpire_TaskRunResultsSignTamper(t *testing.T) { + ctx, _ := ttesting.SetupDefaultContext(t) + + ca := test.NewCA(t, td) + + wl := fakeworkloadapi.New(t) + defer wl.Stop() + + wl.SetX509Bundles(ca.X509Bundle()) + + cfg := &config.SpireConfig{} + cfg.SocketPath = wl.Addr() + cfg.TrustDomain = trustDomain + spireEntryPointerClient := NewEntrypointerAPIClient(cfg) + spireControllerClient := GetControllerAPIClient(ctx) + spireControllerClient.SetConfig(*cfg) + + var ( + cc = spireControllerClient + ec = spireEntryPointerClient + ) + defer cc.Close() + defer ec.Close() + + genPr := func() []v1beta1.PipelineResourceResult { + return []v1beta1.PipelineResourceResult{ + { + Key: "foo", + Value: "foo-value", + ResourceName: "source-image", + ResultType: v1beta1.TaskRunResultType, + }, + { + Key: "bar", + Value: "bar-value", + ResourceName: "source-image2", + ResultType: v1beta1.TaskRunResultType, + }, + } + } + + testCases := []struct { + // description of test + desc string + // function to tamper + tamperFn func([]v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult + // whether sign/verify procedure should succeed + success bool + }{ + { + desc: "no tamper", + success: true, + }, + { + desc: "non-intrusive tamper", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + prs = append(prs, v1beta1.PipelineResourceResult{ + Key: "not-taskrun-result-type-add", + Value: "abc:12345", + }) + return prs + }, + success: true, + }, + { + desc: "tamper SVID", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeySVID { + prs[i].Value = "tamper-value" + 
} + } + return prs + }, + success: false, + }, + { + desc: "tamper result manifest", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeyResultManifest { + prs[i].Value = "tamper-value" + } + } + return prs + }, + success: false, + }, + { + desc: "tamper result manifest signature", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeyResultManifest+KeySignatureSuffix { + prs[i].Value = "tamper-value" + } + } + return prs + }, + success: false, + }, + { + desc: "tamper result field", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == "foo" { + prs[i].Value = "tamper-value" + } + } + return prs + }, + success: false, + }, + { + desc: "tamper result field signature", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == "foo"+KeySignatureSuffix { + prs[i].Value = "tamper-value" + } + } + return prs + }, + success: false, + }, + { + desc: "delete SVID", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeySVID { + return append(prs[:i], prs[i+1:]...) + } + } + return prs + }, + success: false, + }, + { + desc: "delete result manifest", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeyResultManifest { + return append(prs[:i], prs[i+1:]...) + } + } + return prs + }, + success: false, + }, + { + desc: "delete result manifest signature", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeyResultManifest+KeySignatureSuffix { + return append(prs[:i], prs[i+1:]...) 
+ } + } + return prs + }, + success: false, + }, + { + desc: "delete result field", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == "foo" { + return append(prs[:i], prs[i+1:]...) + } + } + return prs + }, + success: false, + }, + { + desc: "delete result field signature", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == "foo"+KeySignatureSuffix { + return append(prs[:i], prs[i+1:]...) + } + } + return prs + }, + success: false, + }, + { + desc: "add to result manifest", + tamperFn: func(prs []v1beta1.PipelineResourceResult) []v1beta1.PipelineResourceResult { + for i, pr := range prs { + if pr.Key == KeyResultManifest { + prs[i].Value += ",xyz" + } + } + return prs + }, + success: false, + }, + } + + for _, tt := range testCases { + ctx := context.Background() + for _, tr := range testTaskRuns() { + + results := genPr() + success := func() bool { + + resp := &fakeworkloadapi.X509SVIDResponse{ + Bundle: ca.X509Bundle(), + SVIDs: makeX509SVIDs(ca, spiffeid.RequireFromPath(td, getTaskrunPath(tr))), + } + wl.SetX509SVIDResponse(resp) + + sigResults, err := ec.Sign(ctx, results) + if err != nil { + return false + } + + results = append(results, sigResults...) 
+ if tt.tamperFn != nil { + results = tt.tamperFn(results) + } + + err = cc.VerifyTaskRunResults(ctx, results, tr) + if err != nil { + return false + } + + return true + }() + + if success != tt.success { + t.Fatalf("test %v expected verify %v, got %v", tt.desc, tt.success, success) + } + } + } +} + +func makeX509SVIDs(ca *test.CA, ids ...spiffeid.ID) []*x509svid.SVID { + svids := []*x509svid.SVID{} + for _, id := range ids { + svids = append(svids, ca.CreateX509SVID(id)) + } + return svids +} + +func getTaskrunPath(tr *v1beta1.TaskRun) string { + return fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name) +} + +func testSingleTaskRun() []*v1beta1.TaskRun { + return []*v1beta1.TaskRun{ + // taskRun 1 + { + ObjectMeta: objectMeta("taskrun-example", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: "taskname", + APIVersion: "a1", + }, + ServiceAccountName: "test-sa", + }, + }, + } +} + +func testSinglePipelineResourceResults() [][]v1beta1.PipelineResourceResult { + return [][]v1beta1.PipelineResourceResult{ + // Single result + { + { + Key: "digest", + Value: "sha256:12345", + ResourceName: "source-image", + ResultType: v1beta1.TaskRunResultType, + }, + }, + } +} diff --git a/pkg/spire/test/ca.go b/pkg/spire/test/ca.go new file mode 100644 index 00000000000..623da6151ac --- /dev/null +++ b/pkg/spire/test/ca.go @@ -0,0 +1,286 @@ +package test + +import ( + "crypto" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "math/big" + "net" + "net/url" + "testing" + "time" + + "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" + "github.com/spiffe/go-spiffe/v2/bundle/spiffebundle" + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/stretchr/testify/require" + "github.com/tektoncd/pipeline/pkg/spire/test/x509util" + "gopkg.in/square/go-jose.v2" + 
"gopkg.in/square/go-jose.v2/cryptosigner" + "gopkg.in/square/go-jose.v2/jwt" +) + +var ( + localhostIPs = []net.IP{net.IPv4(127, 0, 0, 1), net.IPv6loopback} +) + +type CA struct { + tb testing.TB + td spiffeid.TrustDomain + parent *CA + cert *x509.Certificate + key crypto.Signer + jwtKey crypto.Signer + jwtKid string +} + +type CertificateOption interface { + apply(*x509.Certificate) +} + +type certificateOption func(*x509.Certificate) + +func (co certificateOption) apply(c *x509.Certificate) { + co(c) +} + +func NewCA(tb testing.TB, td spiffeid.TrustDomain) *CA { + cert, key := CreateCACertificate(tb, nil, nil) + return &CA{ + tb: tb, + td: td, + cert: cert, + key: key, + jwtKey: NewEC256Key(tb), + jwtKid: NewKeyID(tb), + } +} + +func (ca *CA) ChildCA(options ...CertificateOption) *CA { + cert, key := CreateCACertificate(ca.tb, ca.cert, ca.key, options...) + return &CA{ + tb: ca.tb, + parent: ca, + cert: cert, + key: key, + jwtKey: NewEC256Key(ca.tb), + jwtKid: NewKeyID(ca.tb), + } +} + +func (ca *CA) CreateX509SVID(id spiffeid.ID, options ...CertificateOption) *x509svid.SVID { + cert, key := CreateX509SVID(ca.tb, ca.cert, ca.key, id, options...) + return &x509svid.SVID{ + ID: id, + Certificates: append([]*x509.Certificate{cert}, ca.chain(false)...), + PrivateKey: key, + } +} + +func (ca *CA) CreateX509Certificate(options ...CertificateOption) ([]*x509.Certificate, crypto.Signer) { + cert, key := CreateX509Certificate(ca.tb, ca.cert, ca.key, options...) 
+ return append([]*x509.Certificate{cert}, ca.chain(false)...), key +} + +func (ca *CA) CreateJWTSVID(id spiffeid.ID, audience []string) *jwtsvid.SVID { + claims := jwt.Claims{ + Subject: id.String(), + Issuer: "FAKECA", + Audience: audience, + IssuedAt: jwt.NewNumericDate(time.Now()), + Expiry: jwt.NewNumericDate(time.Now().Add(time.Hour)), + } + + jwtSigner, err := jose.NewSigner( + jose.SigningKey{ + Algorithm: jose.ES256, + Key: jose.JSONWebKey{ + Key: cryptosigner.Opaque(ca.jwtKey), + KeyID: ca.jwtKid, + }, + }, + new(jose.SignerOptions).WithType("JWT"), + ) + require.NoError(ca.tb, err) + + signedToken, err := jwt.Signed(jwtSigner).Claims(claims).CompactSerialize() + require.NoError(ca.tb, err) + + svid, err := jwtsvid.ParseInsecure(signedToken, audience) + require.NoError(ca.tb, err) + return svid +} + +func (ca *CA) X509Authorities() []*x509.Certificate { + root := ca + for root.parent != nil { + root = root.parent + } + return []*x509.Certificate{root.cert} +} + +func (ca *CA) JWTAuthorities() map[string]crypto.PublicKey { + return map[string]crypto.PublicKey{ + ca.jwtKid: ca.jwtKey.Public(), + } +} + +func (ca *CA) Bundle() *spiffebundle.Bundle { + bundle := spiffebundle.New(ca.td) + bundle.SetX509Authorities(ca.X509Authorities()) + bundle.SetJWTAuthorities(ca.JWTAuthorities()) + return bundle +} + +func (ca *CA) X509Bundle() *x509bundle.Bundle { + return x509bundle.FromX509Authorities(ca.td, ca.X509Authorities()) +} + +func (ca *CA) JWTBundle() *jwtbundle.Bundle { + return jwtbundle.FromJWTAuthorities(ca.td, ca.JWTAuthorities()) +} + +func CreateCACertificate(tb testing.TB, parent *x509.Certificate, parentKey crypto.Signer, options ...CertificateOption) (*x509.Certificate, crypto.Signer) { + now := time.Now() + serial := NewSerial(tb) + key := NewEC256Key(tb) + tmpl := &x509.Certificate{ + SerialNumber: serial, + Subject: pkix.Name{ + CommonName: fmt.Sprintf("CA %x", serial), + }, + BasicConstraintsValid: true, + IsCA: true, + NotBefore: now, + NotAfter: 
now.Add(time.Hour), + } + + applyOptions(tmpl, options...) + + if parent == nil { + parent = tmpl + parentKey = key + } + return CreateCertificate(tb, tmpl, parent, key.Public(), parentKey), key +} + +func CreateX509Certificate(tb testing.TB, parent *x509.Certificate, parentKey crypto.Signer, options ...CertificateOption) (*x509.Certificate, crypto.Signer) { + now := time.Now() + serial := NewSerial(tb) + key := NewEC256Key(tb) + tmpl := &x509.Certificate{ + SerialNumber: serial, + Subject: pkix.Name{ + CommonName: fmt.Sprintf("X509-Certificate %x", serial), + }, + NotBefore: now, + NotAfter: now.Add(time.Hour), + KeyUsage: x509.KeyUsageDigitalSignature, + } + + applyOptions(tmpl, options...) + + return CreateCertificate(tb, tmpl, parent, key.Public(), parentKey), key +} + +func CreateX509SVID(tb testing.TB, parent *x509.Certificate, parentKey crypto.Signer, id spiffeid.ID, options ...CertificateOption) (*x509.Certificate, crypto.Signer) { + serial := NewSerial(tb) + options = append(options, + WithSerial(serial), + WithKeyUsage(x509.KeyUsageDigitalSignature), + WithSubject(pkix.Name{ + CommonName: fmt.Sprintf("X509-SVID %x", serial), + }), + WithURIs(id.URL())) + + return CreateX509Certificate(tb, parent, parentKey, options...) 
+} + +func CreateCertificate(tb testing.TB, tmpl, parent *x509.Certificate, pub, priv interface{}) *x509.Certificate { + certDER, err := x509.CreateCertificate(rand.Reader, tmpl, parent, pub, priv) + require.NoError(tb, err) + cert, err := x509.ParseCertificate(certDER) + require.NoError(tb, err) + return cert +} + +func CreateWebCredentials(t testing.TB) (*x509.CertPool, *tls.Certificate) { + rootCert, rootKey := CreateCACertificate(t, nil, nil) + + childCert, childKey := CreateX509Certificate(t, rootCert, rootKey, + WithIPAddresses(localhostIPs...)) + + return x509util.NewCertPool([]*x509.Certificate{rootCert}), + &tls.Certificate{ + Certificate: [][]byte{childCert.Raw}, + PrivateKey: childKey, + } +} + +func NewSerial(tb testing.TB) *big.Int { + b := make([]byte, 8) + _, err := rand.Read(b) + require.NoError(tb, err) + return new(big.Int).SetBytes(b) +} + +func WithSerial(serial *big.Int) CertificateOption { + return certificateOption(func(c *x509.Certificate) { + c.SerialNumber = serial + }) +} + +func WithKeyUsage(keyUsage x509.KeyUsage) CertificateOption { + return certificateOption(func(c *x509.Certificate) { + c.KeyUsage = keyUsage + }) +} + +func WithLifetime(notBefore, notAfter time.Time) CertificateOption { + return certificateOption(func(c *x509.Certificate) { + c.NotBefore = notBefore + c.NotAfter = notAfter + }) +} + +func WithIPAddresses(ips ...net.IP) CertificateOption { + return certificateOption(func(c *x509.Certificate) { + c.IPAddresses = ips + }) +} + +func WithURIs(uris ...*url.URL) CertificateOption { + return certificateOption(func(c *x509.Certificate) { + c.URIs = uris + }) +} + +func WithSubject(subject pkix.Name) CertificateOption { + return certificateOption(func(c *x509.Certificate) { + c.Subject = subject + }) +} + +func applyOptions(c *x509.Certificate, options ...CertificateOption) { + for _, opt := range options { + opt.apply(c) + } +} + +func (ca *CA) chain(includeRoot bool) []*x509.Certificate { + chain := []*x509.Certificate{} + 
next := ca + for next != nil { + if includeRoot || next.parent != nil { + chain = append(chain, next.cert) + } + next = next.parent + } + return chain +} diff --git a/pkg/spire/test/errstrings/err_posix.go b/pkg/spire/test/errstrings/err_posix.go new file mode 100644 index 00000000000..24ae1a71b6f --- /dev/null +++ b/pkg/spire/test/errstrings/err_posix.go @@ -0,0 +1,9 @@ +//go:build !windows +// +build !windows + +// OS specific error strings +package errstrings + +var ( + FileNotFound = "no such file or directory" +) diff --git a/pkg/spire/test/errstrings/err_windows.go b/pkg/spire/test/errstrings/err_windows.go new file mode 100644 index 00000000000..99101ef899f --- /dev/null +++ b/pkg/spire/test/errstrings/err_windows.go @@ -0,0 +1,9 @@ +//go:build windows +// +build windows + +// OS specific error strings +package errstrings + +var ( + FileNotFound = "The system cannot find the file specified." +) diff --git a/pkg/spire/test/fakebundleendpoint/server.go b/pkg/spire/test/fakebundleendpoint/server.go new file mode 100644 index 00000000000..94099b0e907 --- /dev/null +++ b/pkg/spire/test/fakebundleendpoint/server.go @@ -0,0 +1,138 @@ +package fakebundleendpoint + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "net" + "net/http" + "sync" + "testing" + + "github.com/spiffe/go-spiffe/v2/bundle/spiffebundle" + "github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/stretchr/testify/assert" + "github.com/tektoncd/pipeline/pkg/spire/test" + "github.com/tektoncd/pipeline/pkg/spire/test/x509util" +) + +type Server struct { + tb testing.TB + wg sync.WaitGroup + addr net.Addr + httpServer *http.Server + // Root certificates used by clients to verify server certificates. + rootCAs *x509.CertPool + // TLS configuration used by the server. + tlscfg *tls.Config + // SPIFFE bundles that can be returned by this Server. 
+ bundles []*spiffebundle.Bundle +} + +type ServerOption interface { + apply(*Server) +} + +func New(tb testing.TB, option ...ServerOption) *Server { + rootCAs, cert := test.CreateWebCredentials(tb) + tlscfg := &tls.Config{ + Certificates: []tls.Certificate{*cert}, + } + + s := &Server{ + tb: tb, + rootCAs: rootCAs, + tlscfg: tlscfg, + } + + for _, opt := range option { + opt.apply(s) + } + + sm := http.NewServeMux() + sm.HandleFunc("/test-bundle", s.testbundle) + s.httpServer = &http.Server{ + Handler: sm, + TLSConfig: s.tlscfg, + } + err := s.start() + if err != nil { + tb.Fatalf("Failed to start: %v", err) + } + return s +} + +func (s *Server) Shutdown() { + err := s.httpServer.Shutdown(context.Background()) + assert.NoError(s.tb, err) + s.wg.Wait() +} + +func (s *Server) Addr() string { + return s.addr.String() +} + +func (s *Server) FetchBundleURL() string { + return fmt.Sprintf("https://%s/test-bundle", s.Addr()) +} + +func (s *Server) RootCAs() *x509.CertPool { + return s.rootCAs +} + +func (s *Server) start() error { + ln, err := net.Listen("tcp", "127.0.0.1:") + if err != nil { + return err + } + + s.addr = ln.Addr() + + s.wg.Add(1) + go func() { + err := s.httpServer.ServeTLS(ln, "", "") + assert.EqualError(s.tb, err, http.ErrServerClosed.Error()) + s.wg.Done() + ln.Close() + }() + return nil +} + +func (s *Server) testbundle(w http.ResponseWriter, r *http.Request) { + if len(s.bundles) == 0 { + w.WriteHeader(http.StatusNotFound) + return + } + + bb, err := s.bundles[0].Marshal() + assert.NoError(s.tb, err) + s.bundles = s.bundles[1:] + w.Header().Add("Content-Type", "application/json") + b, err := w.Write(bb) + assert.NoError(s.tb, err) + assert.Equal(s.tb, len(bb), b) +} + +type serverOption func(*Server) + +// WithTestBundles sets the bundles that are returned by the Bundle Endpoint. You can +// specify several bundles, which are going to be returned one at a time each time +// a bundle is GET by a client. 
+func WithTestBundles(bundles ...*spiffebundle.Bundle) ServerOption { + return serverOption(func(s *Server) { + s.bundles = bundles + }) +} + +func WithSPIFFEAuth(bundle *spiffebundle.Bundle, svid *x509svid.SVID) ServerOption { + return serverOption(func(s *Server) { + s.rootCAs = x509util.NewCertPool(bundle.X509Authorities()) + s.tlscfg = tlsconfig.TLSServerConfig(svid) + }) +} + +func (so serverOption) apply(s *Server) { + so(s) +} diff --git a/pkg/spire/test/fakeworkloadapi/workload_api.go b/pkg/spire/test/fakeworkloadapi/workload_api.go new file mode 100644 index 00000000000..17ccdbf115f --- /dev/null +++ b/pkg/spire/test/fakeworkloadapi/workload_api.go @@ -0,0 +1,407 @@ +package fakeworkloadapi + +import ( + "context" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "sync" + "testing" + + "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/proto/spiffe/workload" + "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tektoncd/pipeline/pkg/spire/test/pemutil" + "github.com/tektoncd/pipeline/pkg/spire/test/x509util" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/structpb" +) + +var noIdentityError = status.Error(codes.PermissionDenied, "no identity issued") + +type WorkloadAPI struct { + tb testing.TB + wg sync.WaitGroup + addr string + server *grpc.Server + mu sync.Mutex + x509Resp *workload.X509SVIDResponse + x509Chans map[chan *workload.X509SVIDResponse]struct{} + jwtResp *workload.JWTSVIDResponse + jwtBundlesResp *workload.JWTBundlesResponse + jwtBundlesChans map[chan *workload.JWTBundlesResponse]struct{} + x509BundlesResp *workload.X509BundlesResponse + 
x509BundlesChans map[chan *workload.X509BundlesResponse]struct{} +} + +func New(tb testing.TB) *WorkloadAPI { + w := &WorkloadAPI{ + x509Chans: make(map[chan *workload.X509SVIDResponse]struct{}), + jwtBundlesChans: make(map[chan *workload.JWTBundlesResponse]struct{}), + x509BundlesChans: make(map[chan *workload.X509BundlesResponse]struct{}), + } + + listener, err := newListener() + require.NoError(tb, err) + + server := grpc.NewServer() + workload.RegisterSpiffeWorkloadAPIServer(server, &workloadAPIWrapper{w: w}) + + w.wg.Add(1) + go func() { + defer w.wg.Done() + _ = server.Serve(listener) + }() + + w.addr = getTargetName(listener.Addr()) + tb.Logf("WorkloadAPI address: %s", w.addr) + w.server = server + return w +} + +func (w *WorkloadAPI) Stop() { + w.server.Stop() + w.wg.Wait() +} + +func (w *WorkloadAPI) Addr() string { + return w.addr +} + +func (w *WorkloadAPI) SetX509SVIDResponse(r *X509SVIDResponse) { + var resp *workload.X509SVIDResponse + if r != nil { + resp = r.ToProto(w.tb) + } + + w.mu.Lock() + defer w.mu.Unlock() + w.x509Resp = resp + + for ch := range w.x509Chans { + select { + case ch <- resp: + default: + <-ch + ch <- resp + } + } +} + +func (w *WorkloadAPI) SetJWTSVIDResponse(r *workload.JWTSVIDResponse) { + w.mu.Lock() + defer w.mu.Unlock() + if r != nil { + w.jwtResp = r + } +} + +func (w *WorkloadAPI) SetJWTBundles(jwtBundles ...*jwtbundle.Bundle) { + resp := &workload.JWTBundlesResponse{ + Bundles: make(map[string][]byte), + } + for _, bundle := range jwtBundles { + bundleBytes, err := bundle.Marshal() + assert.NoError(w.tb, err) + resp.Bundles[bundle.TrustDomain().String()] = bundleBytes + } + + w.mu.Lock() + defer w.mu.Unlock() + w.jwtBundlesResp = resp + + for ch := range w.jwtBundlesChans { + select { + case ch <- w.jwtBundlesResp: + default: + <-ch + ch <- w.jwtBundlesResp + } + } +} + +func (w *WorkloadAPI) SetX509Bundles(x509Bundles ...*x509bundle.Bundle) { + resp := &workload.X509BundlesResponse{ + Bundles: make(map[string][]byte), + 
} + for _, bundle := range x509Bundles { + bundleBytes, err := bundle.Marshal() + assert.NoError(w.tb, err) + bundlePem, err := pemutil.ParseCertificates(bundleBytes) + assert.NoError(w.tb, err) + + var rawBytes []byte + for _, c := range bundlePem { + rawBytes = append(rawBytes, c.Raw...) + } + + resp.Bundles[bundle.TrustDomain().String()] = rawBytes + } + + w.mu.Lock() + defer w.mu.Unlock() + w.x509BundlesResp = resp + + for ch := range w.x509BundlesChans { + select { + case ch <- w.x509BundlesResp: + default: + <-ch + ch <- w.x509BundlesResp + } + } +} + +type workloadAPIWrapper struct { + workload.UnimplementedSpiffeWorkloadAPIServer + w *WorkloadAPI +} + +func (w *workloadAPIWrapper) FetchX509SVID(req *workload.X509SVIDRequest, stream workload.SpiffeWorkloadAPI_FetchX509SVIDServer) error { + return w.w.fetchX509SVID(req, stream) +} + +func (w *workloadAPIWrapper) FetchX509Bundles(req *workload.X509BundlesRequest, stream workload.SpiffeWorkloadAPI_FetchX509BundlesServer) error { + return w.w.fetchX509Bundles(req, stream) +} + +func (w *workloadAPIWrapper) FetchJWTSVID(ctx context.Context, req *workload.JWTSVIDRequest) (*workload.JWTSVIDResponse, error) { + return w.w.fetchJWTSVID(ctx, req) +} + +func (w *workloadAPIWrapper) FetchJWTBundles(req *workload.JWTBundlesRequest, stream workload.SpiffeWorkloadAPI_FetchJWTBundlesServer) error { + return w.w.fetchJWTBundles(req, stream) +} + +func (w *workloadAPIWrapper) ValidateJWTSVID(ctx context.Context, req *workload.ValidateJWTSVIDRequest) (*workload.ValidateJWTSVIDResponse, error) { + return w.w.validateJWTSVID(ctx, req) +} + +type X509SVIDResponse struct { + SVIDs []*x509svid.SVID + Bundle *x509bundle.Bundle + FederatedBundles []*x509bundle.Bundle +} + +func (r *X509SVIDResponse) ToProto(tb testing.TB) *workload.X509SVIDResponse { + var bundle []byte + if r.Bundle != nil { + bundle = x509util.ConcatRawCertsFromCerts(r.Bundle.X509Authorities()) + } + + pb := &workload.X509SVIDResponse{ + FederatedBundles: 
make(map[string][]byte), + } + for _, svid := range r.SVIDs { + var keyDER []byte + if svid.PrivateKey != nil { + var err error + keyDER, err = x509.MarshalPKCS8PrivateKey(svid.PrivateKey) + require.NoError(tb, err) + } + pb.Svids = append(pb.Svids, &workload.X509SVID{ + SpiffeId: svid.ID.String(), + X509Svid: x509util.ConcatRawCertsFromCerts(svid.Certificates), + X509SvidKey: keyDER, + Bundle: bundle, + }) + } + for _, v := range r.FederatedBundles { + pb.FederatedBundles[v.TrustDomain().IDString()] = x509util.ConcatRawCertsFromCerts(v.X509Authorities()) + } + + return pb +} + +func (w *WorkloadAPI) fetchX509SVID(_ *workload.X509SVIDRequest, stream workload.SpiffeWorkloadAPI_FetchX509SVIDServer) error { + if err := checkHeader(stream.Context()); err != nil { + return err + } + ch := make(chan *workload.X509SVIDResponse, 1) + w.mu.Lock() + w.x509Chans[ch] = struct{}{} + resp := w.x509Resp + w.mu.Unlock() + + defer func() { + w.mu.Lock() + delete(w.x509Chans, ch) + w.mu.Unlock() + }() + + sendResp := func(resp *workload.X509SVIDResponse) error { + if resp == nil { + return noIdentityError + } + return stream.Send(resp) + } + + if err := sendResp(resp); err != nil { + return err + } + for { + select { + case resp := <-ch: + if err := sendResp(resp); err != nil { + return err + } + case <-stream.Context().Done(): + return stream.Context().Err() + } + } +} + +func (w *WorkloadAPI) fetchX509Bundles(_ *workload.X509BundlesRequest, stream workload.SpiffeWorkloadAPI_FetchX509BundlesServer) error { + if err := checkHeader(stream.Context()); err != nil { + return err + } + ch := make(chan *workload.X509BundlesResponse, 1) + w.mu.Lock() + w.x509BundlesChans[ch] = struct{}{} + resp := w.x509BundlesResp + w.mu.Unlock() + + defer func() { + w.mu.Lock() + delete(w.x509BundlesChans, ch) + w.mu.Unlock() + }() + + sendResp := func(resp *workload.X509BundlesResponse) error { + if resp == nil { + return noIdentityError + } + return stream.Send(resp) + } + + if err := sendResp(resp); 
err != nil { + return err + } + for { + select { + case resp := <-ch: + if err := sendResp(resp); err != nil { + return err + } + case <-stream.Context().Done(): + return stream.Context().Err() + } + } +} + +func (w *WorkloadAPI) fetchJWTSVID(ctx context.Context, req *workload.JWTSVIDRequest) (*workload.JWTSVIDResponse, error) { + if err := checkHeader(ctx); err != nil { + return nil, err + } + if len(req.Audience) == 0 { + return nil, errors.New("no audience") + } + if w.jwtResp == nil { + return nil, noIdentityError + } + + return w.jwtResp, nil +} + +func (w *WorkloadAPI) fetchJWTBundles(_ *workload.JWTBundlesRequest, stream workload.SpiffeWorkloadAPI_FetchJWTBundlesServer) error { + if err := checkHeader(stream.Context()); err != nil { + return err + } + ch := make(chan *workload.JWTBundlesResponse, 1) + w.mu.Lock() + w.jwtBundlesChans[ch] = struct{}{} + resp := w.jwtBundlesResp + w.mu.Unlock() + + defer func() { + w.mu.Lock() + delete(w.jwtBundlesChans, ch) + w.mu.Unlock() + }() + + sendResp := func(resp *workload.JWTBundlesResponse) error { + if resp == nil { + return noIdentityError + } + return stream.Send(resp) + } + + if err := sendResp(resp); err != nil { + return err + } + for { + select { + case resp := <-ch: + if err := sendResp(resp); err != nil { + return err + } + case <-stream.Context().Done(): + return stream.Context().Err() + } + } +} + +func (w *WorkloadAPI) validateJWTSVID(_ context.Context, req *workload.ValidateJWTSVIDRequest) (*workload.ValidateJWTSVIDResponse, error) { + if req.Audience == "" { + return nil, status.Error(codes.InvalidArgument, "audience must be specified") + } + + if req.Svid == "" { + return nil, status.Error(codes.InvalidArgument, "svid must be specified") + } + + // TODO: validate + jwtSvid, err := jwtsvid.ParseInsecure(req.Svid, []string{req.Audience}) + if err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + claims, err := structFromValues(jwtSvid.Claims) + require.NoError(w.tb, err) + + 
return &workload.ValidateJWTSVIDResponse{
+		SpiffeId: jwtSvid.ID.String(),
+		Claims:   claims,
+	}, nil
+}
+
+func checkHeader(ctx context.Context) error {
+	return checkMetadata(ctx, "workload.spiffe.io", "true")
+}
+
+func checkMetadata(ctx context.Context, key, value string) error {
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		return errors.New("request does not contain metadata")
+	}
+	values := md.Get(key)
+	if len(values) == 0 {
+		return fmt.Errorf("request metadata does not contain %q value", key)
+	}
+	if values[0] != value {
+		return fmt.Errorf("request metadata %q value is %q; expected %q", key, values[0], value)
+	}
+	return nil
+}
+
+func structFromValues(values map[string]interface{}) (*structpb.Struct, error) {
+	valuesJSON, err := json.Marshal(values)
+	if err != nil {
+		return nil, err
+	}
+
+	s := new(structpb.Struct)
+	if err := protojson.Unmarshal(valuesJSON, s); err != nil {
+		return nil, err
+	}
+
+	return s, nil
+}
diff --git a/pkg/spire/test/fakeworkloadapi/workload_api_posix.go b/pkg/spire/test/fakeworkloadapi/workload_api_posix.go
new file mode 100644
index 00000000000..8572e33d95b
--- /dev/null
+++ b/pkg/spire/test/fakeworkloadapi/workload_api_posix.go
@@ -0,0 +1,17 @@
+//go:build !windows
+// +build !windows
+
+package fakeworkloadapi
+
+import (
+	"fmt"
+	"net"
+)
+
+func newListener() (net.Listener, error) {
+	return net.Listen("tcp", "localhost:0")
+}
+
+func getTargetName(addr net.Addr) string {
+	return fmt.Sprintf("%s://%s", addr.Network(), addr.String())
+}
diff --git a/pkg/spire/test/fakeworkloadapi/workload_api_windows.go b/pkg/spire/test/fakeworkloadapi/workload_api_windows.go
new file mode 100644
index 00000000000..01697fd2a31
--- /dev/null
+++ b/pkg/spire/test/fakeworkloadapi/workload_api_windows.go
@@ -0,0 +1,65 @@
+//go:build windows
+// +build windows
+
+package fakeworkloadapi
+
+import (
+	"fmt"
+	"math/rand"
+	"net"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/Microsoft/go-winio"
+	
"github.com/spiffe/go-spiffe/v2/proto/spiffe/workload" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func NewWithNamedPipeListener(tb testing.TB) *WorkloadAPI { + w := &WorkloadAPI{ + x509Chans: make(map[chan *workload.X509SVIDResponse]struct{}), + jwtBundlesChans: make(map[chan *workload.JWTBundlesResponse]struct{}), + } + + listener, err := winio.ListenPipe(fmt.Sprintf(`\\.\pipe\go-spiffe-test-pipe-%x`, rand.Uint64()), nil) + require.NoError(tb, err) + + server := grpc.NewServer() + workload.RegisterSpiffeWorkloadAPIServer(server, &workloadAPIWrapper{w: w}) + + w.wg.Add(1) + go func() { + defer w.wg.Done() + _ = server.Serve(listener) + }() + + w.addr = getTargetName(listener.Addr()) + tb.Logf("WorkloadAPI address: %s", w.addr) + w.server = server + return w +} + +func GetPipeName(s string) string { + return strings.TrimPrefix(s, `\\.\pipe`) +} + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +func newListener() (net.Listener, error) { + return winio.ListenPipe(fmt.Sprintf(`\\.\pipe\go-spiffe-test-pipe-%x`, rand.Uint64()), nil) +} + +func getTargetName(addr net.Addr) string { + if addr.Network() == "pipe" { + // The go-winio library defines the network of a + // named pipe address as "pipe", but we use the + // "npipe" scheme for named pipes URLs. + return "npipe:" + GetPipeName(addr.String()) + } + + return fmt.Sprintf("%s://%s", addr.Network(), addr.String()) +} diff --git a/pkg/spire/test/keys.go b/pkg/spire/test/keys.go new file mode 100644 index 00000000000..2817a150c5d --- /dev/null +++ b/pkg/spire/test/keys.go @@ -0,0 +1,38 @@ +package test + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +// Methods to generate private keys. If generation starts slowing down test +// execution then switch over to pre-generated keys. 
+ +// NewEC256Key returns an ECDSA key over the P256 curve +func NewEC256Key(tb testing.TB) *ecdsa.PrivateKey { + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(tb, err) + return key +} + +// NewKeyID returns a random id useful for identifying keys +func NewKeyID(tb testing.TB) string { + choices := make([]byte, 32) + _, err := rand.Read(choices) + require.NoError(tb, err) + return keyIDFromBytes(choices) +} + +func keyIDFromBytes(choices []byte) string { + const alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + var builder strings.Builder + for _, choice := range choices { + builder.WriteByte(alphabet[int(choice)%len(alphabet)]) + } + return builder.String() +} diff --git a/pkg/spire/test/pemutil/pem.go b/pkg/spire/test/pemutil/pem.go new file mode 100644 index 00000000000..26617525a31 --- /dev/null +++ b/pkg/spire/test/pemutil/pem.go @@ -0,0 +1,123 @@ +package pemutil + +import ( + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" +) + +const ( + certType string = "CERTIFICATE" + keyType string = "PRIVATE KEY" +) + +func ParseCertificates(certsBytes []byte) ([]*x509.Certificate, error) { + objects, err := parseBlocks(certsBytes, certType) + if err != nil { + return nil, err + } + + certs := []*x509.Certificate{} + for _, object := range objects { + cert, ok := object.(*x509.Certificate) + if !ok { + return nil, fmt.Errorf("expected *x509.Certificate; got %T", object) + } + certs = append(certs, cert) + } + + return certs, nil +} + +func ParsePrivateKey(keyBytes []byte) (crypto.PrivateKey, error) { + objects, err := parseBlocks(keyBytes, keyType) + if err != nil { + return nil, err + } + if len(objects) == 0 { + return nil, nil + } + + privateKey, ok := objects[0].(crypto.PrivateKey) + if !ok { + return nil, fmt.Errorf("expected crypto.PrivateKey; got %T", objects[0]) + } + return privateKey, nil +} + +func EncodePKCS8PrivateKey(privateKey interface{}) ([]byte, error) { + keyBytes, err := 
x509.MarshalPKCS8PrivateKey(privateKey) + if err != nil { + return nil, err + } + + return pem.EncodeToMemory(&pem.Block{ + Type: keyType, + Bytes: keyBytes, + }), nil +} + +func EncodeCertificates(certificates []*x509.Certificate) []byte { + pemBytes := []byte{} + for _, cert := range certificates { + pemBytes = append(pemBytes, pem.EncodeToMemory(&pem.Block{ + Type: certType, + Bytes: cert.Raw, + })...) + } + return pemBytes +} + +func parseBlocks(blocksBytes []byte, expectedType string) ([]interface{}, error) { + objects := []interface{}{} + var foundBlocks = false + for { + if len(blocksBytes) == 0 { + if len(objects) == 0 && !foundBlocks { + return nil, errors.New("no PEM blocks found") + } + return objects, nil + } + object, rest, foundBlock, err := parseBlock(blocksBytes, expectedType) + blocksBytes = rest + if foundBlock { + foundBlocks = true + } + switch { + case err != nil: + return nil, err + case object != nil: + objects = append(objects, object) + } + } +} + +func parseBlock(pemBytes []byte, pemType string) (interface{}, []byte, bool, error) { + pemBlock, rest := pem.Decode(pemBytes) + if pemBlock == nil { + return nil, nil, false, nil + } + + if pemBlock.Type != pemType { + return nil, rest, true, nil + } + + var object interface{} + var err error + switch pemType { + case certType: + object, err = x509.ParseCertificate(pemBlock.Bytes) + case keyType: + object, err = x509.ParsePKCS8PrivateKey(pemBlock.Bytes) + default: + err = fmt.Errorf("PEM type not supported: %q", pemType) + } + + if err != nil { + return nil, nil, false, err + } + + return object, rest, true, nil +} diff --git a/pkg/spire/test/x509util/util.go b/pkg/spire/test/x509util/util.go new file mode 100644 index 00000000000..c45288d0f6c --- /dev/null +++ b/pkg/spire/test/x509util/util.go @@ -0,0 +1,53 @@ +package x509util + +import ( + "crypto/x509" +) + +// NewCertPool returns a new CertPool with the given X.509 certificates +func NewCertPool(certs []*x509.Certificate) *x509.CertPool { + 
pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool +} + +// CopyX509Authorities copies a slice of X.509 certificates to a new slice. +func CopyX509Authorities(x509Authorities []*x509.Certificate) []*x509.Certificate { + copiedX509Authorities := make([]*x509.Certificate, len(x509Authorities)) + copy(copiedX509Authorities, x509Authorities) + + return copiedX509Authorities +} + +// CertsEqual returns true if the slices of X.509 certificates are equal. +func CertsEqual(a, b []*x509.Certificate) bool { + if len(a) != len(b) { + return false + } + + for i, cert := range a { + if !cert.Equal(b[i]) { + return false + } + } + + return true +} + +func RawCertsFromCerts(certs []*x509.Certificate) [][]byte { + rawCerts := make([][]byte, 0, len(certs)) + for _, cert := range certs { + rawCerts = append(rawCerts, cert.Raw) + } + return rawCerts +} + +func ConcatRawCertsFromCerts(certs []*x509.Certificate) []byte { + var rawCerts []byte + for _, cert := range certs { + rawCerts = append(rawCerts, cert.Raw...) + } + return rawCerts +} diff --git a/pkg/spire/verify.go b/pkg/spire/verify.go new file mode 100644 index 00000000000..b32b7489bdb --- /dev/null +++ b/pkg/spire/verify.go @@ -0,0 +1,354 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spire + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "sort" + "strings" + + "github.com/pkg/errors" + "go.uber.org/zap" + + "github.com/spiffe/go-spiffe/v2/workloadapi" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" +) + +// VerifyTaskRunResults ensures that the TaskRun results are valid and have not been tampered with +func (sc *spireControllerAPIClient) VerifyTaskRunResults(ctx context.Context, prs []v1beta1.PipelineResourceResult, tr *v1beta1.TaskRun) error { + err := sc.setupClient(ctx) + if err != nil { + return err + } + + resultMap := map[string]v1beta1.PipelineResourceResult{} + for _, r := range prs { + if r.ResultType == v1beta1.TaskRunResultType { + resultMap[r.Key] = r + } + } + + cert, err := getSVID(resultMap) + if err != nil { + return err + } + + trust, err := getTrustBundle(ctx, sc.workloadAPI) + if err != nil { + return err + } + + if err := verifyManifest(resultMap); err != nil { + return err + } + + if err := verifyCertURI(cert, tr, sc.config.TrustDomain); err != nil { + return err + } + + if err := verifyCertificateTrust(cert, trust); err != nil { + return err + } + + for key := range resultMap { + if strings.HasSuffix(key, KeySignatureSuffix) { + continue + } + if key == KeySVID { + continue + } + if err := verifyResult(cert.PublicKey, key, resultMap); err != nil { + return err + } + } + + return nil +} + +// VerifyStatusInternalAnnotation run multuple verification steps to ensure that the spire status annotations are valid +func (sc *spireControllerAPIClient) VerifyStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun, logger *zap.SugaredLogger) error { + err := sc.setupClient(ctx) + if err != nil { + return err + } + + if !sc.CheckSpireVerifiedFlag(tr) { + return errors.New("annotation tekton.dev/not-verified = yes failed spire verification") + } + + annotations := 
tr.Status.Annotations
+
+	// get trust bundle from spire server
+	trust, err := getTrustBundle(ctx, sc.workloadAPI)
+	if err != nil {
+		return err
+	}
+
+	// verify controller SVID
+	svid, ok := annotations[controllerSvidAnnotation]
+	if !ok {
+		return errors.New("No SVID found")
+	}
+	block, _ := pem.Decode([]byte(svid))
+	if block == nil {
+		return errors.New("invalid SVID: unable to decode PEM block")
+	}
+	cert, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		return fmt.Errorf("invalid SVID: %w", err)
+	}
+
+	// verify certificate root of trust
+	if err := verifyCertificateTrust(cert, trust); err != nil {
+		return err
+	}
+	logger.Infof("Successfully verified certificate %s against SPIRE", svid)
+
+	if err := verifyAnnotation(cert.PublicKey, annotations); err != nil {
+		return err
+	}
+	logger.Info("Successfully verified signature")
+
+	// CheckStatusInternalAnnotation check current status hash vs annotation status hash by controller
+	if err := CheckStatusInternalAnnotation(tr); err != nil {
+		return err
+	}
+	logger.Info("Successfully verified status annotation hash matches the current taskrun status")
+
+	return nil
+}
+
+// CheckSpireVerifiedFlag checks if the not-verified status annotation is set which would result in spire verification failed
+func (sc *spireControllerAPIClient) CheckSpireVerifiedFlag(tr *v1beta1.TaskRun) bool {
+	if _, notVerified := tr.Status.Annotations[NotVerifiedAnnotation]; !notVerified {
+		return true
+	}
+	return false
+}
+
+func hashTaskrunStatusInternal(tr *v1beta1.TaskRun) (string, error) {
+	s, err := json.Marshal(tr.Status.TaskRunStatusFields)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%x", sha256.Sum256(s)), nil
+}
+
+// CheckStatusInternalAnnotation ensures that the internal status annotation hash and current status hash match
+func CheckStatusInternalAnnotation(tr *v1beta1.TaskRun) error {
+	// get stored hash of status
+	annotations := tr.Status.Annotations
+	hash, ok := annotations[TaskRunStatusHashAnnotation]
+	if !ok {
+		return fmt.Errorf("no annotation status hash found for %s", TaskRunStatusHashAnnotation)
+	}
+	// get current hash of status
+	current, err := hashTaskrunStatusInternal(tr)
+	if err != nil {
+		return err
+	}
+	if hash != current {
+		return fmt.Errorf("current status hash and stored annotation hash does not match! Annotation Hash: %s, Current Status Hash: %s", hash, current)
+	}
+
+	return nil
+}
+
+func getSVID(resultMap map[string]v1beta1.PipelineResourceResult) (*x509.Certificate, error) {
+	svid, ok := resultMap[KeySVID]
+	if !ok {
+		return nil, errors.New("no SVID found")
+	}
+	svidValue, err := getResultValue(svid)
+	if err != nil {
+		return nil, err
+	}
+	block, _ := pem.Decode([]byte(svidValue))
+	if block == nil {
+		return nil, errors.New("invalid SVID: unable to decode PEM block")
+	}
+	cert, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		return nil, fmt.Errorf("invalid SVID: %w", err)
+	}
+	return cert, nil
+}
+
+func getTrustBundle(ctx context.Context, client *workloadapi.Client) (*x509.CertPool, error) {
+	x509set, err := client.FetchX509Bundles(ctx)
+	if err != nil {
+		return nil, err
+	}
+	x509Bundle := x509set.Bundles()
+	if err != nil {
+		return nil, err
+	}
+	if len(x509Bundle) > 0 {
+		trustPool := x509.NewCertPool()
+		for _, bundle := range x509Bundle {
+			for _, c := range bundle.X509Authorities() {
+				trustPool.AddCert(c)
+			}
+		}
+		return trustPool, nil
+	}
+	return nil, errors.New("trust domain bundle empty")
+}
+
+func getFullPath(tr *v1beta1.TaskRun) string {
+	// URI:spiffe://example.org/ns/default/taskrun/cache-image-pipelinerun-r4r22-fetch-from-git
+	return fmt.Sprintf("/ns/%s/taskrun/%s", tr.Namespace, tr.Name)
+}
+
+func verifyCertURI(cert *x509.Certificate, tr *v1beta1.TaskRun, trustDomain string) error {
+	path := getFullPath(tr)
+	switch {
+	case len(cert.URIs) == 0:
+		return fmt.Errorf("cert uri missing for taskrun: %s", tr.Name)
+	case len(cert.URIs) > 1:
+		return
fmt.Errorf("cert contains more than one URI for taskrun: %s", tr.Name) + case len(cert.URIs) == 1: + if cert.URIs[0].Host != trustDomain { + return fmt.Errorf("cert uri: %s does not match trust domain: %s", cert.URIs[0].Host, trustDomain) + } + if cert.URIs[0].Path != path { + return fmt.Errorf("cert uri: %s does not match taskrun: %s", cert.URIs[0].Path, path) + } + } + return nil +} + +func verifyCertificateTrust(cert *x509.Certificate, rootCertPool *x509.CertPool) error { + verifyOptions := x509.VerifyOptions{ + Roots: rootCertPool, + } + chains, err := cert.Verify(verifyOptions) + if len(chains) == 0 || err != nil { + return errors.New("cert cannot be verified by provided roots") + } + return nil +} + +func verifyManifest(results map[string]v1beta1.PipelineResourceResult) error { + manifest, ok := results[KeyResultManifest] + if !ok { + return errors.New("no manifest found in results") + } + manifestValue, err := getResultValue(manifest) + if err != nil { + return err + } + s := strings.Split(manifestValue, ",") + for _, key := range s { + _, found := results[key] + if key != "" && !found { + return fmt.Errorf("no result found for %s but is part of the manifest %s", key, manifestValue) + } + } + return nil +} + +func verifyAnnotation(pub interface{}, annotations map[string]string) error { + signature, ok := annotations[taskRunStatusHashSigAnnotation] + if !ok { + return fmt.Errorf("no signature found for %s", taskRunStatusHashSigAnnotation) + } + hash, ok := annotations[TaskRunStatusHashAnnotation] + if !ok { + return fmt.Errorf("no annotation status hash found for %s", TaskRunStatusHashAnnotation) + } + return verifySignature(pub, signature, hash) +} + +func verifyResult(pub crypto.PublicKey, key string, results map[string]v1beta1.PipelineResourceResult) error { + signature, ok := results[key+KeySignatureSuffix] + if !ok { + return fmt.Errorf("no signature found for %s", key) + } + sigValue, err := getResultValue(signature) + if err != nil { + return err + } + 
resultValue, err := getResultValue(results[key]) + if err != nil { + return err + } + return verifySignature(pub, sigValue, resultValue) +} + +func verifySignature(pub crypto.PublicKey, signature string, value string) error { + b, err := base64.StdEncoding.DecodeString(signature) + if err != nil { + return fmt.Errorf("invalid signature: %w", err) + } + h := sha256.Sum256([]byte(value)) + // Check val against sig + switch t := pub.(type) { + case *ecdsa.PublicKey: + if !ecdsa.VerifyASN1(t, h[:], b) { + return errors.New("invalid signature") + } + return nil + case *rsa.PublicKey: + return rsa.VerifyPKCS1v15(t, crypto.SHA256, h[:], b) + case ed25519.PublicKey: + if !ed25519.Verify(t, []byte(value), b) { + return errors.New("invalid signature") + } + return nil + default: + return fmt.Errorf("unsupported key type: %s", t) + } +} + +func getResultValue(result v1beta1.PipelineResourceResult) (string, error) { + aos := v1beta1.ArrayOrString{} + err := aos.UnmarshalJSON([]byte(result.Value)) + valList := []string{} + if err != nil { + return "", fmt.Errorf("unmarshal error for key: %s", result.Key) + } + switch aos.Type { + case v1beta1.ParamTypeString: + return aos.StringVal, nil + case v1beta1.ParamTypeArray: + valList = append(valList, aos.ArrayVal...) 
+ return strings.Join(valList, ","), nil + case v1beta1.ParamTypeObject: + keys := make([]string, len(aos.ObjectVal)) + for k := range aos.ObjectVal { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + valList = append(valList, k) + valList = append(valList, aos.ObjectVal[k]) + } + return strings.Join(valList, ","), nil + } + return "", fmt.Errorf("invalid result type for key: %s", result.Key) +} diff --git a/test/e2e-common.sh b/test/e2e-common.sh index 585a96c0f54..76aec5c0f17 100755 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -38,6 +38,66 @@ function install_pipeline_crd_version() { verify_pipeline_installation } +function spire_apply() { + if [ $# -lt 2 -o "$1" != "-spiffeID" ]; then + echo "spire_apply requires a spiffeID as the first arg" >&2 + exit 1 + fi + show=$(kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry show $1 $2) + if [ "$show" != "Found 0 entries" ]; then + # delete to recreate + entryid=$(echo "$show" | grep "^Entry ID" | cut -f2 -d:) + kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry delete -entryID $entryid + fi + kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry create "$@" +} + +function install_spire() { + echo ">> Deploying Spire" + DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + + echo "Creating SPIRE namespace..." + kubectl create ns spire + + echo "Applying SPIFFE CSI Driver configuration..." 
+ kubectl apply -f "$DIR"/testdata/spire/spiffe-csi-driver.yaml + + echo "Deploying SPIRE server" + kubectl apply -f "$DIR"/testdata/spire/spire-server.yaml + + echo "Deploying SPIRE agent" + kubectl apply -f "$DIR"/testdata/spire/spire-agent.yaml + + wait_until_pods_running spire || fail_test "SPIRE did not come up" + + spire_apply \ + -spiffeID spiffe://example.org/ns/spire/node/example \ + -selector k8s_psat:cluster:example-cluster \ + -selector k8s_psat:agent_ns:spire \ + -selector k8s_psat:agent_sa:spire-agent \ + -node + spire_apply \ + -spiffeID spiffe://example.org/ns/tekton-pipelines/sa/tekton-pipelines-controller \ + -parentID spiffe://example.org/ns/spire/node/example \ + -selector k8s:ns:tekton-pipelines \ + -selector k8s:pod-label:app:tekton-pipelines-controller \ + -selector k8s:sa:tekton-pipelines-controller \ + -admin +} + +function patch_pipline_spire() { + kubectl patch \ + deployment tekton-pipelines-controller \ + -n tekton-pipelines \ + --patch-file "$DIR"/testdata/patch/pipeline-controller-spire.json + + verify_pipeline_installation +} + + function verify_pipeline_installation() { # Make sure that everything is cleaned up in the current namespace. 
delete_pipeline_resources diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh index 7a23a8016ee..496a38c1933 100755 --- a/test/e2e-tests.sh +++ b/test/e2e-tests.sh @@ -37,9 +37,24 @@ fi header "Setting up environment" -install_pipeline_crd - -failed=0 +function alpha_gate() { + local gate="$1" + if [ "$gate" != "alpha" ] && [ "$gate" != "stable" ] && [ "$gate" != "beta" ] ; then + printf "Invalid gate %s\n" ${gate} + exit 255 + fi + if [ "$gate" == "alpha" ] ; then + printf "Setting up environement for alpha features" + install_spire + install_pipeline_crd + patch_pipline_spire + failed=0 + else + printf "Setting up environement for non-alpha features" + install_pipeline_crd + failed=0 + fi +} function set_feature_gate() { local gate="$1" @@ -81,6 +96,7 @@ function run_e2e() { fi } +alpha_gate "$PIPELINE_FEATURE_GATE" set_feature_gate "$PIPELINE_FEATURE_GATE" set_embedded_status "$EMBEDDED_STATUS_GATE" run_e2e diff --git a/test/embed_test.go b/test/embed_test.go index 4b15b002e67..0d473370d99 100644 --- a/test/embed_test.go +++ b/test/embed_test.go @@ -41,10 +41,29 @@ const ( // TestTaskRun_EmbeddedResource is an integration test that will verify a very simple "hello world" TaskRun can be // executed with an embedded resource spec. func TestTaskRun_EmbeddedResource(t *testing.T) { + embeddedResourceTest(t, false) +} + +// TestTaskRun_EmbeddedResourceWithSpire is an integration test with spire enabled that will verify a very simple "hello world" TaskRun can be +// executed with an embedded resource spec. 
+func TestTaskRun_EmbeddedResourceWithSpire(t *testing.T) { + embeddedResourceTest(t, true) +} + +func embeddedResourceTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + t.Parallel() knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) @@ -68,6 +87,15 @@ func TestTaskRun_EmbeddedResource(t *testing.T) { // TODO(#127) Currently we have no reliable access to logs from the TaskRun so we'll assume successful // completion of the TaskRun means the TaskRun did what it was intended. + + if spireEnabled { + tr, err := c.TaskRunClient.Get(ctx, embedTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + spireShouldPassTaskRunResultsVerify(tr, t) + } + } func getEmbeddedTask(t *testing.T, taskName, namespace string, args []string) *v1beta1.Task { diff --git a/test/entrypoint_test.go b/test/entrypoint_test.go index 16828c1af7c..dbf84606fef 100644 --- a/test/entrypoint_test.go +++ b/test/entrypoint_test.go @@ -36,10 +36,31 @@ import ( // that doesn't have a cmd defined. In addition to making sure the steps // are executed in the order specified func TestEntrypointRunningStepsInOrder(t *testing.T) { + entryPointerTest(t, false) +} + +// TestEntrypointRunningStepsInOrderWithSpire is an integration test with spire enabled that will +// verify attempt to the get the entrypoint of a container image +// that doesn't have a cmd defined. 
In addition to making sure the steps +// are executed in the order specified +func TestEntrypointRunningStepsInOrderWithSpire(t *testing.T) { + entryPointerTest(t, true) +} + +func entryPointerTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + t.Parallel() knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) @@ -70,4 +91,12 @@ spec: t.Errorf("Error waiting for TaskRun to finish successfully: %s", err) } + if spireEnabled { + tr, err := c.TaskRunClient.Get(ctx, epTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + spireShouldPassTaskRunResultsVerify(tr, t) + } + } diff --git a/test/helm_task_test.go b/test/helm_task_test.go index 577247a9350..6f46d19d3cd 100644 --- a/test/helm_task_test.go +++ b/test/helm_task_test.go @@ -42,11 +42,30 @@ var ( // TestHelmDeployPipelineRun is an integration test that will verify a pipeline build an image // and then using helm to deploy it func TestHelmDeployPipelineRun(t *testing.T) { + helmDeploytest(t, false) +} + +// TestHelmDeployPipelineRunWithSpire is an integration test with spire enabled that will verify a pipeline build an image +// and then using helm to deploy it +func TestHelmDeployPipelineRunWithSpire(t *testing.T) { + helmDeploytest(t, true) +} + +func helmDeploytest(t *testing.T, spireEnabled bool) { repo := ensureDockerRepo(t) ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + setupClusterBindingForHelm(ctx, c, t, 
namespace) var ( @@ -103,6 +122,16 @@ func TestHelmDeployPipelineRun(t *testing.T) { t.Fatalf("PipelineRun execution failed; helm may or may not have been installed :(") } + if spireEnabled { + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + helmDeployPipelineRunName}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", helmDeployPipelineRunName, err) + } + for _, taskrunItem := range taskrunList.Items { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } + } + // cleanup task to remove helm releases from cluster and cluster role bindings, will not fail the test if it fails, just log knativetest.CleanupOnInterrupt(func() { helmCleanup(ctx, c, t, namespace) }, t.Logf) defer helmCleanup(ctx, c, t, namespace) diff --git a/test/hermetic_taskrun_test.go b/test/hermetic_taskrun_test.go index 5a861053bfb..79727b48931 100644 --- a/test/hermetic_taskrun_test.go +++ b/test/hermetic_taskrun_test.go @@ -34,11 +34,30 @@ import ( // it does this by first running the TaskRun normally to make sure it passes // Then, it enables hermetic mode and makes sure the same TaskRun fails because it no longer has access to a network. func TestHermeticTaskRun(t *testing.T) { + hermeticTest(t, false) +} + +// TestHermeticTaskRunWithSpire (with spire enabled) make sure that the hermetic execution mode actually drops network from a TaskRun step +// it does this by first running the TaskRun normally to make sure it passes +// Then, it enables hermetic mode and makes sure the same TaskRun fails because it no longer has access to a network. 
+func TestHermeticTaskRunWithSpire(t *testing.T) { + hermeticTest(t, true) +} + +func hermeticTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t, requireAnyGate(map[string]string{"enable-api-fields": "alpha"})) + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t, requireAnyGate(map[string]string{"enable-api-fields": "alpha"})) + } + t.Parallel() defer tearDown(ctx, t, c, namespace) @@ -67,6 +86,13 @@ func TestHermeticTaskRun(t *testing.T) { if err := WaitForTaskRunState(ctx, c, regularTaskRunName, Succeed(regularTaskRunName), "TaskRunCompleted"); err != nil { t.Errorf("Error waiting for TaskRun %s to finish: %s", regularTaskRunName, err) } + if spireEnabled { + tr, err := c.TaskRunClient.Get(ctx, regularTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + spireShouldPassTaskRunResultsVerify(tr, t) + } // now, run the task mode with hermetic mode // it should fail, since it shouldn't be able to access any network @@ -79,6 +105,13 @@ func TestHermeticTaskRun(t *testing.T) { if err := WaitForTaskRunState(ctx, c, hermeticTaskRunName, Failed(hermeticTaskRunName), "Failed"); err != nil { t.Errorf("Error waiting for TaskRun %s to fail: %s", hermeticTaskRunName, err) } + if spireEnabled { + tr, err := c.TaskRunClient.Get(ctx, hermeticTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + spireShouldFailTaskRunResultsVerify(tr, t) + } }) } } diff --git a/test/ignore_step_error_test.go b/test/ignore_step_error_test.go index bc77fdcc8f0..b6522f0735e 100644 --- a/test/ignore_step_error_test.go +++ b/test/ignore_step_error_test.go @@ -33,10 +33,27 @@ import ( ) func TestMissingResultWhenStepErrorIsIgnored(t *testing.T) { + stepErrorTest(t, false) +} + +func 
TestMissingResultWhenStepErrorIsIgnoredWithSpire(t *testing.T) { + stepErrorTest(t, true) +} + +func stepErrorTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -99,6 +116,10 @@ spec: t.Fatalf("task1 should have produced a result before failing the step") } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } + for _, r := range taskrunItem.Status.TaskRunResults { if r.Name == "result1" && r.Value.StringVal != "123" { t.Fatalf("task1 should have initialized a result \"result1\" to \"123\"") diff --git a/test/init_test.go b/test/init_test.go index 55685472f7e..0525f5c00e3 100644 --- a/test/init_test.go +++ b/test/init_test.go @@ -31,6 +31,7 @@ import ( "testing" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/names" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -46,6 +47,11 @@ import ( "sigs.k8s.io/yaml" ) +var spireFeatureGates = map[string]string{ + "enable-spire": "true", + "enable-api-fields": "alpha", +} + var initMetrics sync.Once var skipRootUserTests = false @@ -268,3 +274,19 @@ func getCRDYaml(ctx context.Context, cs *clients, ns string) ([]byte, error) { return output, nil } + +// Verifies if the taskrun results should not be verified by spire +func spireShouldFailTaskRunResultsVerify(tr *v1beta1.TaskRun, t *testing.T) { + if tr.IsTaskRunResultVerified() { + t.Errorf("Taskrun `%s` status condition should not be verified as taskrun failed", tr.Name) + } + t.Logf("Taskrun `%s` status results condition verified by spire as 
false, which is valid", tr.Name) +} + +// Verifies if the taskrun results are verified by spire +func spireShouldPassTaskRunResultsVerify(tr *v1beta1.TaskRun, t *testing.T) { + if !tr.IsTaskRunResultVerified() { + t.Errorf("Taskrun `%s` status condition not verified. Spire taskrun results verification failure", tr.Name) + } + t.Logf("Taskrun `%s` status results condition verified by spire as true, which is valid", tr.Name) +} diff --git a/test/kaniko_task_test.go b/test/kaniko_task_test.go index 4062e88eb9f..45e152c6d06 100644 --- a/test/kaniko_task_test.go +++ b/test/kaniko_task_test.go @@ -42,6 +42,15 @@ const ( // TestTaskRun is an integration test that will verify a TaskRun using kaniko func TestKanikoTaskRun(t *testing.T) { + kanikoTest(t, false) +} + +// TestKanikoTaskRunWithSpire is an integration test that will verify a TaskRun using kaniko with Spire enabled +func TestKanikoTaskRunWithSpire(t *testing.T) { + kanikoTest(t, true) +} + +func kanikoTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -50,7 +59,15 @@ func TestKanikoTaskRun(t *testing.T) { t.Skip("Skip test as skipRootUserTests set to true") } - c, namespace := setup(ctx, t, withRegistry) + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, withRegistry, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t, withRegistry) + } + t.Parallel() repo := fmt.Sprintf("registry.%s:5000/kanikotasktest", namespace) @@ -123,6 +140,10 @@ func TestKanikoTaskRun(t *testing.T) { t.Fatalf("Expected remote commit to match local revision: %s, %s", commit, revision) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(tr, t) + } + // match the local digest, which is first capture group against the remote image remoteDigest, err := getRemoteDigest(t, c, namespace, repo) if err != nil { diff --git a/test/pipelinefinally_test.go b/test/pipelinefinally_test.go index 
c5afe638c09..c7867660ec3 100644 --- a/test/pipelinefinally_test.go +++ b/test/pipelinefinally_test.go @@ -44,10 +44,27 @@ var requireAlphaFeatureFlags = requireAnyGate(map[string]string{ }) func TestPipelineLevelFinally_OneDAGTaskFailed_InvalidTaskResult_Failure(t *testing.T) { + pipelineLevelFinallyOneDAGTaskFailedInvalidTaskResultFailureWithOptions(t, false) +} + +func TestPipelineLevelFinally_OneDAGTaskFailed_InvalidTaskResult_FailureWithSpire(t *testing.T) { + pipelineLevelFinallyOneDAGTaskFailedInvalidTaskResultFailureWithOptions(t, true) +} + +func pipelineLevelFinallyOneDAGTaskFailedInvalidTaskResultFailureWithOptions(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -260,27 +277,46 @@ spec: if !isFailed(t, n, taskrunItem.Status.Conditions) { t.Fatalf("dag task %s should have failed", n) } + if spireEnabled { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } dagTask1EndTime = taskrunItem.Status.CompletionTime case n == "dagtask2": if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } dagTask2EndTime = taskrunItem.Status.CompletionTime case n == "dagtask4": + if spireEnabled { + // Skipped so status annotations should not be there. 
Results should not be verified as not run + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } t.Fatalf("task %s should have skipped due to when expression", n) case n == "dagtask5": if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case n == "finaltask1": if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } finalTaskStartTime = taskrunItem.Status.StartTime case n == "finaltask2": if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } for _, p := range taskrunItem.Spec.Params { switch param := p.Name; param { case "dagtask1-status": @@ -306,6 +342,9 @@ spec: if err := WaitForTaskRunState(ctx, c, taskrunItem.Name, TaskRunSucceed(taskrunItem.Name), "TaskRunSuccess"); err != nil { t.Errorf("Error waiting for TaskRun to succeed: %v", err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } for _, p := range taskrunItem.Spec.Params { if p.Name == "dagtask-result" && p.Value.StringVal != "Hello" { t.Errorf("Error resolving task result reference in a finally task %s", n) @@ -315,13 +354,27 @@ spec: if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("final task %s should have succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case n == "guardedfinaltaskusingdagtask5status1": if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("final task %s should have 
succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case n == "guardedfinaltaskusingdagtask5result2": + if spireEnabled { + // Skipped so status annotations should not be there. Results should not be verified as not run + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } t.Fatalf("final task %s should have skipped due to when expression evaluating to false", n) case n == "finaltaskconsumingdagtask1" || n == "finaltaskconsumingdagtask4" || n == "guardedfinaltaskconsumingdagtask4": + if spireEnabled { + // Skipped so status annotations should not be there. Results should not be verified as not run + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } t.Fatalf("final task %s should have skipped due to missing task result reference", n) default: t.Fatalf("Found unexpected taskRun %s", n) @@ -394,10 +447,27 @@ spec: } func TestPipelineLevelFinally_OneFinalTaskFailed_Failure(t *testing.T) { + pipelineLevelFinallyOneFinalTaskFailedFailureWithOptions(t, false) +} + +func TestPipelineLevelFinally_OneFinalTaskFailed_FailureWithSpire(t *testing.T) { + pipelineLevelFinallyOneFinalTaskFailedFailureWithOptions(t, true) +} + +func pipelineLevelFinallyOneFinalTaskFailedFailureWithOptions(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -451,10 +521,16 @@ spec: if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("dag task %s should have succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case n == "finaltask1": if !isFailed(t, n, taskrunItem.Status.Conditions) { t.Fatalf("final 
task %s should have failed", n) } + if spireEnabled { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } default: t.Fatalf("TaskRuns were not found for both final and dag tasks") } @@ -462,10 +538,27 @@ spec: } func TestPipelineLevelFinally_OneFinalTask_CancelledRunFinally(t *testing.T) { + pipelineLevelFinallyOneFinalTaskCancelledRunFinallyWithOptions(t, false) +} + +func TestPipelineLevelFinally_OneFinalTask_CancelledRunFinallyWithSpire(t *testing.T) { + pipelineLevelFinallyOneFinalTaskCancelledRunFinallyWithOptions(t, true) +} + +func pipelineLevelFinallyOneFinalTaskCancelledRunFinallyWithOptions(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t, requireAlphaFeatureFlags) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t, requireAlphaFeatureFlags) + } + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -562,13 +655,25 @@ spec: if !isCancelled(t, n, taskrunItem.Status.Conditions) { t.Fatalf("dag task %s should have been cancelled", n) } + if spireEnabled { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } case "dagtask2": + if spireEnabled { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } t.Fatalf("second dag task %s should be skipped as it depends on the result from cancelled 'dagtask1'", n) case "finaltask1": if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("first final task %s should have succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case "finaltask2": + if spireEnabled { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } t.Fatalf("second final task %s should be skipped as it depends on the result from cancelled 'dagtask1'", n) default: t.Fatalf("TaskRuns were not found for 
both final and dag tasks") @@ -577,10 +682,27 @@ spec: } func TestPipelineLevelFinally_OneFinalTask_StoppedRunFinally(t *testing.T) { + pipelineLevelFinallyOneFinalTaskStoppedRunFinallyWithOptions(t, false) +} + +func TestPipelineLevelFinally_OneFinalTask_StoppedRunFinallyWithSpire(t *testing.T) { + pipelineLevelFinallyOneFinalTaskStoppedRunFinallyWithOptions(t, true) +} + +func pipelineLevelFinallyOneFinalTaskStoppedRunFinallyWithOptions(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t, requireAlphaFeatureFlags) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t, requireAlphaFeatureFlags) + } + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -677,14 +799,23 @@ spec: if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("dag task %s should have succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case "finaltask1": if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("first final task %s should have succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } case "finaltask2": if !isSuccessful(t, n, taskrunItem.Status.Conditions) { t.Fatalf("second final task %s should have succeeded", n) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } default: t.Fatalf("TaskRuns were not found for both final and dag tasks") } diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go index 6f11fd59413..2f180dad407 100644 --- a/test/pipelinerun_test.go +++ b/test/pipelinerun_test.go @@ -176,6 +176,15 @@ spec: } func TestPipelineRun(t *testing.T) { + pipelineTestWithOptions(t, false) +} + +// Used different function name as helpers.ObjectNameForTest(t) 
would run into an issue with the number of characters exceeding the limit causing it to crash +func TestWithSpirePR(t *testing.T) { + pipelineTestWithOptions(t, true) +} + +func pipelineTestWithOptions(t *testing.T, spireEnabled bool) { t.Parallel() type tests struct { name string @@ -315,7 +324,15 @@ spec: ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -347,6 +364,9 @@ spec: if strings.HasPrefix(actualTaskRunItem.Name, taskRunName) { taskRunName = actualTaskRunItem.Name } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(&actualTaskRunItem, t) + } } expectedTaskRunNames = append(expectedTaskRunNames, taskRunName) r, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) @@ -448,16 +468,35 @@ spec: // TestPipelineRunRefDeleted tests that a running PipelineRun doesn't fail when the Pipeline // it references is deleted. func TestPipelineRunRefDeleted(t *testing.T) { + pipelineRunRefDeletedTestWithOptions(t, false) +} + +// TestPipelineRunRefDeletedWithSpire tests (with spire enabled) that a running PipelineRun doesn't fail when the Pipeline +// it references is deleted. 
+func TestPipelineRunRefDeletedWithSpire(t *testing.T) { + pipelineRunRefDeletedTestWithOptions(t, true) +} + +func pipelineRunRefDeletedTestWithOptions(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) pipelineName := helpers.ObjectNameForTest(t) prName := helpers.ObjectNameForTest(t) + t.Logf("Creating Pipeline, and PipelineRun %s in namespace %s", prName, namespace) pipeline := parse.MustParsePipeline(t, fmt.Sprintf(` @@ -515,6 +554,16 @@ spec: t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err) } + if spireEnabled { + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + prName}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err) + } + for _, taskrunItem := range taskrunList.Items { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } + } + } // TestPipelineRunPending tests that a Pending PipelineRun is not run until the pending @@ -522,10 +571,30 @@ spec: // transition PipelineRun states during the test, which the TestPipelineRun suite does not // support. func TestPipelineRunPending(t *testing.T) { + pipelineRunPendingTestWithOptions(t, false) +} + +// TestPipelineRunPendingWithSpire tests (with spire) that a Pending PipelineRun is not run until the pending +// status is cleared. This is separate from the TestPipelineRun suite because it has to +// transition PipelineRun states during the test, which the TestPipelineRun suite does not +// support. 
+func TestPipelineRunPendingWithSpire(t *testing.T) { + pipelineRunPendingTestWithOptions(t, true) +} + +func pipelineRunPendingTestWithOptions(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) @@ -601,6 +670,15 @@ spec: if err := WaitForPipelineRunState(ctx, c, prName, timeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil { t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err) } + if spireEnabled { + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + prName}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err) + } + for _, taskrunItem := range taskrunList.Items { + spireShouldPassTaskRunResultsVerify(&taskrunItem, t) + } + } } func getFanInFanOutTasks(t *testing.T, namespace string) map[string]*v1beta1.Task { diff --git a/test/status_test.go b/test/status_test.go index 551dff1d858..d53d2a40c33 100644 --- a/test/status_test.go +++ b/test/status_test.go @@ -35,10 +35,30 @@ import ( // verify a very simple "hello world" TaskRun and PipelineRun failure // execution lead to the correct TaskRun status. func TestTaskRunPipelineRunStatus(t *testing.T) { + taskRunPipelineRunStatus(t, false) +} + +// TestTaskRunPipelineRunStatusWithSpire is an integration test with spire enabled that will +// verify a very simple "hello world" TaskRun and PipelineRun failure +// execution lead to the correct TaskRun status. 
+func TestTaskRunPipelineRunStatusWithSpire(t *testing.T) { + taskRunPipelineRunStatus(t, true) +} + +func taskRunPipelineRunStatus(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + t.Parallel() knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) @@ -72,6 +92,14 @@ spec: t.Errorf("Error waiting for TaskRun to finish: %s", err) } + if spireEnabled { + tr, err := c.TaskRunClient.Get(ctx, taskRun.Name, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + spireShouldFailTaskRunResultsVerify(tr, t) + } + pipeline := parse.MustParsePipeline(t, fmt.Sprintf(` metadata: name: %s @@ -98,4 +126,15 @@ spec: if err := WaitForPipelineRunState(ctx, c, pipelineRun.Name, timeout, PipelineRunFailed(pipelineRun.Name), "BuildValidationFailed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } + + if spireEnabled { + taskrunList, err := c.TaskRunClient.List(ctx, metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=" + pipelineRun.Name}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err) + } + for _, taskrunItem := range taskrunList.Items { + spireShouldFailTaskRunResultsVerify(&taskrunItem, t) + } + } + } diff --git a/test/taskrun_test.go b/test/taskrun_test.go index f6b2a790f08..2cfcf1c100c 100644 --- a/test/taskrun_test.go +++ b/test/taskrun_test.go @@ -21,12 +21,14 @@ package test import ( "context" + "encoding/json" "fmt" "regexp" "strings" "testing" "github.com/tektoncd/pipeline/test/parse" + jsonpatch "gomodules.xyz/jsonpatch/v2" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -34,16 +36,33 @@ import ( 
"github.com/tektoncd/pipeline/pkg/pod" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" knativetest "knative.dev/pkg/test" "knative.dev/pkg/test/helpers" ) func TestTaskRunFailure(t *testing.T) { + taskrunFailureTest(t, false) +} + +func TestTaskRunFailureWithSpire(t *testing.T) { + taskrunFailureTest(t, true) +} + +func taskrunFailureTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + t.Parallel() knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) @@ -93,6 +112,10 @@ spec: t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } + if spireEnabled { + spireShouldFailTaskRunResultsVerify(taskrun, t) + } + expectedStepState := []v1beta1.StepState{{ ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ @@ -136,10 +159,27 @@ spec: } func TestTaskRunStatus(t *testing.T) { + taskrunStatusTest(t, false) +} + +func TestTaskRunStatusWithSpire(t *testing.T) { + taskrunStatusTest(t, true) +} + +func taskrunStatusTest(t *testing.T, spireEnabled bool) { ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - c, namespace := setup(ctx, t) + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + t.Parallel() knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) @@ -175,7 +215,7 @@ spec: t.Fatalf("Failed to create TaskRun: %s", err) } - t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) + t.Logf("Waiting for TaskRun in namespace %s to succeed", namespace) if err := WaitForTaskRunState(ctx, 
c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed"); err != nil { t.Errorf("Error waiting for TaskRun to finish: %s", err) } @@ -185,6 +225,10 @@ spec: t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } + if spireEnabled { + spireShouldPassTaskRunResultsVerify(taskrun, t) + } + expectedStepState := []v1beta1.StepState{{ ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ @@ -210,3 +254,113 @@ spec: t.Fatalf("-got, +want: %v", d) } } + +func TestTaskRunModification(t *testing.T) { + taskrunModificationTest(t, false) +} + +func TestTaskRunModificationWithSpire(t *testing.T) { + taskrunModificationTest(t, true) +} + +func taskrunModificationTest(t *testing.T, spireEnabled bool) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var c *clients + var namespace string + + if spireEnabled { + c, namespace = setup(ctx, t, requireAnyGate(spireFeatureGates)) + } else { + c, namespace = setup(ctx, t) + } + + knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) + defer tearDown(ctx, t, c, namespace) + + taskRunName := "non-falsifiable-provenance" + + t.Logf("Creating Task and TaskRun in namespace %s", namespace) + task := parse.MustParseTask(t, fmt.Sprintf(` +metadata: + name: non-falsifiable + namespace: %s +spec: + steps: + - image: ubuntu + script: | + #!/usr/bin/env bash + sleep 20 + printf "hello" > "$(results.foo.path)" + printf "world" > "$(results.bar.path)" + results: + - name: foo + - name: bar +`, namespace)) + if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + taskRun := parse.MustParseTaskRun(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + taskRef: + name: non-falsifiable +`, taskRunName, namespace)) + if _, err := c.TaskRunClient.Create(ctx, taskRun, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) 
 + } + + t.Logf("Waiting for TaskRun in namespace %s to be in running state", namespace) + if err := WaitForTaskRunState(ctx, c, taskRunName, Running(taskRunName), "TaskRunRunning"); err != nil { + t.Errorf("Error waiting for TaskRun to start running: %s", err) + } + + patches := []jsonpatch.JsonPatchOperation{{ + Operation: "replace", + Path: "/status/taskSpec/steps/0/image", + Value: "not-ubuntu", + }} + patchBytes, err := json.Marshal(patches) + if err != nil { + t.Fatalf("failed to marshal patch bytes") + } + t.Logf("Patching TaskRun %s in namespace %s mid run for spire to catch the unauthorized change", taskRunName, namespace) + if _, err := c.TaskRunClient.Patch(ctx, taskRunName, types.JSONPatchType, patchBytes, metav1.PatchOptions{}, "status"); err != nil { + t.Fatalf("Failed to patch taskrun `%s`: %s", taskRunName, err) + } + + t.Logf("Waiting for TaskRun %s in namespace %s to fail", taskRunName, namespace) + if err := WaitForTaskRunState(ctx, c, taskRunName, TaskRunFailed(taskRunName), "TaskRunFailed"); err != nil { + t.Errorf("Error waiting for TaskRun to finish: %s", err) + } + + taskrun, err := c.TaskRunClient.Get(ctx, taskRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) + } + + if spireEnabled { + spireShouldFailTaskRunResultsVerify(taskrun, t) + } + + expectedStepState := []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "Error", + }, + }, + Name: "unnamed-0", + ContainerName: "step-unnamed-0", + }} + + ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID") + ignoreStepFields := cmpopts.IgnoreFields(v1beta1.StepState{}, "ImageID") + if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" { + t.Fatalf("-got, +want: %v", d) + } +} diff --git 
a/test/testdata/patch/pipeline-controller-spire.json b/test/testdata/patch/pipeline-controller-spire.json new file mode 100644 index 00000000000..c137f675cb0 --- /dev/null +++ b/test/testdata/patch/pipeline-controller-spire.json @@ -0,0 +1,55 @@ +{ + "spec":{ + "template":{ + "spec":{ + "$setElementOrder/containers":[ + { + "name":"tekton-pipelines-controller" + } + ], + "$setElementOrder/volumes":[ + { + "name":"config-logging" + }, + { + "name":"config-registry-cert" + }, + { + "name":"spiffe-workload-api" + } + ], + "containers":[ + { + "$setElementOrder/volumeMounts":[ + { + "mountPath":"/etc/config-logging" + }, + { + "mountPath":"/etc/config-registry-cert" + }, + { + "mountPath":"/spiffe-workload-api" + } + ], + "name":"tekton-pipelines-controller", + "volumeMounts":[ + { + "mountPath":"/spiffe-workload-api", + "name":"spiffe-workload-api", + "readOnly":true + } + ] + } + ], + "volumes":[ + { + "csi":{ + "driver":"csi.spiffe.io" + }, + "name":"spiffe-workload-api" + } + ] + } + } + } +} diff --git a/test/testdata/spire/spiffe-csi-driver.yaml b/test/testdata/spire/spiffe-csi-driver.yaml new file mode 100644 index 00000000000..e9d07bc5683 --- /dev/null +++ b/test/testdata/spire/spiffe-csi-driver.yaml @@ -0,0 +1,20 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "csi.spiffe.io" +spec: + # Only ephemeral, inline volumes are supported. There is no need for a + # controller to provision and attach volumes. + attachRequired: false + + # Request the pod information which the CSI driver uses to verify that an + # ephemeral mount was requested. + podInfoOnMount: true + + # Don't change ownership on the contents of the mount since the Workload API + # Unix Domain Socket is typically open to all (i.e. 0777). + fsGroupPolicy: None + + # Declare support for ephemeral volumes only. 
+ volumeLifecycleModes: + - Ephemeral diff --git a/test/testdata/spire/spire-agent.yaml b/test/testdata/spire/spire-agent.yaml new file mode 100644 index 00000000000..4e848a51388 --- /dev/null +++ b/test/testdata/spire/spire-agent.yaml @@ -0,0 +1,208 @@ +# ServiceAccount for the SPIRE agent +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-agent + namespace: spire + +--- + +# Required cluster role to allow spire-agent to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role +rules: +- apiGroups: [""] + resources: ["pods", "nodes", "nodes/proxy"] + verbs: ["get"] + +--- + +# Binds above cluster role to spire-agent service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-agent + namespace: spire +roleRef: + kind: ClusterRole + name: spire-agent-cluster-role + apiGroup: rbac.authorization.k8s.io + + +--- + +# ConfigMap for the SPIRE agent featuring: +# 1) PSAT node attestation +# 2) K8S Workload Attestation over the secure kubelet port +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-agent + namespace: spire +data: + agent.conf: | + agent { + data_dir = "/run/spire" + log_level = "DEBUG" + server_address = "spire-server" + server_port = "8081" + socket_path = "/run/spire/sockets/spire-agent.sock" + trust_bundle_path = "/run/spire/bundle/bundle.crt" + trust_domain = "example.org" + } + + plugins { + NodeAttestor "k8s_psat" { + plugin_data { + cluster = "example-cluster" + } + } + + KeyManager "memory" { + plugin_data { + } + } + + WorkloadAttestor "k8s" { + plugin_data { + skip_kubelet_verification = true + } + } + } + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: spire-agent + namespace: spire + labels: + app: spire-agent +spec: + selector: + matchLabels: + app: spire-agent + updateStrategy: + type: RollingUpdate + template: + 
metadata: + namespace: spire + labels: + app: spire-agent + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: spire-agent + containers: + - name: spire-agent + image: ghcr.io/spiffe/spire-agent:1.1.1 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/agent.conf"] + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-bundle + mountPath: /run/spire/bundle + readOnly: true + - name: spire-token + mountPath: /var/run/secrets/tokens + - name: spire-agent-socket-dir + mountPath: /run/spire/sockets + # This is the container which runs the SPIFFE CSI driver. + - name: spiffe-csi-driver + image: ghcr.io/spiffe/spiffe-csi-driver:nightly + imagePullPolicy: IfNotPresent + args: [ + "-workload-api-socket-dir", "/spire-agent-socket", + "-csi-socket-path", "/spiffe-csi/csi.sock", + ] + env: + # The CSI driver needs a unique node ID. The node name can be + # used for this purpose. + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + # The volume containing the SPIRE agent socket. The SPIFFE CSI + # driver will mount this directory into containers. + - mountPath: /spire-agent-socket + name: spire-agent-socket-dir + readOnly: true + # The volume that will contain the CSI driver socket shared + # with the kubelet and the driver registrar. + - mountPath: /spiffe-csi + name: spiffe-csi-socket-dir + # The volume containing mount points for containers. + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + securityContext: + privileged: true + # This container runs the CSI Node Driver Registrar which takes care + # of all the little details required to register a CSI driver with + # the kubelet. 
+ - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + imagePullPolicy: IfNotPresent + args: [ + "-csi-address", "/spiffe-csi/csi.sock", + "-kubelet-registration-path", "/var/lib/kubelet/plugins/csi.spiffe.io/csi.sock", + ] + volumeMounts: + # The registrar needs access to the SPIFFE CSI driver socket + - mountPath: /spiffe-csi + name: spiffe-csi-socket-dir + # The registrar needs access to the Kubelet plugin registration + # directory + - name: kubelet-plugin-registration-dir + mountPath: /registration + volumes: + - name: spire-config + configMap: + name: spire-agent + - name: spire-bundle + configMap: + name: spire-bundle + - name: spire-token + projected: + sources: + - serviceAccountToken: + path: spire-agent + expirationSeconds: 7200 + audience: spire-server + # This volume is used to share the Workload API socket between the CSI + # driver and SPIRE agent. Note, an emptyDir volume could also be used, + # however, this can lead to broken bind mounts in the workload + # containers if the agent pod is restarted (since the emptyDir + # directory on the node that was mounted into workload containers by + # the CSI driver belongs to the old pod instance and is no longer + # valid). 
+ - name: spire-agent-socket-dir + hostPath: + path: /run/spire/agent-sockets + type: DirectoryOrCreate + # This volume is where the socket for kubelet->driver communication lives + - name: spiffe-csi-socket-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.spiffe.io + type: DirectoryOrCreate + # This volume is where the SPIFFE CSI driver mounts volumes + - name: mountpoint-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory + # This volume is where the node-driver-registrar registers the plugin + # with kubelet + - name: kubelet-plugin-registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory diff --git a/test/testdata/spire/spire-server.yaml b/test/testdata/spire/spire-server.yaml new file mode 100644 index 00000000000..ceec824613d --- /dev/null +++ b/test/testdata/spire/spire-server.yaml @@ -0,0 +1,211 @@ +# ServiceAccount used by the SPIRE server. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-server + namespace: spire + +--- + +# Required cluster role to allow spire-server to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role +rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + # allow TokenReview requests (to verify service account tokens for PSAT + # attestation) +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["get", "create"] + +--- + +# Binds above cluster role to spire-server service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: ClusterRole + name: spire-server-cluster-role + apiGroup: rbac.authorization.k8s.io + +--- + +# Role for the SPIRE server +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: spire + name: spire-server-role +rules: + # allow "get" 
access to pods (to resolve selectors for PSAT attestation) +- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE + # agent bootstrapping, see the spire-bundle ConfigMap below) +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["spire-bundle"] + verbs: ["get", "patch"] + +--- + +# RoleBinding granting the spire-server-role to the SPIRE server +# service account. +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: Role + name: spire-server-role + apiGroup: rbac.authorization.k8s.io + +--- + +# ConfigMap containing the latest trust bundle for the trust domain. It is +# updated by SPIRE using the k8sbundle notifier plugin. SPIRE agents mount +# this config map and use the certificate to bootstrap trust with the SPIRE +# server during attestation. +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-bundle + namespace: spire + +--- + +# ConfigMap containing the SPIRE server configuration. 
+apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + default_svid_ttl = "1h" + ca_ttl = "12h" + ca_subject { + country = ["US"] + organization = ["SPIFFE"] + common_name = "" + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + "example-cluster" = { + service_account_allow_list = ["spire:spire-agent"] + } + } + } + } + + KeyManager "disk" { + plugin_data { + keys_path = "/run/spire/data/keys.json" + } + } + + Notifier "k8sbundle" { + plugin_data { + # This plugin updates the bundle.crt value in the spire:spire-bundle + # ConfigMap by default, so no additional configuration is necessary. + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: spire-server + namespace: spire + labels: + app: spire-server +spec: + replicas: 1 + selector: + matchLabels: + app: spire-server + template: + metadata: + namespace: spire + labels: + app: spire-server + spec: + serviceAccountName: spire-server + shareProcessNamespace: true + containers: + - name: spire-server + image: ghcr.io/spiffe/spire-server:1.1.1 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/server.conf"] + ports: + - containerPort: 8081 + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + volumes: + - name: spire-config + configMap: + name: spire-server + +--- + +# Service definition for SPIRE server defining the gRPC port. 
+apiVersion: v1 +kind: Service +metadata: + name: spire-server + namespace: spire +spec: + type: NodePort + ports: + - name: grpc + port: 8081 + targetPort: 8081 + protocol: TCP + selector: + app: spire-server diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore new file mode 100644 index 00000000000..b883f1fdc6d --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.gitignore @@ -0,0 +1 @@ +*.exe diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS new file mode 100644 index 00000000000..ae1b4942b91 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/CODEOWNERS @@ -0,0 +1 @@ + * @microsoft/containerplat diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE new file mode 100644 index 00000000000..b8b569d7746 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 00000000000..683be1dcf9c --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,37 @@ +# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) +declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR +appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. 
+ +We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves +or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can +attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. + + +## Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + + + +## Special Thanks +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 00000000000..2be34af4310 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,280 @@ +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +const ( + WRITE_DAC = 0x40000 + WRITE_OWNER = 0x80000 + ACCESS_SYSTEM_SECURITY = 0x1000000 
+) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamId struct { + StreamId uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. +func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. 
+ if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). 
+func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). 
+func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{"BackupRead", r.f.Name(), err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. +func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. 
+func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + runtime.KeepAlive(w.f) + w.ctx = 0 + } + return nil +} + +// OpenForBackup opens a file or directory, potentially skipping access checks if the backup +// or restore privileges have been acquired. +// +// If the file opened was a directory, it cannot be used with Readdir(). +func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { + winPath, err := syscall.UTF16FromString(path) + if err != nil { + return nil, err + } + h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if err != nil { + err = &os.PathError{Op: "open", Path: path, Err: err} + return nil, err + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go new file mode 100644 index 00000000000..4051c1b33bf --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/ea.go @@ -0,0 +1,137 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. 
+type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. +func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) 
+ if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 00000000000..293ab54c80c --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,329 @@ +//go:build windows +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) 
swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +const ( + cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. 
+type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + socket bool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// IsClosed checks if the file has been closed +func (f *win32File) IsClosed() bool { + return f.closing.isSet() +} + +// prepareIo prepares for a new IO operation. 
+// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + return int(bytes), err + } + + if f.closing.isSet() { + cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing.isSet() { + err = ErrFileClosed + } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) + } + case <-timeout: + cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. 
+func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline 
is in the past. Cancel all pending IO now.
+		timeoutIO()
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go
new file mode 100644
index 00000000000..3ab6bff69c5
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go
@@ -0,0 +1,73 @@
+// +build windows
+
+package winio
+
+import (
+	"os"
+	"runtime"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+// FileBasicInfo contains file access time and file attributes information.
+type FileBasicInfo struct {
+	CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
+	FileAttributes uint32
+	pad            uint32 // padding
+}
+
+// GetFileBasicInfo retrieves times and attributes for a file.
+func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
+	bi := &FileBasicInfo{}
+	if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	return bi, nil
+}
+
+// SetFileBasicInfo sets times and attributes for a file.
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+	if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	return nil
+}
+
+// FileStandardInfo contains extended information for the file.
+// FILE_STANDARD_INFO in WinBase.h
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
+type FileStandardInfo struct {
+	AllocationSize, EndOfFile int64
+	NumberOfLinks             uint32
+	DeletePending, Directory  bool
+}
+
+// GetFileStandardInfo retrieves extended information for the file.
+func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { + si := &FileStandardInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return si, nil +} + +// FileIDInfo contains the volume serial number and file ID for a file. This pair should be +// unique on a system. +type FileIDInfo struct { + VolumeSerialNumber uint64 + FileID [16]byte +} + +// GetFileID retrieves the unique (volume, file ID) pair for a file. +func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 00000000000..b2b644d002a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,316 @@ +//go:build windows +// +build windows + +package winio + +import ( + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const ( + afHvSock = 34 // AF_HYPERV + + socketError = ^uintptr(0) +) + +// An HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +// Network returns the address's network name, "hvsock". 
+func (addr *HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. +func VsockServiceID(port uint32) guid.GUID { + g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHvSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// HvsockListener is a socket listener for the AF_HYPERV address family. +type HvsockListener struct { + sock *win32File + addr HvsockAddr +} + +// HvsockConn is a connected socket of the AF_HYPERV address family. +type HvsockConn struct { + sock *win32File + local, remote HvsockAddr +} + +func newHvSocket() (*win32File, error) { + fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + f, err := makeWin32File(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + f.socket = true + return f, nil +} + +// ListenHvsock listens for connections on the specified hvsock address. 
+func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { + l := &HvsockListener{addr: *addr} + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("listen", err) + } + sa := addr.raw() + err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("socket", err)) + } + err = syscall.Listen(sock.handle, 16) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("listen", err)) + } + return &HvsockListener{sock: sock, addr: *addr}, nil +} + +func (l *HvsockListener) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} +} + +// Addr returns the listener's network address. +func (l *HvsockListener) Addr() net.Addr { + return &l.addr +} + +// Accept waits for the next connection and returns it. +func (l *HvsockListener) Accept() (_ net.Conn, err error) { + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("accept", err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := l.sock.prepareIo() + if err != nil { + return nil, l.opErr("accept", err) + } + defer l.sock.wg.Done() + + // AcceptEx, per documentation, requires an extra 16 bytes per address. + const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) + var addrbuf [addrlen * 2]byte + + var bytes uint32 + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) + _, err = l.sock.asyncIo(c, nil, bytes, err) + if err != nil { + return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) + } + conn := &HvsockConn{ + sock: sock, + } + conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. 
+func (l *HvsockListener) Close() error { + return l.sock.Close() +} + +/* Need to finish ConnectEx handling +func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { + sock, err := newHvSocket() + if err != nil { + return nil, err + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := sock.prepareIo() + if err != nil { + return nil, err + } + defer sock.wg.Done() + var bytes uint32 + err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) + _, err = sock.asyncIo(ctx, c, nil, bytes, err) + if err != nil { + return nil, err + } + conn := &HvsockConn{ + sock: sock, + remote: *addr, + } + sock = nil + return conn, nil +} +*/ + +func (conn *HvsockConn) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsarecv", err) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, 
err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsasend", err) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. +func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) IsClosed() bool { + return conn.sock.IsClosed() +} + +func (conn *HvsockConn) shutdown(how int) error { + if conn.IsClosed() { + return ErrFileClosed + } + + err := syscall.Shutdown(conn.sock.handle, how) + if err != nil { + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket, preventing future read operations. +func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(syscall.SHUT_RD) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, preventing future write operations and +// notifying the other endpoint that no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(syscall.SHUT_WR) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. +func (conn *HvsockConn) RemoteAddr() net.Addr { + return &conn.remote +} + +// SetDeadline implements the net.Conn SetDeadline method. +func (conn *HvsockConn) SetDeadline(t time.Time) error { + conn.SetReadDeadline(t) + conn.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. 
+func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 00000000000..96700a73de2 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,517 @@ +// +build windows + +package winio + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "runtime" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys 
rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr + Dacl uintptr +} + +type ntstatus int32 + +func (status ntstatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_NO_DATA = syscall.Errno(232) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 + + cFILE_OPEN = 1 + cFILE_CREATE = 2 + + cFILE_PIPE_MESSAGE_TYPE = 1 + cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 + + cSE_DACL_PRESENT = 4 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. 
+ ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. 
+ f.readEOF = true + } else if err == syscall.ERROR_MORE_DATA { + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. + err = nil + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. +func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { + for { + + select { + case <-ctx.Done(): + return syscall.Handle(0), ctx.Err() + default: + h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err == nil { + return h, nil + } + if err != cERROR_PIPE_BUSY { + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(10 * time.Millisecond) + } + } +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) +func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(2 * time.Second) + } + ctx, _ := context.WithDeadline(context.Background(), absTimeout) + conn, err := DialPipeContext(ctx, path) + if err == context.DeadlineExceeded { + return nil, ErrTimeout + } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. 
+func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { + return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) +} + +// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` +// cancellation or timeout. +func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + var err error + var h syscall.Handle + h, err = tryDialPipe(ctx, &path, access) + if err != nil { + return nil, err + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + + // The security descriptor is only needed for the first pipe. 
+ if first { + if sd != nil { + len := uint32(len(sd)) + sdb := localAlloc(0, len) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. + var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: cSE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } + } + + typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) + if c.MessageMode { + typ |= cFILE_PIPE_MESSAGE_TYPE + } + + disposition := uint32(cFILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = cFILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. 
+		access = syscall.SYNCHRONIZE
+	}
+
+	timeout := int64(-50 * 10000) // 50ms
+
+	var (
+		h    syscall.Handle
+		iosb ioStatusBlock
+	)
+	err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err()
+	if err != nil {
+		return 0, &os.PathError{Op: "open", Path: path, Err: err}
+	}
+
+	runtime.KeepAlive(ntPath)
+	return h, nil
+}
+
+func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
+	h, err := makeServerPipeHandle(l.path, nil, &l.config, false)
+	if err != nil {
+		return nil, err
+	}
+	f, err := makeWin32File(h)
+	if err != nil {
+		syscall.Close(h)
+		return nil, err
+	}
+	return f, nil
+}
+
+func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) {
+	p, err := l.makeServerPipe()
+	if err != nil {
+		return nil, err
+	}
+
+	// Wait for the client to connect.
+	ch := make(chan error)
+	go func(p *win32File) {
+		ch <- connectPipe(p)
+	}(p)
+
+	select {
+	case err = <-ch:
+		if err != nil {
+			p.Close()
+			p = nil
+		}
+	case <-l.closeCh:
+		// Abort the connect request by closing the handle.
+		p.Close()
+		p = nil
+		err = <-ch
+		if err == nil || err == ErrFileClosed {
+			err = ErrPipeListenerClosed
+		}
+	}
+	return p, err
+}
+
+func (l *win32PipeListener) listenerRoutine() {
+	closed := false
+	for !closed {
+		select {
+		case <-l.closeCh:
+			closed = true
+		case responseCh := <-l.acceptCh:
+			var (
+				p   *win32File
+				err error
+			)
+			for {
+				p, err = l.makeConnectedServerPipe()
+				// If the connection was immediately closed by the client, try
+				// again.
+				if err != cERROR_NO_DATA {
+					break
+				}
+			}
+			responseCh <- acceptResponse{p, err}
+			closed = err == ErrPipeListenerClosed
+		}
+	}
+	syscall.Close(l.firstHandle)
+	l.firstHandle = 0
+	// Notify Close() and Accept() callers that the handle has been closed.
+	close(l.doneCh)
+}
+
+// PipeConfig contains configuration for the pipe listener.
+type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. + MessageMode bool + + // InputBufferSize specifies the size of the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size of the output buffer, in bytes. + OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. +func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + l := &win32PipeListener{ + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, nil, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err 
!= nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 00000000000..2d9161e2dee --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,228 @@ +// +build windows + +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" + "encoding" + "encoding/binary" + "fmt" + "strconv" +) + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. 
+type Version uint8 + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. +func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. 
+func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. +func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. +func FromString(s string) (GUID, error) { + if len(s) != 36 { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + + var g GUID + + data1, err := strconv.ParseUint(s[0:8], 16, 32) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data1 = uint32(data1) + + data2, err := strconv.ParseUint(s[9:13], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data2 = uint16(data2) + + data3, err := strconv.ParseUint(s[14:18], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data3 = uint16(data3) + + for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { + v, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data4[i] = uint8(v) + } + + return g, nil +} + +func (g *GUID) setVariant(v Variant) { + d := g.Data4[0] + switch v { + case VariantNCS: + d = (d & 0x7f) + case VariantRFC4122: + d = (d & 0x3f) | 0x80 + case VariantMicrosoft: + d = (d & 0x1f) | 0xc0 + case VariantFuture: + d = (d & 0x0f) | 0xe0 + case VariantUnknown: + fallthrough + default: + 
panic(fmt.Sprintf("invalid variant: %d", v))
+	}
+	g.Data4[0] = d
+}
+
+// Variant returns the GUID variant, as defined in RFC 4122.
+func (g GUID) Variant() Variant {
+	b := g.Data4[0]
+	if b&0x80 == 0 {
+		return VariantNCS
+	} else if b&0xc0 == 0x80 {
+		return VariantRFC4122
+	} else if b&0xe0 == 0xc0 {
+		return VariantMicrosoft
+	} else if b&0xe0 == 0xe0 {
+		return VariantFuture
+	}
+	return VariantUnknown
+}
+
+func (g *GUID) setVersion(v Version) {
+	g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
+}
+
+// Version returns the GUID version, as defined in RFC 4122.
+func (g GUID) Version() Version {
+	return Version((g.Data3 & 0xF000) >> 12)
+}
+
+// MarshalText returns the textual representation of the GUID.
+func (g GUID) MarshalText() ([]byte, error) {
+	return []byte(g.String()), nil
+}
+
+// UnmarshalText takes the textual representation of a GUID, and unmarshals it
+// into this GUID.
+func (g *GUID) UnmarshalText(text []byte) error {
+	g2, err := FromString(string(text))
+	if err != nil {
+		return err
+	}
+	*g = g2
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
new file mode 100644
index 00000000000..f64d828c0ba
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package guid
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type as that is only available to builds
+// targeted at `windows`. The representation matches that used by native Windows
+// code.
+type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go new file mode 100644 index 00000000000..83617f4eee9 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -0,0 +1,10 @@ +package guid + +import "golang.org/x/sys/windows" + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 00000000000..c3dd7c21769 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,203 @@ +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys 
lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" + SeSecurityPrivilege = "SeSecurityPrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. 
+func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + var privileges []uint64 + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. +func EnableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. 
+func DisableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, 0)
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error {
+	privileges, err := mapPrivileges(names)
+	if err != nil {
+		return err
+	}
+
+	p, _ := windows.GetCurrentProcess()
+	var token windows.Token
+	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+	if err != nil {
+		return err
+	}
+
+	defer token.Close()
+	return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
+	var b bytes.Buffer
+	binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+	for _, p := range privileges {
+		binary.Write(&b, binary.LittleEndian, p)
+		binary.Write(&b, binary.LittleEndian, action)
+	}
+	prevState := make([]byte, b.Len())
+	reqSize := uint32(0)
+	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+	if !success {
+		return err
+	}
+	if err == ERROR_NOT_ALL_ASSIGNED {
+		return &PrivilegeError{privileges}
+	}
+	return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+	var nameBuffer [256]uint16
+	bufSize := uint32(len(nameBuffer))
+	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %d>", luid)
+	}
+
+	var displayNameBuffer [256]uint16
+	displayBufSize := uint32(len(displayNameBuffer))
+	var langID uint32
+	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege display name: %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+	}
+
+	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) {
+	err := impersonateSelf(securityImpersonation)
+	if err != nil {
+		return 0, err
+	}
+
+	var token windows.Token
+	err = openThreadToken(getCurrentThread(), 
syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + if err != nil { + rerr := revertToSelf() + if rerr != nil { + panic(rerr) + } + return 0, err + } + return token, nil +} + +func releaseThreadToken(h windows.Token) { + err := revertToSelf() + if err != nil { + panic(err) + } + h.Close() +} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go new file mode 100644 index 00000000000..fc1ee4d3a3e --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/reparse.go @@ -0,0 +1,128 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "strings" + "unicode/utf16" + "unsafe" +) + +const ( + reparseTagMountPoint = 0xA0000003 + reparseTagSymlink = 0xA000000C +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 +} + +// ReparsePoint describes a Win32 symlink or mount point. +type ReparsePoint struct { + Target string + IsMountPoint bool +} + +// UnsupportedReparsePointError is returned when trying to decode a non-symlink or +// mount point reparse point. +type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. 
+func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. 
+ target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 00000000000..db1b370a1b5 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,98 @@ +// +build windows + +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree 
+//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const ( + cERROR_NONE_MAPPED = syscall.Errno(1332) +) + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch e.Err { + case cERROR_NONE_MAPPED: + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +// LookupSidByName looks up the SID of an account by name +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + localFree(uintptr(unsafe.Pointer(strBuffer))) + return sid, nil +} + +func SddlToSecurityDescriptor(sddl string) ([]byte, error) { + var sdBuffer uintptr + err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + if err != nil { + return nil, &SddlConversionError{sddl, err} + } + defer localFree(sdBuffer) + sd := make([]byte, 
getSecurityDescriptorLength(sdBuffer)) + copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) + return sd, nil +} + +func SecurityDescriptorToSddl(sd []byte) (string, error) { + var sddl *uint16 + // The returned string length seems to including an aribtrary number of terminating NULs. + // Don't use it. + err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 00000000000..5955c99fdea --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,3 @@ +package winio + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 00000000000..176ff75e320 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,427 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. 
+func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procGetCurrentThread = 
modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procbind = modws2_32.NewProc("bind") +) + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + err = errnoErr(e1) + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, 
uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), 
uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 
3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + 
err = errnoErr(e1) + } + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + err = errnoErr(e1) + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, 
pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + err = 
errnoErr(e1) + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntstatus(r0) + return +} + +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, 
uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/LICENSE b/vendor/github.com/spiffe/go-spiffe/v2/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go new file mode 100644 index 00000000000..3d6382f4885 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/bundle.go @@ -0,0 +1,201 @@ +package jwtbundle + +import ( + "crypto" + "encoding/json" + "io" + "io/ioutil" + "sync" + + "github.com/spiffe/go-spiffe/v2/internal/jwtutil" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/zeebo/errs" + "gopkg.in/square/go-jose.v2" +) + +var ( + jwtbundleErr = errs.Class("jwtbundle") +) + +// Bundle is a collection of trusted JWT authorities for a trust domain. +type Bundle struct { + trustDomain spiffeid.TrustDomain + + mtx sync.RWMutex + jwtAuthorities map[string]crypto.PublicKey +} + +// New creates a new bundle. +func New(trustDomain spiffeid.TrustDomain) *Bundle { + return &Bundle{ + trustDomain: trustDomain, + jwtAuthorities: make(map[string]crypto.PublicKey), + } +} + +// FromJWTAuthorities creates a new bundle from JWT authorities +func FromJWTAuthorities(trustDomain spiffeid.TrustDomain, jwtAuthorities map[string]crypto.PublicKey) *Bundle { + return &Bundle{ + trustDomain: trustDomain, + jwtAuthorities: jwtutil.CopyJWTAuthorities(jwtAuthorities), + } +} + +// Load loads a bundle from a file on disk. The file must contain a standard RFC 7517 JWKS document. 
+func Load(trustDomain spiffeid.TrustDomain, path string) (*Bundle, error) { + bundleBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, jwtbundleErr.New("unable to read JWT bundle: %w", err) + } + + return Parse(trustDomain, bundleBytes) +} + +// Read decodes a bundle from a reader. The contents must contain a standard RFC 7517 JWKS document. +func Read(trustDomain spiffeid.TrustDomain, r io.Reader) (*Bundle, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, jwtbundleErr.New("unable to read: %v", err) + } + + return Parse(trustDomain, b) +} + +// Parse parses a bundle from bytes. The data must be a standard RFC 7517 JWKS document. +func Parse(trustDomain spiffeid.TrustDomain, bundleBytes []byte) (*Bundle, error) { + jwks := new(jose.JSONWebKeySet) + if err := json.Unmarshal(bundleBytes, jwks); err != nil { + return nil, jwtbundleErr.New("unable to parse JWKS: %v", err) + } + + bundle := New(trustDomain) + for i, key := range jwks.Keys { + if err := bundle.AddJWTAuthority(key.KeyID, key.Key); err != nil { + return nil, jwtbundleErr.New("error adding authority %d of JWKS: %v", i, errs.Unwrap(err)) + } + } + + return bundle, nil +} + +// TrustDomain returns the trust domain that the bundle belongs to. +func (b *Bundle) TrustDomain() spiffeid.TrustDomain { + return b.trustDomain +} + +// JWTAuthorities returns the JWT authorities in the bundle, keyed by key ID. +func (b *Bundle) JWTAuthorities() map[string]crypto.PublicKey { + b.mtx.RLock() + defer b.mtx.RUnlock() + + return jwtutil.CopyJWTAuthorities(b.jwtAuthorities) +} + +// FindJWTAuthority finds the JWT authority with the given key ID from the bundle. If the authority +// is found, it is returned and the boolean is true. Otherwise, the returned +// value is nil and the boolean is false. 
+func (b *Bundle) FindJWTAuthority(keyID string) (crypto.PublicKey, bool) { + b.mtx.RLock() + defer b.mtx.RUnlock() + + if jwtAuthority, ok := b.jwtAuthorities[keyID]; ok { + return jwtAuthority, true + } + return nil, false +} + +// HasJWTAuthority returns true if the bundle has a JWT authority with the given key ID. +func (b *Bundle) HasJWTAuthority(keyID string) bool { + b.mtx.RLock() + defer b.mtx.RUnlock() + + _, ok := b.jwtAuthorities[keyID] + return ok +} + +// AddJWTAuthority adds a JWT authority to the bundle. If a JWT authority already exists +// under the given key ID, it is replaced. A key ID must be specified. +func (b *Bundle) AddJWTAuthority(keyID string, jwtAuthority crypto.PublicKey) error { + if keyID == "" { + return jwtbundleErr.New("keyID cannot be empty") + } + + b.mtx.Lock() + defer b.mtx.Unlock() + + b.jwtAuthorities[keyID] = jwtAuthority + return nil +} + +// RemoveJWTAuthority removes the JWT authority identified by the key ID from the bundle. +func (b *Bundle) RemoveJWTAuthority(keyID string) { + b.mtx.Lock() + defer b.mtx.Unlock() + + delete(b.jwtAuthorities, keyID) +} + +// SetJWTAuthorities sets the JWT authorities in the bundle. +func (b *Bundle) SetJWTAuthorities(jwtAuthorities map[string]crypto.PublicKey) { + b.mtx.Lock() + defer b.mtx.Unlock() + + b.jwtAuthorities = jwtutil.CopyJWTAuthorities(jwtAuthorities) +} + +// Empty returns true if the bundle has no JWT authorities. +func (b *Bundle) Empty() bool { + b.mtx.RLock() + defer b.mtx.RUnlock() + + return len(b.jwtAuthorities) == 0 +} + +// Marshal marshals the JWT bundle into a standard RFC 7517 JWKS document. The +// JWKS does not contain any SPIFFE-specific parameters. 
+func (b *Bundle) Marshal() ([]byte, error) { + b.mtx.RLock() + defer b.mtx.RUnlock() + + jwks := jose.JSONWebKeySet{} + for keyID, jwtAuthority := range b.jwtAuthorities { + jwks.Keys = append(jwks.Keys, jose.JSONWebKey{ + Key: jwtAuthority, + KeyID: keyID, + }) + } + + return json.Marshal(jwks) +} + +// Clone clones the bundle. +func (b *Bundle) Clone() *Bundle { + b.mtx.RLock() + defer b.mtx.RUnlock() + + return FromJWTAuthorities(b.trustDomain, b.jwtAuthorities) +} + +// Equal compares the bundle for equality against the given bundle. +func (b *Bundle) Equal(other *Bundle) bool { + if b == nil || other == nil { + return b == other + } + + return b.trustDomain == other.trustDomain && + jwtutil.JWTAuthoritiesEqual(b.jwtAuthorities, other.jwtAuthorities) +} + +// GetJWTBundleForTrustDomain returns the JWT bundle for the given trust +// domain. It implements the Source interface. An error will be returned if +// the trust domain does not match that of the bundle. +func (b *Bundle) GetJWTBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*Bundle, error) { + b.mtx.RLock() + defer b.mtx.RUnlock() + + if b.trustDomain != trustDomain { + return nil, jwtbundleErr.New("no JWT bundle for trust domain %q", trustDomain) + } + + return b, nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go new file mode 100644 index 00000000000..c5dc1c3d7e0 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/doc.go @@ -0,0 +1,37 @@ +// Package jwtbundle provides JWT bundle related functionality. +// +// A bundle represents a collection of JWT authorities, i.e., those that +// are used to authenticate SPIFFE JWT-SVIDs. 
+// +// You can create a new bundle for a specific trust domain: +// td := spiffeid.RequireTrustDomain("example.org") +// bundle := jwtbundle.New(td) +// +// Or you can load it from disk: +// td := spiffeid.RequireTrustDomain("example.org") +// bundle := jwtbundle.Load(td, "bundle.jwks") +// +// The bundle can be initialized with JWT authorities: +// td := spiffeid.RequireTrustDomain("example.org") +// var jwtAuthorities map[string]crypto.PublicKey = ... +// bundle := jwtbundle.FromJWTAuthorities(td, jwtAuthorities) +// +// In addition, you can add JWT authorities to the bundle: +// var keyID string = ... +// var publicKey crypto.PublicKey = ... +// bundle.AddJWTAuthority(keyID, publicKey) +// +// Bundles can be organized into a set, keyed by trust domain: +// set := jwtbundle.NewSet() +// set.Add(bundle) +// +// A Source is source of JWT bundles for a trust domain. Both the Bundle +// and Set types implement Source: +// // Initialize the source from a bundle or set +// var source jwtbundle.Source = bundle +// // ... or ... +// var source jwtbundle.Source = set +// +// // Use the source to query for bundles by trust domain +// bundle, err := source.GetJWTBundleForTrustDomain(td) +package jwtbundle diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/set.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/set.go new file mode 100644 index 00000000000..048dd0d8a8a --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/set.go @@ -0,0 +1,105 @@ +package jwtbundle + +import ( + "sort" + "sync" + + "github.com/spiffe/go-spiffe/v2/spiffeid" +) + +// Set is a set of bundles, keyed by trust domain. +type Set struct { + mtx sync.RWMutex + bundles map[spiffeid.TrustDomain]*Bundle +} + +// NewSet creates a new set initialized with the given bundles. 
+func NewSet(bundles ...*Bundle) *Set { + bundlesMap := make(map[spiffeid.TrustDomain]*Bundle) + + for _, b := range bundles { + if b != nil { + bundlesMap[b.trustDomain] = b + } + } + + return &Set{ + bundles: bundlesMap, + } +} + +// Add adds a new bundle into the set. If a bundle already exists for the +// trust domain, the existing bundle is replaced. +func (s *Set) Add(bundle *Bundle) { + s.mtx.Lock() + defer s.mtx.Unlock() + + if bundle != nil { + s.bundles[bundle.trustDomain] = bundle + } +} + +// Remove removes the bundle for the given trust domain. +func (s *Set) Remove(trustDomain spiffeid.TrustDomain) { + s.mtx.Lock() + defer s.mtx.Unlock() + + delete(s.bundles, trustDomain) +} + +// Has returns true if there is a bundle for the given trust domain. +func (s *Set) Has(trustDomain spiffeid.TrustDomain) bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + + _, ok := s.bundles[trustDomain] + return ok +} + +// Get returns a bundle for the given trust domain. If the bundle is in the set +// it is returned and the boolean is true. Otherwise, the returned value is +// nil and the boolean is false. +func (s *Set) Get(trustDomain spiffeid.TrustDomain) (*Bundle, bool) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + bundle, ok := s.bundles[trustDomain] + return bundle, ok +} + +// Bundles returns the bundles in the set sorted by trust domain. +func (s *Set) Bundles() []*Bundle { + s.mtx.RLock() + defer s.mtx.RUnlock() + + out := make([]*Bundle, 0, len(s.bundles)) + for _, bundle := range s.bundles { + out = append(out, bundle) + } + sort.Slice(out, func(a, b int) bool { + return out[a].TrustDomain().Compare(out[b].TrustDomain()) < 0 + }) + return out +} + +// Len returns the number of bundles in the set. +func (s *Set) Len() int { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return len(s.bundles) +} + +// GetJWTBundleForTrustDomain returns the JWT bundle for the given trust +// domain. It implements the Source interface. 
+func (s *Set) GetJWTBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*Bundle, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + bundle, ok := s.bundles[trustDomain] + if !ok { + return nil, jwtbundleErr.New("no JWT bundle for trust domain %q", trustDomain) + } + + return bundle, nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/source.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/source.go new file mode 100644 index 00000000000..224cd9f93ef --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/jwtbundle/source.go @@ -0,0 +1,12 @@ +package jwtbundle + +import ( + "github.com/spiffe/go-spiffe/v2/spiffeid" +) + +// Source represents a source of JWT bundles keyed by trust domain. +type Source interface { + // GetJWTBundleForTrustDomain returns the JWT bundle for the given trust + // domain. + GetJWTBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*Bundle, error) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go new file mode 100644 index 00000000000..9d2a8d8d827 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/bundle.go @@ -0,0 +1,486 @@ +package spiffebundle + +import ( + "crypto" + "crypto/x509" + "encoding/json" + "io" + "io/ioutil" + "sync" + "time" + + "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/internal/jwtutil" + "github.com/spiffe/go-spiffe/v2/internal/x509util" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/zeebo/errs" + "gopkg.in/square/go-jose.v2" +) + +const ( + x509SVIDUse = "x509-svid" + jwtSVIDUse = "jwt-svid" +) + +var ( + spiffebundleErr = errs.Class("spiffebundle") +) + +type bundleDoc struct { + jose.JSONWebKeySet + SequenceNumber *uint64 `json:"spiffe_sequence,omitempty"` + RefreshHint *int64 `json:"spiffe_refresh_hint,omitempty"` +} + +// Bundle is 
a collection of trusted public key material for a trust domain, +// conforming to the SPIFFE Bundle Format as part of the SPIFFE Trust Domain +// and Bundle specification: +// https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE_Trust_Domain_and_Bundle.md +type Bundle struct { + trustDomain spiffeid.TrustDomain + + mtx sync.RWMutex + refreshHint *time.Duration + sequenceNumber *uint64 + jwtAuthorities map[string]crypto.PublicKey + x509Authorities []*x509.Certificate +} + +// New creates a new bundle. +func New(trustDomain spiffeid.TrustDomain) *Bundle { + return &Bundle{ + trustDomain: trustDomain, + jwtAuthorities: make(map[string]crypto.PublicKey), + } +} + +// Load loads a bundle from a file on disk. The file must contain a JWKS +// document following the SPIFFE Trust Domain and Bundle specification. +func Load(trustDomain spiffeid.TrustDomain, path string) (*Bundle, error) { + bundleBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, spiffebundleErr.New("unable to read SPIFFE bundle: %w", err) + } + + return Parse(trustDomain, bundleBytes) +} + +// Read decodes a bundle from a reader. The contents must contain a JWKS +// document following the SPIFFE Trust Domain and Bundle specification. +func Read(trustDomain spiffeid.TrustDomain, r io.Reader) (*Bundle, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, spiffebundleErr.New("unable to read: %v", err) + } + + return Parse(trustDomain, b) +} + +// Parse parses a bundle from bytes. The data must be a JWKS document following +// the SPIFFE Trust Domain and Bundle specification. 
+func Parse(trustDomain spiffeid.TrustDomain, bundleBytes []byte) (*Bundle, error) { + jwks := &bundleDoc{} + if err := json.Unmarshal(bundleBytes, jwks); err != nil { + return nil, spiffebundleErr.New("unable to parse JWKS: %v", err) + } + + bundle := New(trustDomain) + if jwks.RefreshHint != nil { + bundle.SetRefreshHint(time.Second * time.Duration(*jwks.RefreshHint)) + } + if jwks.SequenceNumber != nil { + bundle.SetSequenceNumber(*jwks.SequenceNumber) + } + + if jwks.Keys == nil { + // The parameter keys MUST be present. + // https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE_Trust_Domain_and_Bundle.md#413-keys + return nil, spiffebundleErr.New("no authorities found") + } + for i, key := range jwks.Keys { + switch key.Use { + // Two SVID types are supported: x509-svid and jwt-svid. + case x509SVIDUse: + if len(key.Certificates) != 1 { + return nil, spiffebundleErr.New("expected a single certificate in %s entry %d; got %d", x509SVIDUse, i, len(key.Certificates)) + } + bundle.AddX509Authority(key.Certificates[0]) + case jwtSVIDUse: + if err := bundle.AddJWTAuthority(key.KeyID, key.Key); err != nil { + return nil, spiffebundleErr.New("error adding authority %d of JWKS: %v", i, errs.Unwrap(err)) + } + } + } + + return bundle, nil +} + +// FromX509Bundle creates a bundle from an X.509 bundle. +// The function panics in case of a nil X.509 bundle. +func FromX509Bundle(x509Bundle *x509bundle.Bundle) *Bundle { + bundle := New(x509Bundle.TrustDomain()) + bundle.x509Authorities = x509Bundle.X509Authorities() + return bundle +} + +// FromJWTBundle creates a bundle from a JWT bundle. +// The function panics in case of a nil JWT bundle. +func FromJWTBundle(jwtBundle *jwtbundle.Bundle) *Bundle { + bundle := New(jwtBundle.TrustDomain()) + bundle.jwtAuthorities = jwtBundle.JWTAuthorities() + return bundle +} + +// FromX509Authorities creates a bundle from X.509 certificates. 
+func FromX509Authorities(trustDomain spiffeid.TrustDomain, x509Authorities []*x509.Certificate) *Bundle { + bundle := New(trustDomain) + bundle.x509Authorities = x509util.CopyX509Authorities(x509Authorities) + return bundle +} + +// FromJWTAuthorities creates a new bundle from JWT authorities. +func FromJWTAuthorities(trustDomain spiffeid.TrustDomain, jwtAuthorities map[string]crypto.PublicKey) *Bundle { + bundle := New(trustDomain) + bundle.jwtAuthorities = jwtutil.CopyJWTAuthorities(jwtAuthorities) + return bundle +} + +// TrustDomain returns the trust domain that the bundle belongs to. +func (b *Bundle) TrustDomain() spiffeid.TrustDomain { + return b.trustDomain +} + +// X509Authorities returns the X.509 authorities in the bundle. +func (b *Bundle) X509Authorities() []*x509.Certificate { + b.mtx.RLock() + defer b.mtx.RUnlock() + + return x509util.CopyX509Authorities(b.x509Authorities) +} + +// AddX509Authority adds an X.509 authority to the bundle. If the authority already +// exists in the bundle, the contents of the bundle will remain unchanged. +func (b *Bundle) AddX509Authority(x509Authority *x509.Certificate) { + b.mtx.Lock() + defer b.mtx.Unlock() + + for _, r := range b.x509Authorities { + if r.Equal(x509Authority) { + return + } + } + + b.x509Authorities = append(b.x509Authorities, x509Authority) +} + +// RemoveX509Authority removes an X.509 authority from the bundle. +func (b *Bundle) RemoveX509Authority(x509Authority *x509.Certificate) { + b.mtx.Lock() + defer b.mtx.Unlock() + + for i, r := range b.x509Authorities { + if r.Equal(x509Authority) { + b.x509Authorities = append(b.x509Authorities[:i], b.x509Authorities[i+1:]...) + return + } + } +} + +// HasX509Authority checks if the given X.509 authority exists in the bundle. 
+func (b *Bundle) HasX509Authority(x509Authority *x509.Certificate) bool { + b.mtx.RLock() + defer b.mtx.RUnlock() + + for _, r := range b.x509Authorities { + if r.Equal(x509Authority) { + return true + } + } + return false +} + +// SetX509Authorities sets the X.509 authorities in the bundle. +func (b *Bundle) SetX509Authorities(authorities []*x509.Certificate) { + b.mtx.Lock() + defer b.mtx.Unlock() + + b.x509Authorities = x509util.CopyX509Authorities(authorities) +} + +// JWTAuthorities returns the JWT authorities in the bundle, keyed by key ID. +func (b *Bundle) JWTAuthorities() map[string]crypto.PublicKey { + b.mtx.RLock() + defer b.mtx.RUnlock() + + return jwtutil.CopyJWTAuthorities(b.jwtAuthorities) +} + +// FindJWTAuthority finds the JWT authority with the given key ID from the bundle. If the authority +// is found, it is returned and the boolean is true. Otherwise, the returned +// value is nil and the boolean is false. +func (b *Bundle) FindJWTAuthority(keyID string) (crypto.PublicKey, bool) { + b.mtx.RLock() + defer b.mtx.RUnlock() + + jwtAuthority, ok := b.jwtAuthorities[keyID] + return jwtAuthority, ok +} + +// HasJWTAuthority returns true if the bundle has a JWT authority with the given key ID. +func (b *Bundle) HasJWTAuthority(keyID string) bool { + b.mtx.RLock() + defer b.mtx.RUnlock() + + _, ok := b.jwtAuthorities[keyID] + return ok +} + +// AddJWTAuthority adds a JWT authority to the bundle. If a JWT authority already exists +// under the given key ID, it is replaced. A key ID must be specified. +func (b *Bundle) AddJWTAuthority(keyID string, jwtAuthority crypto.PublicKey) error { + if keyID == "" { + return spiffebundleErr.New("keyID cannot be empty") + } + + b.mtx.Lock() + defer b.mtx.Unlock() + + b.jwtAuthorities[keyID] = jwtAuthority + return nil +} + +// RemoveJWTAuthority removes the JWT authority identified by the key ID from the bundle. 
+func (b *Bundle) RemoveJWTAuthority(keyID string) { + b.mtx.Lock() + defer b.mtx.Unlock() + + delete(b.jwtAuthorities, keyID) +} + +// SetJWTAuthorities sets the JWT authorities in the bundle. +func (b *Bundle) SetJWTAuthorities(jwtAuthorities map[string]crypto.PublicKey) { + b.mtx.Lock() + defer b.mtx.Unlock() + + b.jwtAuthorities = jwtutil.CopyJWTAuthorities(jwtAuthorities) +} + +// Empty returns true if the bundle has no X.509 and JWT authorities. +func (b *Bundle) Empty() bool { + b.mtx.RLock() + defer b.mtx.RUnlock() + + return len(b.x509Authorities) == 0 && len(b.jwtAuthorities) == 0 +} + +// RefreshHint returns the refresh hint. If the refresh hint is set in +// the bundle, it is returned and the boolean is true. Otherwise, the returned +// value is zero and the boolean is false. +func (b *Bundle) RefreshHint() (refreshHint time.Duration, ok bool) { + b.mtx.RLock() + defer b.mtx.RUnlock() + + if b.refreshHint != nil { + return *b.refreshHint, true + } + return 0, false +} + +// SetRefreshHint sets the refresh hint. The refresh hint value will be +// truncated to time.Second. +func (b *Bundle) SetRefreshHint(refreshHint time.Duration) { + b.mtx.Lock() + defer b.mtx.Unlock() + + b.refreshHint = &refreshHint +} + +// ClearRefreshHint clears the refresh hint. +func (b *Bundle) ClearRefreshHint() { + b.mtx.Lock() + defer b.mtx.Unlock() + + b.refreshHint = nil +} + +// SequenceNumber returns the sequence number. If the sequence number is set in +// the bundle, it is returned and the boolean is true. Otherwise, the returned +// value is zero and the boolean is false. +func (b *Bundle) SequenceNumber() (uint64, bool) { + b.mtx.RLock() + defer b.mtx.RUnlock() + + if b.sequenceNumber != nil { + return *b.sequenceNumber, true + } + return 0, false +} + +// SetSequenceNumber sets the sequence number. 
+func (b *Bundle) SetSequenceNumber(sequenceNumber uint64) { + b.mtx.Lock() + defer b.mtx.Unlock() + + b.sequenceNumber = &sequenceNumber +} + +// ClearSequenceNumber clears the sequence number. +func (b *Bundle) ClearSequenceNumber() { + b.mtx.Lock() + defer b.mtx.Unlock() + + b.sequenceNumber = nil +} + +// Marshal marshals the bundle according to the SPIFFE Trust Domain and Bundle +// specification. The trust domain is not marshaled as part of the bundle and +// must be conveyed separately. See the specification for details. +func (b *Bundle) Marshal() ([]byte, error) { + b.mtx.RLock() + defer b.mtx.RUnlock() + + jwks := bundleDoc{} + if b.refreshHint != nil { + tr := int64((*b.refreshHint + (time.Second - 1)) / time.Second) + jwks.RefreshHint = &tr + } + jwks.SequenceNumber = b.sequenceNumber + for _, x509Authority := range b.x509Authorities { + jwks.Keys = append(jwks.Keys, jose.JSONWebKey{ + Key: x509Authority.PublicKey, + Certificates: []*x509.Certificate{x509Authority}, + Use: x509SVIDUse, + }) + } + + for keyID, jwtAuthority := range b.jwtAuthorities { + jwks.Keys = append(jwks.Keys, jose.JSONWebKey{ + Key: jwtAuthority, + KeyID: keyID, + Use: jwtSVIDUse, + }) + } + + return json.Marshal(jwks) +} + +// Clone clones the bundle. +func (b *Bundle) Clone() *Bundle { + b.mtx.RLock() + defer b.mtx.RUnlock() + + return &Bundle{ + trustDomain: b.trustDomain, + refreshHint: copyRefreshHint(b.refreshHint), + sequenceNumber: copySequenceNumber(b.sequenceNumber), + x509Authorities: x509util.CopyX509Authorities(b.x509Authorities), + jwtAuthorities: jwtutil.CopyJWTAuthorities(b.jwtAuthorities), + } +} + +// X509Bundle returns an X.509 bundle containing the X.509 authorities in the SPIFFE +// bundle. +func (b *Bundle) X509Bundle() *x509bundle.Bundle { + b.mtx.RLock() + defer b.mtx.RUnlock() + + // FromX509Authorities makes a copy, so we can pass our internal slice directly. 
+ return x509bundle.FromX509Authorities(b.trustDomain, b.x509Authorities) +} + +// JWTBundle returns a JWT bundle containing the JWT authorities in the SPIFFE bundle. +func (b *Bundle) JWTBundle() *jwtbundle.Bundle { + b.mtx.RLock() + defer b.mtx.RUnlock() + + // FromJWTBundle makes a copy, so we can pass our internal slice directly. + return jwtbundle.FromJWTAuthorities(b.trustDomain, b.jwtAuthorities) +} + +// GetBundleForTrustDomain returns the SPIFFE bundle for the given trust +// domain. It implements the Source interface. An error will be returned if the +// trust domain does not match that of the bundle. +func (b *Bundle) GetBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*Bundle, error) { + b.mtx.RLock() + defer b.mtx.RUnlock() + + if b.trustDomain != trustDomain { + return nil, spiffebundleErr.New("no SPIFFE bundle for trust domain %q", trustDomain) + } + + return b, nil +} + +// GetX509BundleForTrustDomain returns the X.509 bundle for the given trust +// domain. It implements the x509bundle.Source interface. An error will be +// returned if the trust domain does not match that of the bundle. +func (b *Bundle) GetX509BundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*x509bundle.Bundle, error) { + b.mtx.RLock() + defer b.mtx.RUnlock() + + if b.trustDomain != trustDomain { + return nil, spiffebundleErr.New("no X.509 bundle for trust domain %q", trustDomain) + } + + return b.X509Bundle(), nil +} + +// GetJWTBundleForTrustDomain returns the JWT bundle of the given trust domain. +// It implements the jwtbundle.Source interface. An error will be returned if +// the trust domain does not match that of the bundle. 
+func (b *Bundle) GetJWTBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*jwtbundle.Bundle, error) { + b.mtx.RLock() + defer b.mtx.RUnlock() + + if b.trustDomain != trustDomain { + return nil, spiffebundleErr.New("no JWT bundle for trust domain %q", trustDomain) + } + + return b.JWTBundle(), nil +} + +// Equal compares the bundle for equality against the given bundle. +func (b *Bundle) Equal(other *Bundle) bool { + if b == nil || other == nil { + return b == other + } + + return b.trustDomain == other.trustDomain && + refreshHintEqual(b.refreshHint, other.refreshHint) && + sequenceNumberEqual(b.sequenceNumber, other.sequenceNumber) && + jwtutil.JWTAuthoritiesEqual(b.jwtAuthorities, other.jwtAuthorities) && + x509util.CertsEqual(b.x509Authorities, other.x509Authorities) +} + +func refreshHintEqual(a, b *time.Duration) bool { + if a == nil || b == nil { + return a == b + } + + return *a == *b +} + +func sequenceNumberEqual(a, b *uint64) bool { + if a == nil || b == nil { + return a == b + } + + return *a == *b +} + +func copyRefreshHint(refreshHint *time.Duration) *time.Duration { + if refreshHint == nil { + return nil + } + copied := *refreshHint + return &copied +} + +func copySequenceNumber(sequenceNumber *uint64) *uint64 { + if sequenceNumber == nil { + return nil + } + copied := *sequenceNumber + return &copied +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go new file mode 100644 index 00000000000..6abae68f2bc --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/doc.go @@ -0,0 +1,53 @@ +// Package spiffebundle provides SPIFFE bundle related functionality. +// +// A bundle represents a SPIFFE bundle, a collection authorities for +// authenticating SVIDs. 
+// +// You can create a new bundle for a specific trust domain: +// td := spiffeid.RequireTrustDomain("example.org") +// bundle := spiffebundle.New(td) +// +// Or you can load it from disk: +// td := spiffeid.RequireTrustDomain("example.org") +// bundle := spiffebundle.Load(td, "bundle.json") +// +// The bundle can be initialized with X.509 or JWT authorities: +// td := spiffeid.RequireTrustDomain("example.org") +// +// var x509Authorities []*x509.Certificate = ... +// bundle := spiffebundle.FromX509Authorities(td, x509Authorities) +// // ... or ... +// var jwtAuthorities map[string]crypto.PublicKey = ... +// bundle := spiffebundle.FromJWTAuthorities(td, jwtAuthorities) +// +// In addition, you can add authorities to the bundle: +// var x509CA *x509.Certificate = ... +// bundle.AddX509Authority(x509CA) +// var keyID string = ... +// var publicKey crypto.PublicKey = ... +// bundle.AddJWTAuthority(keyID, publicKey) +// +// Bundles can be organized into a set, keyed by trust domain: +// set := spiffebundle.NewSet() +// set.Add(bundle) +// +// A Source is source of bundles for a trust domain. Both the +// Bundle and Set types implement Source: +// // Initialize the source from a bundle or set +// var source spiffebundle.Source = bundle +// // ... or ... +// var source spiffebundle.Source = set +// +// // Use the source to query for X.509 bundles by trust domain +// bundle, err := source.GetBundleForTrustDomain(td) +// +// Additionally the Bundle and Set types also implement the x509bundle.Source and jwtbundle.Source interfaces: +// +// // As an x509bundle.Source... +// var source x509bundle.Source = bundle // or set +// x509Bundle, err := source.GetX509BundleForTrustDomain(td) +// +// // As a jwtbundle.Source... 
+// var source jwtbundle.Source = bundle // or set +// jwtBundle, err := source.GetJWTBundleForTrustDomain(td) +package spiffebundle diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/set.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/set.go new file mode 100644 index 00000000000..2738135c04a --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/set.go @@ -0,0 +1,135 @@ +package spiffebundle + +import ( + "sort" + "sync" + + "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/spiffeid" +) + +// Set is a set of bundles, keyed by trust domain. +type Set struct { + mtx sync.RWMutex + bundles map[spiffeid.TrustDomain]*Bundle +} + +// NewSet creates a new set initialized with the given bundles. +func NewSet(bundles ...*Bundle) *Set { + bundlesMap := make(map[spiffeid.TrustDomain]*Bundle) + + for _, b := range bundles { + if b != nil { + bundlesMap[b.trustDomain] = b + } + } + + return &Set{ + bundles: bundlesMap, + } +} + +// Add adds a new bundle into the set. If a bundle already exists for the +// trust domain, the existing bundle is replaced. +func (s *Set) Add(bundle *Bundle) { + s.mtx.Lock() + defer s.mtx.Unlock() + + if bundle != nil { + s.bundles[bundle.trustDomain] = bundle + } +} + +// Remove removes the bundle for the given trust domain. +func (s *Set) Remove(trustDomain spiffeid.TrustDomain) { + s.mtx.Lock() + defer s.mtx.Unlock() + + delete(s.bundles, trustDomain) +} + +// Has returns true if there is a bundle for the given trust domain. +func (s *Set) Has(trustDomain spiffeid.TrustDomain) bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + + _, ok := s.bundles[trustDomain] + return ok +} + +// Get returns a bundle for the given trust domain. If the bundle is in the set +// it is returned and the boolean is true. Otherwise, the returned value is +// nil and the boolean is false. 
+func (s *Set) Get(trustDomain spiffeid.TrustDomain) (*Bundle, bool) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + bundle, ok := s.bundles[trustDomain] + return bundle, ok +} + +// Bundles returns the bundles in the set sorted by trust domain. +func (s *Set) Bundles() []*Bundle { + s.mtx.RLock() + defer s.mtx.RUnlock() + + out := make([]*Bundle, 0, len(s.bundles)) + for _, bundle := range s.bundles { + out = append(out, bundle) + } + sort.Slice(out, func(a, b int) bool { + return out[a].TrustDomain().Compare(out[b].TrustDomain()) < 0 + }) + return out +} + +// Len returns the number of bundles in the set. +func (s *Set) Len() int { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return len(s.bundles) +} + +// GetBundleForTrustDomain returns the SPIFFE bundle for the given trust +// domain. It implements the Source interface. +func (s *Set) GetBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*Bundle, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + bundle, ok := s.bundles[trustDomain] + if !ok { + return nil, spiffebundleErr.New("no SPIFFE bundle for trust domain %q", trustDomain) + } + + return bundle, nil +} + +// GetX509BundleForTrustDomain returns the X.509 bundle for the given trust +// domain. It implements the x509bundle.Source interface. +func (s *Set) GetX509BundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*x509bundle.Bundle, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + bundle, ok := s.bundles[trustDomain] + if !ok { + return nil, spiffebundleErr.New("no X.509 bundle for trust domain %q", trustDomain) + } + + return bundle.X509Bundle(), nil +} + +// GetJWTBundleForTrustDomain returns the JWT bundle for the given trust +// domain. It implements the jwtbundle.Source interface. 
+func (s *Set) GetJWTBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*jwtbundle.Bundle, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + bundle, ok := s.bundles[trustDomain] + if !ok { + return nil, spiffebundleErr.New("no JWT bundle for trust domain %q", trustDomain) + } + + return bundle.JWTBundle(), nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/source.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/source.go new file mode 100644 index 00000000000..f4d37125b48 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/spiffebundle/source.go @@ -0,0 +1,10 @@ +package spiffebundle + +import "github.com/spiffe/go-spiffe/v2/spiffeid" + +// Source represents a source of SPIFFE bundles keyed by trust domain. +type Source interface { + // GetBundleForTrustDomain returns the SPIFFE bundle for the given trust + // domain. + GetBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*Bundle, error) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/bundle.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/bundle.go new file mode 100644 index 00000000000..ad391d04cd2 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/bundle.go @@ -0,0 +1,200 @@ +package x509bundle + +import ( + "crypto/x509" + "io" + "io/ioutil" + "sync" + + "github.com/spiffe/go-spiffe/v2/internal/pemutil" + "github.com/spiffe/go-spiffe/v2/internal/x509util" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/zeebo/errs" +) + +var x509bundleErr = errs.Class("x509bundle") + +// Bundle is a collection of trusted X.509 authorities for a trust domain. +type Bundle struct { + trustDomain spiffeid.TrustDomain + + mtx sync.RWMutex + x509Authorities []*x509.Certificate +} + +// New creates a new bundle. +func New(trustDomain spiffeid.TrustDomain) *Bundle { + return &Bundle{ + trustDomain: trustDomain, + } +} + +// FromX509Authorities creates a bundle from X.509 certificates. 
+func FromX509Authorities(trustDomain spiffeid.TrustDomain, authorities []*x509.Certificate) *Bundle { + return &Bundle{ + trustDomain: trustDomain, + x509Authorities: x509util.CopyX509Authorities(authorities), + } +} + +// Load loads a bundle from a file on disk. The file must contain PEM-encoded +// certificate blocks. +func Load(trustDomain spiffeid.TrustDomain, path string) (*Bundle, error) { + fileBytes, err := ioutil.ReadFile(path) + if err != nil { + return nil, x509bundleErr.New("unable to load X.509 bundle file: %w", err) + } + + return Parse(trustDomain, fileBytes) +} + +// Read decodes a bundle from a reader. The contents must be PEM-encoded +// certificate blocks. +func Read(trustDomain spiffeid.TrustDomain, r io.Reader) (*Bundle, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, x509bundleErr.New("unable to read X.509 bundle: %v", err) + } + + return Parse(trustDomain, b) +} + +// Parse parses a bundle from bytes. The data must be PEM-encoded certificate +// blocks. +func Parse(trustDomain spiffeid.TrustDomain, b []byte) (*Bundle, error) { + bundle := New(trustDomain) + certs, err := pemutil.ParseCertificates(b) + if err != nil { + return nil, x509bundleErr.New("cannot parse certificate: %v", err) + } + if len(certs) == 0 { + return nil, x509bundleErr.New("no certificates found") + } + for _, cert := range certs { + bundle.AddX509Authority(cert) + } + return bundle, nil +} + +// ParseRaw parses a bundle from bytes. 
The certificate must be ASN.1 DER (concatenated +// with no intermediate padding if there are more than one certificate) +func ParseRaw(trustDomain spiffeid.TrustDomain, b []byte) (*Bundle, error) { + bundle := New(trustDomain) + certs, err := x509.ParseCertificates(b) + if err != nil { + return nil, x509bundleErr.New("cannot parse certificate: %v", err) + } + if len(certs) == 0 { + return nil, x509bundleErr.New("no certificates found") + } + for _, cert := range certs { + bundle.AddX509Authority(cert) + } + return bundle, nil +} + +// TrustDomain returns the trust domain that the bundle belongs to. +func (b *Bundle) TrustDomain() spiffeid.TrustDomain { + return b.trustDomain +} + +// X509Authorities returns the X.509 x509Authorities in the bundle. +func (b *Bundle) X509Authorities() []*x509.Certificate { + b.mtx.RLock() + defer b.mtx.RUnlock() + return x509util.CopyX509Authorities(b.x509Authorities) +} + +// AddX509Authority adds an X.509 authority to the bundle. If the authority already +// exists in the bundle, the contents of the bundle will remain unchanged. +func (b *Bundle) AddX509Authority(x509Authority *x509.Certificate) { + b.mtx.Lock() + defer b.mtx.Unlock() + + for _, r := range b.x509Authorities { + if r.Equal(x509Authority) { + return + } + } + + b.x509Authorities = append(b.x509Authorities, x509Authority) +} + +// RemoveX509Authority removes an X.509 authority from the bundle. +func (b *Bundle) RemoveX509Authority(x509Authority *x509.Certificate) { + b.mtx.Lock() + defer b.mtx.Unlock() + + for i, r := range b.x509Authorities { + if r.Equal(x509Authority) { + //remove element from slice + b.x509Authorities = append(b.x509Authorities[:i], b.x509Authorities[i+1:]...) + return + } + } +} + +// HasX509Authority checks if the given X.509 authority exists in the bundle. 
+func (b *Bundle) HasX509Authority(x509Authority *x509.Certificate) bool { + b.mtx.RLock() + defer b.mtx.RUnlock() + + for _, r := range b.x509Authorities { + if r.Equal(x509Authority) { + return true + } + } + return false +} + +// SetX509Authorities sets the X.509 authorities in the bundle. +func (b *Bundle) SetX509Authorities(x509Authorities []*x509.Certificate) { + b.mtx.Lock() + defer b.mtx.Unlock() + + b.x509Authorities = x509util.CopyX509Authorities(x509Authorities) +} + +// Empty returns true if the bundle has no X.509 x509Authorities. +func (b *Bundle) Empty() bool { + b.mtx.RLock() + defer b.mtx.RUnlock() + + return len(b.x509Authorities) == 0 +} + +// Marshal marshals the X.509 bundle into PEM-encoded certificate blocks. +func (b *Bundle) Marshal() ([]byte, error) { + b.mtx.RLock() + defer b.mtx.RUnlock() + return pemutil.EncodeCertificates(b.x509Authorities), nil +} + +// Equal compares the bundle for equality against the given bundle. +func (b *Bundle) Equal(other *Bundle) bool { + if b == nil || other == nil { + return b == other + } + + return b.trustDomain == other.trustDomain && + x509util.CertsEqual(b.x509Authorities, other.x509Authorities) +} + +// Clone clones the bundle. +func (b *Bundle) Clone() *Bundle { + b.mtx.RLock() + defer b.mtx.RUnlock() + + return FromX509Authorities(b.trustDomain, b.x509Authorities) +} + +// GetX509BundleForTrustDomain returns the X.509 bundle for the given trust +// domain. It implements the Source interface. An error will be +// returned if the trust domain does not match that of the bundle. 
+func (b *Bundle) GetX509BundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*Bundle, error) { + if b.trustDomain != trustDomain { + return nil, x509bundleErr.New("no X.509 bundle found for trust domain: %q", trustDomain) + } + + return b, nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go new file mode 100644 index 00000000000..0c37688e62f --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/doc.go @@ -0,0 +1,36 @@ +// Package x509bundle provides X.509 bundle related functionality. +// +// A bundle represents a collection of X.509 authorities, i.e., those that +// are used to authenticate SPIFFE X509-SVIDs. +// +// You can create a new bundle for a specific trust domain: +// td := spiffeid.RequireTrustDomain("example.org") +// bundle := x509bundle.New(td) +// +// Or you can load it from disk: +// td := spiffeid.RequireTrustDomain("example.org") +// bundle := x509bundle.Load(td, "bundle.pem") +// +// The bundle can be initialized with X.509 authorities: +// td := spiffeid.RequireTrustDomain("example.org") +// var x509Authorities []*x509.Certificate = ... +// bundle := x509bundle.FromX509Authorities(td, x509Authorities) +// +// In addition, you can add X.509 authorities to the bundle: +// var x509CA *x509.Certificate = ... +// bundle.AddX509Authority(x509CA) +// +// Bundles can be organized into a set, keyed by trust domain: +// set := x509bundle.NewSet() +// set.Add(bundle) +// +// A Source is source of X.509 bundles for a trust domain. Both the Bundle +// and Set types implement Source: +// // Initialize the source from a bundle or set +// var source x509bundle.Source = bundle +// // ... or ... 
+// var source x509bundle.Source = set +// +// // Use the source to query for bundles by trust domain +// bundle, err := source.GetX509BundleForTrustDomain(td) +package x509bundle diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/set.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/set.go new file mode 100644 index 00000000000..522e2492656 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/set.go @@ -0,0 +1,105 @@ +package x509bundle + +import ( + "sort" + "sync" + + "github.com/spiffe/go-spiffe/v2/spiffeid" +) + +// Set is a set of bundles, keyed by trust domain. +type Set struct { + mtx sync.RWMutex + bundles map[spiffeid.TrustDomain]*Bundle +} + +// NewSet creates a new set initialized with the given bundles. +func NewSet(bundles ...*Bundle) *Set { + bundlesMap := make(map[spiffeid.TrustDomain]*Bundle) + + for _, b := range bundles { + if b != nil { + bundlesMap[b.trustDomain] = b + } + } + + return &Set{ + bundles: bundlesMap, + } +} + +// Add adds a new bundle into the set. If a bundle already exists for the +// trust domain, the existing bundle is replaced. +func (s *Set) Add(bundle *Bundle) { + s.mtx.Lock() + defer s.mtx.Unlock() + + if bundle != nil { + s.bundles[bundle.trustDomain] = bundle + } +} + +// Remove removes the bundle for the given trust domain. +func (s *Set) Remove(trustDomain spiffeid.TrustDomain) { + s.mtx.Lock() + defer s.mtx.Unlock() + + delete(s.bundles, trustDomain) +} + +// Has returns true if there is a bundle for the given trust domain. +func (s *Set) Has(trustDomain spiffeid.TrustDomain) bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + + _, ok := s.bundles[trustDomain] + return ok +} + +// Get returns a bundle for the given trust domain. If the bundle is in the set +// it is returned and the boolean is true. Otherwise, the returned value is +// nil and the boolean is false. 
+func (s *Set) Get(trustDomain spiffeid.TrustDomain) (*Bundle, bool) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + bundle, ok := s.bundles[trustDomain] + return bundle, ok +} + +// Bundles returns the bundles in the set sorted by trust domain. +func (s *Set) Bundles() []*Bundle { + s.mtx.RLock() + defer s.mtx.RUnlock() + + out := make([]*Bundle, 0, len(s.bundles)) + for _, bundle := range s.bundles { + out = append(out, bundle) + } + sort.Slice(out, func(a, b int) bool { + return out[a].TrustDomain().Compare(out[b].TrustDomain()) < 0 + }) + return out +} + +// Len returns the number of bundles in the set. +func (s *Set) Len() int { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return len(s.bundles) +} + +// GetX509BundleForTrustDomain returns the X.509 bundle for the given trust +// domain. It implements the Source interface. +func (s *Set) GetX509BundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*Bundle, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + bundle, ok := s.bundles[trustDomain] + if !ok { + return nil, x509bundleErr.New("no X.509 bundle for trust domain %q", trustDomain) + } + + return bundle, nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/source.go b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/source.go new file mode 100644 index 00000000000..22446357bb1 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/bundle/x509bundle/source.go @@ -0,0 +1,12 @@ +package x509bundle + +import ( + "github.com/spiffe/go-spiffe/v2/spiffeid" +) + +// Source represents a source of X.509 bundles keyed by trust domain. +type Source interface { + // GetX509BundleForTrustDomain returns the X.509 bundle for the given trust + // domain. 
+ GetX509BundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*Bundle, error) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/internal/cryptoutil/keys.go b/vendor/github.com/spiffe/go-spiffe/v2/internal/cryptoutil/keys.go new file mode 100644 index 00000000000..7b34480cd0f --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/internal/cryptoutil/keys.go @@ -0,0 +1,29 @@ +package cryptoutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "fmt" +) + +func PublicKeyEqual(a, b crypto.PublicKey) (bool, error) { + switch a := a.(type) { + case *rsa.PublicKey: + rsaPublicKey, ok := b.(*rsa.PublicKey) + return ok && RSAPublicKeyEqual(a, rsaPublicKey), nil + case *ecdsa.PublicKey: + ecdsaPublicKey, ok := b.(*ecdsa.PublicKey) + return ok && ECDSAPublicKeyEqual(a, ecdsaPublicKey), nil + default: + return false, fmt.Errorf("unsupported public key type %T", a) + } +} + +func RSAPublicKeyEqual(a, b *rsa.PublicKey) bool { + return a.E == b.E && a.N.Cmp(b.N) == 0 +} + +func ECDSAPublicKeyEqual(a, b *ecdsa.PublicKey) bool { + return a.Curve == b.Curve && a.X.Cmp(b.X) == 0 && a.Y.Cmp(b.Y) == 0 +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/internal/jwtutil/util.go b/vendor/github.com/spiffe/go-spiffe/v2/internal/jwtutil/util.go new file mode 100644 index 00000000000..86052797898 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/internal/jwtutil/util.go @@ -0,0 +1,34 @@ +package jwtutil + +import ( + "crypto" + + "github.com/spiffe/go-spiffe/v2/internal/cryptoutil" +) + +// CopyJWTAuthorities copies JWT authorities from a map to a new map. 
+func CopyJWTAuthorities(jwtAuthorities map[string]crypto.PublicKey) map[string]crypto.PublicKey { + copiedJWTAuthorities := make(map[string]crypto.PublicKey) + for key, jwtAuthority := range jwtAuthorities { + copiedJWTAuthorities[key] = jwtAuthority + } + return copiedJWTAuthorities +} + +func JWTAuthoritiesEqual(a, b map[string]crypto.PublicKey) bool { + if len(a) != len(b) { + return false + } + + for k, pka := range a { + pkb, ok := b[k] + if !ok { + return false + } + if equal, _ := cryptoutil.PublicKeyEqual(pka, pkb); !equal { + return false + } + } + + return true +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/internal/pemutil/pem.go b/vendor/github.com/spiffe/go-spiffe/v2/internal/pemutil/pem.go new file mode 100644 index 00000000000..26617525a31 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/internal/pemutil/pem.go @@ -0,0 +1,123 @@ +package pemutil + +import ( + "crypto" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" +) + +const ( + certType string = "CERTIFICATE" + keyType string = "PRIVATE KEY" +) + +func ParseCertificates(certsBytes []byte) ([]*x509.Certificate, error) { + objects, err := parseBlocks(certsBytes, certType) + if err != nil { + return nil, err + } + + certs := []*x509.Certificate{} + for _, object := range objects { + cert, ok := object.(*x509.Certificate) + if !ok { + return nil, fmt.Errorf("expected *x509.Certificate; got %T", object) + } + certs = append(certs, cert) + } + + return certs, nil +} + +func ParsePrivateKey(keyBytes []byte) (crypto.PrivateKey, error) { + objects, err := parseBlocks(keyBytes, keyType) + if err != nil { + return nil, err + } + if len(objects) == 0 { + return nil, nil + } + + privateKey, ok := objects[0].(crypto.PrivateKey) + if !ok { + return nil, fmt.Errorf("expected crypto.PrivateKey; got %T", objects[0]) + } + return privateKey, nil +} + +func EncodePKCS8PrivateKey(privateKey interface{}) ([]byte, error) { + keyBytes, err := x509.MarshalPKCS8PrivateKey(privateKey) + if err != nil { 
+ return nil, err + } + + return pem.EncodeToMemory(&pem.Block{ + Type: keyType, + Bytes: keyBytes, + }), nil +} + +func EncodeCertificates(certificates []*x509.Certificate) []byte { + pemBytes := []byte{} + for _, cert := range certificates { + pemBytes = append(pemBytes, pem.EncodeToMemory(&pem.Block{ + Type: certType, + Bytes: cert.Raw, + })...) + } + return pemBytes +} + +func parseBlocks(blocksBytes []byte, expectedType string) ([]interface{}, error) { + objects := []interface{}{} + var foundBlocks = false + for { + if len(blocksBytes) == 0 { + if len(objects) == 0 && !foundBlocks { + return nil, errors.New("no PEM blocks found") + } + return objects, nil + } + object, rest, foundBlock, err := parseBlock(blocksBytes, expectedType) + blocksBytes = rest + if foundBlock { + foundBlocks = true + } + switch { + case err != nil: + return nil, err + case object != nil: + objects = append(objects, object) + } + } +} + +func parseBlock(pemBytes []byte, pemType string) (interface{}, []byte, bool, error) { + pemBlock, rest := pem.Decode(pemBytes) + if pemBlock == nil { + return nil, nil, false, nil + } + + if pemBlock.Type != pemType { + return nil, rest, true, nil + } + + var object interface{} + var err error + switch pemType { + case certType: + object, err = x509.ParseCertificate(pemBlock.Bytes) + case keyType: + object, err = x509.ParsePKCS8PrivateKey(pemBlock.Bytes) + default: + err = fmt.Errorf("PEM type not supported: %q", pemType) + } + + if err != nil { + return nil, nil, false, err + } + + return object, rest, true, nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/internal/x509util/util.go b/vendor/github.com/spiffe/go-spiffe/v2/internal/x509util/util.go new file mode 100644 index 00000000000..c45288d0f6c --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/internal/x509util/util.go @@ -0,0 +1,53 @@ +package x509util + +import ( + "crypto/x509" +) + +// NewCertPool returns a new CertPool with the given X.509 certificates +func NewCertPool(certs 
[]*x509.Certificate) *x509.CertPool { + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool +} + +// CopyX509Authorities copies a slice of X.509 certificates to a new slice. +func CopyX509Authorities(x509Authorities []*x509.Certificate) []*x509.Certificate { + copiedX509Authorities := make([]*x509.Certificate, len(x509Authorities)) + copy(copiedX509Authorities, x509Authorities) + + return copiedX509Authorities +} + +// CertsEqual returns true if the slices of X.509 certificates are equal. +func CertsEqual(a, b []*x509.Certificate) bool { + if len(a) != len(b) { + return false + } + + for i, cert := range a { + if !cert.Equal(b[i]) { + return false + } + } + + return true +} + +func RawCertsFromCerts(certs []*x509.Certificate) [][]byte { + rawCerts := make([][]byte, 0, len(certs)) + for _, cert := range certs { + rawCerts = append(rawCerts, cert.Raw) + } + return rawCerts +} + +func ConcatRawCertsFromCerts(certs []*x509.Certificate) []byte { + var rawCerts []byte + for _, cert := range certs { + rawCerts = append(rawCerts, cert.Raw...) + } + return rawCerts +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/logger/logger.go b/vendor/github.com/spiffe/go-spiffe/v2/logger/logger.go new file mode 100644 index 00000000000..b511eb203ef --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/logger/logger.go @@ -0,0 +1,9 @@ +package logger + +// Logger provides logging facilities to the library. +type Logger interface { + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/logger/null.go b/vendor/github.com/spiffe/go-spiffe/v2/logger/null.go new file mode 100644 index 00000000000..e6ac6876dff --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/logger/null.go @@ -0,0 +1,12 @@ +package logger + +// Null is a no-op logger. 
It is used to suppress logging and is the default +// logger for the library. +var Null Logger = nullLogger{} + +type nullLogger struct{} + +func (nullLogger) Debugf(format string, args ...interface{}) {} +func (nullLogger) Infof(format string, args ...interface{}) {} +func (nullLogger) Warnf(format string, args ...interface{}) {} +func (nullLogger) Errorf(format string, args ...interface{}) {} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/logger/std.go b/vendor/github.com/spiffe/go-spiffe/v2/logger/std.go new file mode 100644 index 00000000000..8150ee4cb4c --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/logger/std.go @@ -0,0 +1,24 @@ +package logger + +import "log" + +// Std is a logger that uses the Go standard log library. +var Std Logger = stdLogger{} + +type stdLogger struct{} + +func (stdLogger) Debugf(format string, args ...interface{}) { + log.Printf("[DEBUG] "+format+"\n", args...) +} + +func (stdLogger) Infof(format string, args ...interface{}) { + log.Printf("[INFO] "+format+"\n", args...) +} + +func (stdLogger) Warnf(format string, args ...interface{}) { + log.Printf("[WARN] "+format+"\n", args...) +} + +func (stdLogger) Errorf(format string, args ...interface{}) { + log.Printf("[ERROR] "+format+"\n", args...) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/logger/writer.go b/vendor/github.com/spiffe/go-spiffe/v2/logger/writer.go new file mode 100644 index 00000000000..2eac515110c --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/logger/writer.go @@ -0,0 +1,31 @@ +package logger + +import ( + "fmt" + "io" +) + +// Writer provides a logger that outputs logging to the given writer. +func Writer(w io.Writer) Logger { + return writer{Writer: w} +} + +type writer struct { + io.Writer +} + +func (w writer) Debugf(format string, args ...interface{}) { + fmt.Fprintf(w.Writer, "[DEBUG] "+format+"\n", args...) +} + +func (w writer) Infof(format string, args ...interface{}) { + fmt.Fprintf(w.Writer, "[INFO] "+format+"\n", args...) 
+} + +func (w writer) Warnf(format string, args ...interface{}) { + fmt.Fprintf(w.Writer, "[WARN] "+format+"\n", args...) +} + +func (w writer) Errorf(format string, args ...interface{}) { + fmt.Fprintf(w.Writer, "[ERROR] "+format+"\n", args...) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.pb.go b/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.pb.go new file mode 100644 index 00000000000..e85bb137acd --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.pb.go @@ -0,0 +1,1016 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: workload.proto + +package workload + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The X509SVIDRequest message conveys parameters for requesting an X.509-SVID. +// There are currently no request parameters. 
+type X509SVIDRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *X509SVIDRequest) Reset() { + *x = X509SVIDRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *X509SVIDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509SVIDRequest) ProtoMessage() {} + +func (x *X509SVIDRequest) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509SVIDRequest.ProtoReflect.Descriptor instead. +func (*X509SVIDRequest) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{0} +} + +// The X509SVIDResponse message carries X.509-SVIDs and related information, +// including a set of global CRLs and a list of bundles the workload may use +// for federating with foreign trust domains. +type X509SVIDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. A list of X509SVID messages, each of which includes a single + // X.509-SVID, its private key, and the bundle for the trust domain. + Svids []*X509SVID `protobuf:"bytes,1,rep,name=svids,proto3" json:"svids,omitempty"` + // Optional. ASN.1 DER encoded certificate revocation lists. + Crl [][]byte `protobuf:"bytes,2,rep,name=crl,proto3" json:"crl,omitempty"` + // Optional. CA certificate bundles belonging to foreign trust domains that + // the workload should trust, keyed by the SPIFFE ID of the foreign trust + // domain. Bundles are ASN.1 DER encoded. 
+ FederatedBundles map[string][]byte `protobuf:"bytes,3,rep,name=federated_bundles,json=federatedBundles,proto3" json:"federated_bundles,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *X509SVIDResponse) Reset() { + *x = X509SVIDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *X509SVIDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509SVIDResponse) ProtoMessage() {} + +func (x *X509SVIDResponse) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509SVIDResponse.ProtoReflect.Descriptor instead. +func (*X509SVIDResponse) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{1} +} + +func (x *X509SVIDResponse) GetSvids() []*X509SVID { + if x != nil { + return x.Svids + } + return nil +} + +func (x *X509SVIDResponse) GetCrl() [][]byte { + if x != nil { + return x.Crl + } + return nil +} + +func (x *X509SVIDResponse) GetFederatedBundles() map[string][]byte { + if x != nil { + return x.FederatedBundles + } + return nil +} + +// The X509SVID message carries a single SVID and all associated information, +// including the X.509 bundle for the trust domain. +type X509SVID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The SPIFFE ID of the SVID in this entry + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3" json:"spiffe_id,omitempty"` + // Required. ASN.1 DER encoded certificate chain. 
MAY include + // intermediates, the leaf certificate (or SVID itself) MUST come first. + X509Svid []byte `protobuf:"bytes,2,opt,name=x509_svid,json=x509Svid,proto3" json:"x509_svid,omitempty"` + // Required. ASN.1 DER encoded PKCS#8 private key. MUST be unencrypted. + X509SvidKey []byte `protobuf:"bytes,3,opt,name=x509_svid_key,json=x509SvidKey,proto3" json:"x509_svid_key,omitempty"` + // Required. ASN.1 DER encoded X.509 bundle for the trust domain. + Bundle []byte `protobuf:"bytes,4,opt,name=bundle,proto3" json:"bundle,omitempty"` +} + +func (x *X509SVID) Reset() { + *x = X509SVID{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *X509SVID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509SVID) ProtoMessage() {} + +func (x *X509SVID) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509SVID.ProtoReflect.Descriptor instead. +func (*X509SVID) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{2} +} + +func (x *X509SVID) GetSpiffeId() string { + if x != nil { + return x.SpiffeId + } + return "" +} + +func (x *X509SVID) GetX509Svid() []byte { + if x != nil { + return x.X509Svid + } + return nil +} + +func (x *X509SVID) GetX509SvidKey() []byte { + if x != nil { + return x.X509SvidKey + } + return nil +} + +func (x *X509SVID) GetBundle() []byte { + if x != nil { + return x.Bundle + } + return nil +} + +// The X509BundlesRequest message conveys parameters for requesting X.509 +// bundles. There are currently no such parameters. 
+type X509BundlesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *X509BundlesRequest) Reset() { + *x = X509BundlesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *X509BundlesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509BundlesRequest) ProtoMessage() {} + +func (x *X509BundlesRequest) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509BundlesRequest.ProtoReflect.Descriptor instead. +func (*X509BundlesRequest) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{3} +} + +// The X509BundlesResponse message carries a set of global CRLs and a map of +// trust bundles the workload should trust. +type X509BundlesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Optional. ASN.1 DER encoded certificate revocation lists. + Crl [][]byte `protobuf:"bytes,1,rep,name=crl,proto3" json:"crl,omitempty"` + // Required. CA certificate bundles belonging to trust domains that the + // workload should trust, keyed by the SPIFFE ID of the trust domain. + // Bundles are ASN.1 DER encoded. 
+ Bundles map[string][]byte `protobuf:"bytes,2,rep,name=bundles,proto3" json:"bundles,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *X509BundlesResponse) Reset() { + *x = X509BundlesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *X509BundlesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509BundlesResponse) ProtoMessage() {} + +func (x *X509BundlesResponse) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509BundlesResponse.ProtoReflect.Descriptor instead. +func (*X509BundlesResponse) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{4} +} + +func (x *X509BundlesResponse) GetCrl() [][]byte { + if x != nil { + return x.Crl + } + return nil +} + +func (x *X509BundlesResponse) GetBundles() map[string][]byte { + if x != nil { + return x.Bundles + } + return nil +} + +type JWTSVIDRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The audience(s) the workload intends to authenticate against. + Audience []string `protobuf:"bytes,1,rep,name=audience,proto3" json:"audience,omitempty"` + // Optional. The requested SPIFFE ID for the JWT-SVID. If unset, all + // JWT-SVIDs to which the workload is entitled are requested. 
+ SpiffeId string `protobuf:"bytes,2,opt,name=spiffe_id,json=spiffeId,proto3" json:"spiffe_id,omitempty"` +} + +func (x *JWTSVIDRequest) Reset() { + *x = JWTSVIDRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JWTSVIDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JWTSVIDRequest) ProtoMessage() {} + +func (x *JWTSVIDRequest) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JWTSVIDRequest.ProtoReflect.Descriptor instead. +func (*JWTSVIDRequest) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{5} +} + +func (x *JWTSVIDRequest) GetAudience() []string { + if x != nil { + return x.Audience + } + return nil +} + +func (x *JWTSVIDRequest) GetSpiffeId() string { + if x != nil { + return x.SpiffeId + } + return "" +} + +// The JWTSVIDResponse message conveys JWT-SVIDs. +type JWTSVIDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The list of returned JWT-SVIDs. 
+ Svids []*JWTSVID `protobuf:"bytes,1,rep,name=svids,proto3" json:"svids,omitempty"` +} + +func (x *JWTSVIDResponse) Reset() { + *x = JWTSVIDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JWTSVIDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JWTSVIDResponse) ProtoMessage() {} + +func (x *JWTSVIDResponse) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JWTSVIDResponse.ProtoReflect.Descriptor instead. +func (*JWTSVIDResponse) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{6} +} + +func (x *JWTSVIDResponse) GetSvids() []*JWTSVID { + if x != nil { + return x.Svids + } + return nil +} + +// The JWTSVID message carries the JWT-SVID token and associated metadata. +type JWTSVID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The SPIFFE ID of the JWT-SVID. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3" json:"spiffe_id,omitempty"` + // Required. Encoded JWT using JWS Compact Serialization. 
+ Svid string `protobuf:"bytes,2,opt,name=svid,proto3" json:"svid,omitempty"` +} + +func (x *JWTSVID) Reset() { + *x = JWTSVID{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JWTSVID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JWTSVID) ProtoMessage() {} + +func (x *JWTSVID) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JWTSVID.ProtoReflect.Descriptor instead. +func (*JWTSVID) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{7} +} + +func (x *JWTSVID) GetSpiffeId() string { + if x != nil { + return x.SpiffeId + } + return "" +} + +func (x *JWTSVID) GetSvid() string { + if x != nil { + return x.Svid + } + return "" +} + +// The JWTBundlesRequest message conveys parameters for requesting JWT bundles. +// There are currently no such parameters. 
+type JWTBundlesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *JWTBundlesRequest) Reset() { + *x = JWTBundlesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JWTBundlesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JWTBundlesRequest) ProtoMessage() {} + +func (x *JWTBundlesRequest) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JWTBundlesRequest.ProtoReflect.Descriptor instead. +func (*JWTBundlesRequest) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{8} +} + +// The JWTBundlesReponse conveys JWT bundles. +type JWTBundlesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. JWK encoded JWT bundles, keyed by the SPIFFE ID of the trust + // domain. 
+ Bundles map[string][]byte `protobuf:"bytes,1,rep,name=bundles,proto3" json:"bundles,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *JWTBundlesResponse) Reset() { + *x = JWTBundlesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JWTBundlesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JWTBundlesResponse) ProtoMessage() {} + +func (x *JWTBundlesResponse) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JWTBundlesResponse.ProtoReflect.Descriptor instead. +func (*JWTBundlesResponse) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{9} +} + +func (x *JWTBundlesResponse) GetBundles() map[string][]byte { + if x != nil { + return x.Bundles + } + return nil +} + +// The ValidateJWTSVIDRequest message conveys request parameters for +// JWT-SVID validation. +type ValidateJWTSVIDRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The audience of the validating party. The JWT-SVID must + // contain an audience claim which contains this value in order to + // succesfully validate. + Audience string `protobuf:"bytes,1,opt,name=audience,proto3" json:"audience,omitempty"` + // Required. The JWT-SVID to validate, encoded using JWS Compact + // Serialization. 
+ Svid string `protobuf:"bytes,2,opt,name=svid,proto3" json:"svid,omitempty"` +} + +func (x *ValidateJWTSVIDRequest) Reset() { + *x = ValidateJWTSVIDRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateJWTSVIDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateJWTSVIDRequest) ProtoMessage() {} + +func (x *ValidateJWTSVIDRequest) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateJWTSVIDRequest.ProtoReflect.Descriptor instead. +func (*ValidateJWTSVIDRequest) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{10} +} + +func (x *ValidateJWTSVIDRequest) GetAudience() string { + if x != nil { + return x.Audience + } + return "" +} + +func (x *ValidateJWTSVIDRequest) GetSvid() string { + if x != nil { + return x.Svid + } + return "" +} + +// The ValidateJWTSVIDReponse message conveys the JWT-SVID validation results. +type ValidateJWTSVIDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The SPIFFE ID of the validated JWT-SVID. + SpiffeId string `protobuf:"bytes,1,opt,name=spiffe_id,json=spiffeId,proto3" json:"spiffe_id,omitempty"` + // Optional. Arbitrary claims contained within the payload of the validated + // JWT-SVID. 
+ Claims *structpb.Struct `protobuf:"bytes,2,opt,name=claims,proto3" json:"claims,omitempty"` +} + +func (x *ValidateJWTSVIDResponse) Reset() { + *x = ValidateJWTSVIDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_workload_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ValidateJWTSVIDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateJWTSVIDResponse) ProtoMessage() {} + +func (x *ValidateJWTSVIDResponse) ProtoReflect() protoreflect.Message { + mi := &file_workload_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateJWTSVIDResponse.ProtoReflect.Descriptor instead. +func (*ValidateJWTSVIDResponse) Descriptor() ([]byte, []int) { + return file_workload_proto_rawDescGZIP(), []int{11} +} + +func (x *ValidateJWTSVIDResponse) GetSpiffeId() string { + if x != nil { + return x.SpiffeId + } + return "" +} + +func (x *ValidateJWTSVIDResponse) GetClaims() *structpb.Struct { + if x != nil { + return x.Claims + } + return nil +} + +var File_workload_proto protoreflect.FileDescriptor + +var file_workload_proto_rawDesc = []byte{ + 0x0a, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x11, + 0x0a, 0x0f, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0xe0, 0x01, 0x0a, 0x10, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x05, 0x73, 0x76, 0x69, 0x64, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x58, 
0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, + 0x52, 0x05, 0x73, 0x76, 0x69, 0x64, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x72, 0x6c, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x54, 0x0a, 0x11, 0x66, 0x65, 0x64, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x66, + 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x1a, + 0x43, 0x0a, 0x15, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x42, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x80, 0x01, 0x0a, 0x08, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, + 0x44, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x73, 0x76, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x08, 0x78, 0x35, 0x30, 0x39, 0x53, 0x76, 0x69, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x78, + 0x35, 0x30, 0x39, 0x5f, 0x73, 0x76, 0x69, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0b, 0x78, 0x35, 0x30, 0x39, 0x53, 0x76, 0x69, 0x64, 0x4b, 0x65, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x58, 0x35, 0x30, 0x39, 
0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xa0, 0x01, + 0x0a, 0x13, 0x58, 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0c, 0x52, 0x03, 0x63, 0x72, 0x6c, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x49, 0x0a, 0x0e, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, + 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x22, 0x31, 0x0a, 0x0f, 0x4a, + 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, + 0x0a, 0x05, 0x73, 0x76, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, + 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x05, 0x73, 0x76, 0x69, 0x64, 0x73, 0x22, 0x3a, + 0x0a, 0x07, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x69, + 0x66, 0x66, 0x65, 0x5f, 0x69, 
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x70, + 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x76, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x73, 0x76, 0x69, 0x64, 0x22, 0x13, 0x0a, 0x11, 0x4a, 0x57, + 0x54, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x8c, 0x01, 0x0a, 0x12, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x42, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x48, + 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, + 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, + 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, + 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x76, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x73, 0x76, 0x69, 0x64, 0x22, 0x67, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x70, 0x69, 
0x66, 0x66, 0x65, 0x49, 0x64, + 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x61, 0x69, 0x6d, + 0x73, 0x32, 0xc3, 0x02, 0x0a, 0x11, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x57, 0x6f, 0x72, 0x6b, + 0x6c, 0x6f, 0x61, 0x64, 0x41, 0x50, 0x49, 0x12, 0x36, 0x0a, 0x0d, 0x46, 0x65, 0x74, 0x63, 0x68, + 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, 0x12, 0x10, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x53, + 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x58, 0x35, 0x30, + 0x39, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, + 0x3f, 0x0a, 0x10, 0x46, 0x65, 0x74, 0x63, 0x68, 0x58, 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x73, 0x12, 0x13, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, + 0x12, 0x31, 0x0a, 0x0c, 0x46, 0x65, 0x74, 0x63, 0x68, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, + 0x12, 0x0f, 0x2e, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x10, 0x2e, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0f, 0x46, 0x65, 0x74, 0x63, 0x68, 0x4a, 0x57, 0x54, 0x42, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x12, 0x2e, 0x4a, 0x57, 0x54, 0x42, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x4a, 0x57, 0x54, + 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, + 0x01, 0x12, 0x44, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, + 0x53, 
0x56, 0x49, 0x44, 0x12, 0x17, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, + 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3f, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x67, 0x6f, 0x2d, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x3b, + 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_workload_proto_rawDescOnce sync.Once + file_workload_proto_rawDescData = file_workload_proto_rawDesc +) + +func file_workload_proto_rawDescGZIP() []byte { + file_workload_proto_rawDescOnce.Do(func() { + file_workload_proto_rawDescData = protoimpl.X.CompressGZIP(file_workload_proto_rawDescData) + }) + return file_workload_proto_rawDescData +} + +var file_workload_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_workload_proto_goTypes = []interface{}{ + (*X509SVIDRequest)(nil), // 0: X509SVIDRequest + (*X509SVIDResponse)(nil), // 1: X509SVIDResponse + (*X509SVID)(nil), // 2: X509SVID + (*X509BundlesRequest)(nil), // 3: X509BundlesRequest + (*X509BundlesResponse)(nil), // 4: X509BundlesResponse + (*JWTSVIDRequest)(nil), // 5: JWTSVIDRequest + (*JWTSVIDResponse)(nil), // 6: JWTSVIDResponse + (*JWTSVID)(nil), // 7: JWTSVID + (*JWTBundlesRequest)(nil), // 8: JWTBundlesRequest + (*JWTBundlesResponse)(nil), // 9: JWTBundlesResponse + (*ValidateJWTSVIDRequest)(nil), // 10: ValidateJWTSVIDRequest + (*ValidateJWTSVIDResponse)(nil), // 11: ValidateJWTSVIDResponse + nil, // 12: X509SVIDResponse.FederatedBundlesEntry + nil, // 13: X509BundlesResponse.BundlesEntry + nil, // 14: 
JWTBundlesResponse.BundlesEntry + (*structpb.Struct)(nil), // 15: google.protobuf.Struct +} +var file_workload_proto_depIdxs = []int32{ + 2, // 0: X509SVIDResponse.svids:type_name -> X509SVID + 12, // 1: X509SVIDResponse.federated_bundles:type_name -> X509SVIDResponse.FederatedBundlesEntry + 13, // 2: X509BundlesResponse.bundles:type_name -> X509BundlesResponse.BundlesEntry + 7, // 3: JWTSVIDResponse.svids:type_name -> JWTSVID + 14, // 4: JWTBundlesResponse.bundles:type_name -> JWTBundlesResponse.BundlesEntry + 15, // 5: ValidateJWTSVIDResponse.claims:type_name -> google.protobuf.Struct + 0, // 6: SpiffeWorkloadAPI.FetchX509SVID:input_type -> X509SVIDRequest + 3, // 7: SpiffeWorkloadAPI.FetchX509Bundles:input_type -> X509BundlesRequest + 5, // 8: SpiffeWorkloadAPI.FetchJWTSVID:input_type -> JWTSVIDRequest + 8, // 9: SpiffeWorkloadAPI.FetchJWTBundles:input_type -> JWTBundlesRequest + 10, // 10: SpiffeWorkloadAPI.ValidateJWTSVID:input_type -> ValidateJWTSVIDRequest + 1, // 11: SpiffeWorkloadAPI.FetchX509SVID:output_type -> X509SVIDResponse + 4, // 12: SpiffeWorkloadAPI.FetchX509Bundles:output_type -> X509BundlesResponse + 6, // 13: SpiffeWorkloadAPI.FetchJWTSVID:output_type -> JWTSVIDResponse + 9, // 14: SpiffeWorkloadAPI.FetchJWTBundles:output_type -> JWTBundlesResponse + 11, // 15: SpiffeWorkloadAPI.ValidateJWTSVID:output_type -> ValidateJWTSVIDResponse + 11, // [11:16] is the sub-list for method output_type + 6, // [6:11] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_workload_proto_init() } +func file_workload_proto_init() { + if File_workload_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_workload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509SVIDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 
2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509SVIDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509SVID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509BundlesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509BundlesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JWTSVIDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JWTSVIDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JWTSVID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JWTBundlesRequest); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JWTBundlesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateJWTSVIDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_workload_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ValidateJWTSVIDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_workload_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_workload_proto_goTypes, + DependencyIndexes: file_workload_proto_depIdxs, + MessageInfos: file_workload_proto_msgTypes, + }.Build() + File_workload_proto = out.File + file_workload_proto_rawDesc = nil + file_workload_proto_goTypes = nil + file_workload_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.proto b/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.proto new file mode 100644 index 00000000000..64b66c8b6f6 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload.proto @@ -0,0 +1,154 @@ +syntax = "proto3"; + +import "google/protobuf/struct.proto"; + +service SpiffeWorkloadAPI { + ///////////////////////////////////////////////////////////////////////// + // X509-SVID Profile + 
///////////////////////////////////////////////////////////////////////// + + // Fetch X.509-SVIDs for all SPIFFE identities the workload is entitled to, + // as well as related information like trust bundles and CRLs. As this + // information changes, subsequent messages will be streamed from the + // server. + rpc FetchX509SVID(X509SVIDRequest) returns (stream X509SVIDResponse); + + // Fetch trust bundles and CRLs. Useful for clients that only need to + // validate SVIDs without obtaining an SVID for themself. As this + // information changes, subsequent messages will be streamed from the + // server. + rpc FetchX509Bundles(X509BundlesRequest) returns (stream X509BundlesResponse); + + ///////////////////////////////////////////////////////////////////////// + // JWT-SVID Profile + ///////////////////////////////////////////////////////////////////////// + + // Fetch JWT-SVIDs for all SPIFFE identities the workload is entitled to, + // for the requested audience. If an optional SPIFFE ID is requested, only + // the JWT-SVID for that SPIFFE ID is returned. + rpc FetchJWTSVID(JWTSVIDRequest) returns (JWTSVIDResponse); + + // Fetches the JWT bundles, formatted as JWKS documents, keyed by the + // SPIFFE ID of the trust domain. As this information changes, subsequent + // messages will be streamed from the server. + rpc FetchJWTBundles(JWTBundlesRequest) returns (stream JWTBundlesResponse); + + // Validates a JWT-SVID against the requested audience. Returns the SPIFFE + // ID of the JWT-SVID and JWT claims. + rpc ValidateJWTSVID(ValidateJWTSVIDRequest) returns (ValidateJWTSVIDResponse); +} + +// The X509SVIDRequest message conveys parameters for requesting an X.509-SVID. +// There are currently no request parameters. +message X509SVIDRequest { } + +// The X509SVIDResponse message carries X.509-SVIDs and related information, +// including a set of global CRLs and a list of bundles the workload may use +// for federating with foreign trust domains. 
+message X509SVIDResponse { + // Required. A list of X509SVID messages, each of which includes a single + // X.509-SVID, its private key, and the bundle for the trust domain. + repeated X509SVID svids = 1; + + // Optional. ASN.1 DER encoded certificate revocation lists. + repeated bytes crl = 2; + + // Optional. CA certificate bundles belonging to foreign trust domains that + // the workload should trust, keyed by the SPIFFE ID of the foreign trust + // domain. Bundles are ASN.1 DER encoded. + map federated_bundles = 3; +} + +// The X509SVID message carries a single SVID and all associated information, +// including the X.509 bundle for the trust domain. +message X509SVID { + // Required. The SPIFFE ID of the SVID in this entry + string spiffe_id = 1; + + // Required. ASN.1 DER encoded certificate chain. MAY include + // intermediates, the leaf certificate (or SVID itself) MUST come first. + bytes x509_svid = 2; + + // Required. ASN.1 DER encoded PKCS#8 private key. MUST be unencrypted. + bytes x509_svid_key = 3; + + // Required. ASN.1 DER encoded X.509 bundle for the trust domain. + bytes bundle = 4; +} + +// The X509BundlesRequest message conveys parameters for requesting X.509 +// bundles. There are currently no such parameters. +message X509BundlesRequest { +} + +// The X509BundlesResponse message carries a set of global CRLs and a map of +// trust bundles the workload should trust. +message X509BundlesResponse { + // Optional. ASN.1 DER encoded certificate revocation lists. + repeated bytes crl = 1; + + // Required. CA certificate bundles belonging to trust domains that the + // workload should trust, keyed by the SPIFFE ID of the trust domain. + // Bundles are ASN.1 DER encoded. + map bundles = 2; +} + +message JWTSVIDRequest { + // Required. The audience(s) the workload intends to authenticate against. + repeated string audience = 1; + + // Optional. The requested SPIFFE ID for the JWT-SVID. 
If unset, all + // JWT-SVIDs to which the workload is entitled are requested. + string spiffe_id = 2; +} + +// The JWTSVIDResponse message conveys JWT-SVIDs. +message JWTSVIDResponse { + // Required. The list of returned JWT-SVIDs. + repeated JWTSVID svids = 1; +} + +// The JWTSVID message carries the JWT-SVID token and associated metadata. +message JWTSVID { + // Required. The SPIFFE ID of the JWT-SVID. + string spiffe_id = 1; + + // Required. Encoded JWT using JWS Compact Serialization. + string svid = 2; +} + +// The JWTBundlesRequest message conveys parameters for requesting JWT bundles. +// There are currently no such parameters. +message JWTBundlesRequest { } + +// The JWTBundlesReponse conveys JWT bundles. +message JWTBundlesResponse { + // Required. JWK encoded JWT bundles, keyed by the SPIFFE ID of the trust + // domain. + map bundles = 1; +} + +// The ValidateJWTSVIDRequest message conveys request parameters for +// JWT-SVID validation. +message ValidateJWTSVIDRequest { + // Required. The audience of the validating party. The JWT-SVID must + // contain an audience claim which contains this value in order to + // succesfully validate. + string audience = 1; + + // Required. The JWT-SVID to validate, encoded using JWS Compact + // Serialization. + string svid = 2; +} + +// The ValidateJWTSVIDReponse message conveys the JWT-SVID validation results. +message ValidateJWTSVIDResponse { + // Required. The SPIFFE ID of the validated JWT-SVID. + string spiffe_id = 1; + + // Optional. Arbitrary claims contained within the payload of the validated + // JWT-SVID. 
+ google.protobuf.Struct claims = 2; +} + +option go_package = "github.com/spiffe/go-spiffe/v2/proto/spiffe/workload;workload"; diff --git a/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload_grpc.pb.go b/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload_grpc.pb.go new file mode 100644 index 00000000000..4dcb38736c7 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/proto/spiffe/workload/workload_grpc.pb.go @@ -0,0 +1,355 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package workload + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion7 + +// SpiffeWorkloadAPIClient is the client API for SpiffeWorkloadAPI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SpiffeWorkloadAPIClient interface { + // Fetch X.509-SVIDs for all SPIFFE identities the workload is entitled to, + // as well as related information like trust bundles and CRLs. As this + // information changes, subsequent messages will be streamed from the + // server. + FetchX509SVID(ctx context.Context, in *X509SVIDRequest, opts ...grpc.CallOption) (SpiffeWorkloadAPI_FetchX509SVIDClient, error) + // Fetch trust bundles and CRLs. Useful for clients that only need to + // validate SVIDs without obtaining an SVID for themself. As this + // information changes, subsequent messages will be streamed from the + // server. 
+ FetchX509Bundles(ctx context.Context, in *X509BundlesRequest, opts ...grpc.CallOption) (SpiffeWorkloadAPI_FetchX509BundlesClient, error) + // Fetch JWT-SVIDs for all SPIFFE identities the workload is entitled to, + // for the requested audience. If an optional SPIFFE ID is requested, only + // the JWT-SVID for that SPIFFE ID is returned. + FetchJWTSVID(ctx context.Context, in *JWTSVIDRequest, opts ...grpc.CallOption) (*JWTSVIDResponse, error) + // Fetches the JWT bundles, formatted as JWKS documents, keyed by the + // SPIFFE ID of the trust domain. As this information changes, subsequent + // messages will be streamed from the server. + FetchJWTBundles(ctx context.Context, in *JWTBundlesRequest, opts ...grpc.CallOption) (SpiffeWorkloadAPI_FetchJWTBundlesClient, error) + // Validates a JWT-SVID against the requested audience. Returns the SPIFFE + // ID of the JWT-SVID and JWT claims. + ValidateJWTSVID(ctx context.Context, in *ValidateJWTSVIDRequest, opts ...grpc.CallOption) (*ValidateJWTSVIDResponse, error) +} + +type spiffeWorkloadAPIClient struct { + cc grpc.ClientConnInterface +} + +func NewSpiffeWorkloadAPIClient(cc grpc.ClientConnInterface) SpiffeWorkloadAPIClient { + return &spiffeWorkloadAPIClient{cc} +} + +func (c *spiffeWorkloadAPIClient) FetchX509SVID(ctx context.Context, in *X509SVIDRequest, opts ...grpc.CallOption) (SpiffeWorkloadAPI_FetchX509SVIDClient, error) { + stream, err := c.cc.NewStream(ctx, &_SpiffeWorkloadAPI_serviceDesc.Streams[0], "/SpiffeWorkloadAPI/FetchX509SVID", opts...) 
+ if err != nil { + return nil, err + } + x := &spiffeWorkloadAPIFetchX509SVIDClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SpiffeWorkloadAPI_FetchX509SVIDClient interface { + Recv() (*X509SVIDResponse, error) + grpc.ClientStream +} + +type spiffeWorkloadAPIFetchX509SVIDClient struct { + grpc.ClientStream +} + +func (x *spiffeWorkloadAPIFetchX509SVIDClient) Recv() (*X509SVIDResponse, error) { + m := new(X509SVIDResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *spiffeWorkloadAPIClient) FetchX509Bundles(ctx context.Context, in *X509BundlesRequest, opts ...grpc.CallOption) (SpiffeWorkloadAPI_FetchX509BundlesClient, error) { + stream, err := c.cc.NewStream(ctx, &_SpiffeWorkloadAPI_serviceDesc.Streams[1], "/SpiffeWorkloadAPI/FetchX509Bundles", opts...) + if err != nil { + return nil, err + } + x := &spiffeWorkloadAPIFetchX509BundlesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SpiffeWorkloadAPI_FetchX509BundlesClient interface { + Recv() (*X509BundlesResponse, error) + grpc.ClientStream +} + +type spiffeWorkloadAPIFetchX509BundlesClient struct { + grpc.ClientStream +} + +func (x *spiffeWorkloadAPIFetchX509BundlesClient) Recv() (*X509BundlesResponse, error) { + m := new(X509BundlesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *spiffeWorkloadAPIClient) FetchJWTSVID(ctx context.Context, in *JWTSVIDRequest, opts ...grpc.CallOption) (*JWTSVIDResponse, error) { + out := new(JWTSVIDResponse) + err := c.cc.Invoke(ctx, "/SpiffeWorkloadAPI/FetchJWTSVID", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *spiffeWorkloadAPIClient) FetchJWTBundles(ctx context.Context, in *JWTBundlesRequest, opts ...grpc.CallOption) (SpiffeWorkloadAPI_FetchJWTBundlesClient, error) { + stream, err := c.cc.NewStream(ctx, &_SpiffeWorkloadAPI_serviceDesc.Streams[2], "/SpiffeWorkloadAPI/FetchJWTBundles", opts...) + if err != nil { + return nil, err + } + x := &spiffeWorkloadAPIFetchJWTBundlesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type SpiffeWorkloadAPI_FetchJWTBundlesClient interface { + Recv() (*JWTBundlesResponse, error) + grpc.ClientStream +} + +type spiffeWorkloadAPIFetchJWTBundlesClient struct { + grpc.ClientStream +} + +func (x *spiffeWorkloadAPIFetchJWTBundlesClient) Recv() (*JWTBundlesResponse, error) { + m := new(JWTBundlesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *spiffeWorkloadAPIClient) ValidateJWTSVID(ctx context.Context, in *ValidateJWTSVIDRequest, opts ...grpc.CallOption) (*ValidateJWTSVIDResponse, error) { + out := new(ValidateJWTSVIDResponse) + err := c.cc.Invoke(ctx, "/SpiffeWorkloadAPI/ValidateJWTSVID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SpiffeWorkloadAPIServer is the server API for SpiffeWorkloadAPI service. +// All implementations must embed UnimplementedSpiffeWorkloadAPIServer +// for forward compatibility +type SpiffeWorkloadAPIServer interface { + // Fetch X.509-SVIDs for all SPIFFE identities the workload is entitled to, + // as well as related information like trust bundles and CRLs. As this + // information changes, subsequent messages will be streamed from the + // server. + FetchX509SVID(*X509SVIDRequest, SpiffeWorkloadAPI_FetchX509SVIDServer) error + // Fetch trust bundles and CRLs. 
Useful for clients that only need to + // validate SVIDs without obtaining an SVID for themself. As this + // information changes, subsequent messages will be streamed from the + // server. + FetchX509Bundles(*X509BundlesRequest, SpiffeWorkloadAPI_FetchX509BundlesServer) error + // Fetch JWT-SVIDs for all SPIFFE identities the workload is entitled to, + // for the requested audience. If an optional SPIFFE ID is requested, only + // the JWT-SVID for that SPIFFE ID is returned. + FetchJWTSVID(context.Context, *JWTSVIDRequest) (*JWTSVIDResponse, error) + // Fetches the JWT bundles, formatted as JWKS documents, keyed by the + // SPIFFE ID of the trust domain. As this information changes, subsequent + // messages will be streamed from the server. + FetchJWTBundles(*JWTBundlesRequest, SpiffeWorkloadAPI_FetchJWTBundlesServer) error + // Validates a JWT-SVID against the requested audience. Returns the SPIFFE + // ID of the JWT-SVID and JWT claims. + ValidateJWTSVID(context.Context, *ValidateJWTSVIDRequest) (*ValidateJWTSVIDResponse, error) + mustEmbedUnimplementedSpiffeWorkloadAPIServer() +} + +// UnimplementedSpiffeWorkloadAPIServer must be embedded to have forward compatible implementations. 
+type UnimplementedSpiffeWorkloadAPIServer struct { +} + +func (UnimplementedSpiffeWorkloadAPIServer) FetchX509SVID(*X509SVIDRequest, SpiffeWorkloadAPI_FetchX509SVIDServer) error { + return status.Errorf(codes.Unimplemented, "method FetchX509SVID not implemented") +} +func (UnimplementedSpiffeWorkloadAPIServer) FetchX509Bundles(*X509BundlesRequest, SpiffeWorkloadAPI_FetchX509BundlesServer) error { + return status.Errorf(codes.Unimplemented, "method FetchX509Bundles not implemented") +} +func (UnimplementedSpiffeWorkloadAPIServer) FetchJWTSVID(context.Context, *JWTSVIDRequest) (*JWTSVIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method FetchJWTSVID not implemented") +} +func (UnimplementedSpiffeWorkloadAPIServer) FetchJWTBundles(*JWTBundlesRequest, SpiffeWorkloadAPI_FetchJWTBundlesServer) error { + return status.Errorf(codes.Unimplemented, "method FetchJWTBundles not implemented") +} +func (UnimplementedSpiffeWorkloadAPIServer) ValidateJWTSVID(context.Context, *ValidateJWTSVIDRequest) (*ValidateJWTSVIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ValidateJWTSVID not implemented") +} +func (UnimplementedSpiffeWorkloadAPIServer) mustEmbedUnimplementedSpiffeWorkloadAPIServer() {} + +// UnsafeSpiffeWorkloadAPIServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SpiffeWorkloadAPIServer will +// result in compilation errors. 
+type UnsafeSpiffeWorkloadAPIServer interface { + mustEmbedUnimplementedSpiffeWorkloadAPIServer() +} + +func RegisterSpiffeWorkloadAPIServer(s grpc.ServiceRegistrar, srv SpiffeWorkloadAPIServer) { + s.RegisterService(&_SpiffeWorkloadAPI_serviceDesc, srv) +} + +func _SpiffeWorkloadAPI_FetchX509SVID_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(X509SVIDRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SpiffeWorkloadAPIServer).FetchX509SVID(m, &spiffeWorkloadAPIFetchX509SVIDServer{stream}) +} + +type SpiffeWorkloadAPI_FetchX509SVIDServer interface { + Send(*X509SVIDResponse) error + grpc.ServerStream +} + +type spiffeWorkloadAPIFetchX509SVIDServer struct { + grpc.ServerStream +} + +func (x *spiffeWorkloadAPIFetchX509SVIDServer) Send(m *X509SVIDResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SpiffeWorkloadAPI_FetchX509Bundles_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(X509BundlesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SpiffeWorkloadAPIServer).FetchX509Bundles(m, &spiffeWorkloadAPIFetchX509BundlesServer{stream}) +} + +type SpiffeWorkloadAPI_FetchX509BundlesServer interface { + Send(*X509BundlesResponse) error + grpc.ServerStream +} + +type spiffeWorkloadAPIFetchX509BundlesServer struct { + grpc.ServerStream +} + +func (x *spiffeWorkloadAPIFetchX509BundlesServer) Send(m *X509BundlesResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SpiffeWorkloadAPI_FetchJWTSVID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JWTSVIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpiffeWorkloadAPIServer).FetchJWTSVID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/SpiffeWorkloadAPI/FetchJWTSVID", + } + handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { + return srv.(SpiffeWorkloadAPIServer).FetchJWTSVID(ctx, req.(*JWTSVIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _SpiffeWorkloadAPI_FetchJWTBundles_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(JWTBundlesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SpiffeWorkloadAPIServer).FetchJWTBundles(m, &spiffeWorkloadAPIFetchJWTBundlesServer{stream}) +} + +type SpiffeWorkloadAPI_FetchJWTBundlesServer interface { + Send(*JWTBundlesResponse) error + grpc.ServerStream +} + +type spiffeWorkloadAPIFetchJWTBundlesServer struct { + grpc.ServerStream +} + +func (x *spiffeWorkloadAPIFetchJWTBundlesServer) Send(m *JWTBundlesResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _SpiffeWorkloadAPI_ValidateJWTSVID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateJWTSVIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SpiffeWorkloadAPIServer).ValidateJWTSVID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/SpiffeWorkloadAPI/ValidateJWTSVID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SpiffeWorkloadAPIServer).ValidateJWTSVID(ctx, req.(*ValidateJWTSVIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _SpiffeWorkloadAPI_serviceDesc = grpc.ServiceDesc{ + ServiceName: "SpiffeWorkloadAPI", + HandlerType: (*SpiffeWorkloadAPIServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "FetchJWTSVID", + Handler: _SpiffeWorkloadAPI_FetchJWTSVID_Handler, + }, + { + MethodName: "ValidateJWTSVID", + Handler: _SpiffeWorkloadAPI_ValidateJWTSVID_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "FetchX509SVID", + Handler: _SpiffeWorkloadAPI_FetchX509SVID_Handler, + ServerStreams: true, + }, + 
{ + StreamName: "FetchX509Bundles", + Handler: _SpiffeWorkloadAPI_FetchX509Bundles_Handler, + ServerStreams: true, + }, + { + StreamName: "FetchJWTBundles", + Handler: _SpiffeWorkloadAPI_FetchJWTBundles_Handler, + ServerStreams: true, + }, + }, + Metadata: "workload.proto", +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/charset_backcompat_allow.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/charset_backcompat_allow.go new file mode 100644 index 00000000000..9bd225dfb62 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/charset_backcompat_allow.go @@ -0,0 +1,42 @@ +//go:build spiffeid_charset_backcompat +// +build spiffeid_charset_backcompat + +package spiffeid + +func isBackcompatTrustDomainChar(c uint8) bool { + if isSubDelim(c) { + return true + } + switch c { + // unreserved + case '~': + return true + default: + return false + } +} + +func isBackcompatPathChar(c uint8) bool { + if isSubDelim(c) { + return true + } + switch c { + // unreserved + case '~': + return true + // gen-delims + case ':', '[', ']', '@': + return true + default: + return false + } +} + +func isSubDelim(c uint8) bool { + switch c { + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': + return true + default: + return false + } +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/charset_backcompat_deny.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/charset_backcompat_deny.go new file mode 100644 index 00000000000..11447473b92 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/charset_backcompat_deny.go @@ -0,0 +1,12 @@ +//go:build !spiffeid_charset_backcompat +// +build !spiffeid_charset_backcompat + +package spiffeid + +func isBackcompatTrustDomainChar(c uint8) bool { + return false +} + +func isBackcompatPathChar(c uint8) bool { + return false +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/errors.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/errors.go new file mode 100644 index 
00000000000..cc9defee140 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/errors.go @@ -0,0 +1,15 @@ +package spiffeid + +import "errors" + +var ( + errBadTrustDomainChar = errors.New("trust domain characters are limited to lowercase letters, numbers, dots, dashes, and underscores") + errBadPathSegmentChar = errors.New("path segment characters are limited to letters, numbers, dots, dashes, and underscores") + errDotSegment = errors.New("path cannot contain dot segments") + errNoLeadingSlash = errors.New("path must have a leading slash") + errEmpty = errors.New("cannot be empty") + errEmptySegment = errors.New("path cannot contain empty segments") + errMissingTrustDomain = errors.New("trust domain is missing") + errTrailingSlash = errors.New("path cannot have a trailing slash") + errWrongScheme = errors.New("scheme is missing or invalid") +) diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/id.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/id.go new file mode 100644 index 00000000000..f4e02eee7f8 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/id.go @@ -0,0 +1,258 @@ +package spiffeid + +import ( + "errors" + "fmt" + "net/url" + "strings" +) + +const ( + schemePrefix = "spiffe://" + schemePrefixLen = len(schemePrefix) +) + +// FromPath returns a new SPIFFE ID in the given trust domain and with the +// given path. The supplied path must be a valid absolute path according to the +// SPIFFE specification. +// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func FromPath(td TrustDomain, path string) (ID, error) { + if err := ValidatePath(path); err != nil { + return ID{}, err + } + return makeID(td, path) +} + +// FromPathf returns a new SPIFFE ID from the formatted path in the given trust +// domain. The formatted path must be a valid absolute path according to the +// SPIFFE specification. 
+// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func FromPathf(td TrustDomain, format string, args ...interface{}) (ID, error) { + path, err := FormatPath(format, args...) + if err != nil { + return ID{}, err + } + return makeID(td, path) +} + +// FromSegments returns a new SPIFFE ID in the given trust domain with joined +// path segments. The path segments must be valid according to the SPIFFE +// specification and must not contain path separators. +// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func FromSegments(td TrustDomain, segments ...string) (ID, error) { + path, err := JoinPathSegments(segments...) + if err != nil { + return ID{}, err + } + return makeID(td, path) +} + +// FromString parses a SPIFFE ID from a string. +func FromString(id string) (ID, error) { + switch { + case id == "": + return ID{}, errEmpty + case !strings.HasPrefix(id, schemePrefix): + return ID{}, errWrongScheme + } + + pathidx := schemePrefixLen + for ; pathidx < len(id); pathidx++ { + c := id[pathidx] + if c == '/' { + break + } + if !isValidTrustDomainChar(c) { + return ID{}, errBadTrustDomainChar + } + } + + if pathidx == schemePrefixLen { + return ID{}, errMissingTrustDomain + } + + if err := ValidatePath(id[pathidx:]); err != nil { + return ID{}, err + } + + return ID{ + id: id, + pathidx: pathidx, + }, nil +} + +// FromStringf parses a SPIFFE ID from a formatted string. +func FromStringf(format string, args ...interface{}) (ID, error) { + return FromString(fmt.Sprintf(format, args...)) +} + +// FromURI parses a SPIFFE ID from a URI. +func FromURI(uri *url.URL) (ID, error) { + return FromString(uri.String()) +} + +// ID is a SPIFFE ID +type ID struct { + id string + + // pathidx tracks the index to the beginning of the path inside of id. This + // is used when extracting the trust domain or path portions of the id. + pathidx int +} + +// TrustDomain returns the trust domain of the SPIFFE ID. 
+func (id ID) TrustDomain() TrustDomain { + if id.IsZero() { + return TrustDomain{} + } + return TrustDomain{name: id.id[schemePrefixLen:id.pathidx]} +} + +// MemberOf returns true if the SPIFFE ID is a member of the given trust domain. +func (id ID) MemberOf(td TrustDomain) bool { + return id.TrustDomain() == td +} + +// Path returns the path of the SPIFFE ID inside the trust domain. +func (id ID) Path() string { + return id.id[id.pathidx:] +} + +// String returns the string representation of the SPIFFE ID, e.g., +// "spiffe://example.org/foo/bar". +func (id ID) String() string { + return id.id +} + +// URL returns a URL for SPIFFE ID. +func (id ID) URL() *url.URL { + if id.IsZero() { + return &url.URL{} + } + + return &url.URL{ + Scheme: "spiffe", + Host: id.TrustDomain().String(), + Path: id.Path(), + } +} + +// IsZero returns true if the SPIFFE ID is the zero value. +func (id ID) IsZero() bool { + return id.id == "" +} + +// AppendPath returns an ID with the appended path. It will fail if called on a +// zero value. The path to append must be a valid absolute path according to +// the SPIFFE specification. +// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func (id ID) AppendPath(path string) (ID, error) { + if id.IsZero() { + return ID{}, errors.New("cannot append path on a zero ID value") + } + if err := ValidatePath(path); err != nil { + return ID{}, err + } + id.id += path + return id, nil +} + +// AppendPathf returns an ID with the appended formatted path. It will fail if +// called on a zero value. The formatted path must be a valid absolute path +// according to the SPIFFE specification. +// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func (id ID) AppendPathf(format string, args ...interface{}) (ID, error) { + if id.IsZero() { + return ID{}, errors.New("cannot append path on a zero ID value") + } + path, err := FormatPath(format, args...) 
+ if err != nil { + return ID{}, err + } + id.id += path + return id, nil +} + +// AppendSegments returns an ID with the appended joined path segments. It +// will fail if called on a zero value. The path segments must be valid +// according to the SPIFFE specification and must not contain path separators. +// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func (id ID) AppendSegments(segments ...string) (ID, error) { + if id.IsZero() { + return ID{}, errors.New("cannot append path segments on a zero ID value") + } + path, err := JoinPathSegments(segments...) + if err != nil { + return ID{}, err + } + id.id += path + return id, nil +} + +// Replace path returns an ID with the given path in the same trust domain. It +// will fail if called on a zero value. The given path must be a valid absolute +// path according to the SPIFFE specification. +// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func (id ID) ReplacePath(path string) (ID, error) { + if id.IsZero() { + return ID{}, errors.New("cannot replace path on a zero ID value") + } + return FromPath(id.TrustDomain(), path) +} + +// ReplacePathf returns an ID with the formatted path in the same trust domain. +// It will fail if called on a zero value. The formatted path must be a valid +// absolute path according to the SPIFFE specification. +// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func (id ID) ReplacePathf(format string, args ...interface{}) (ID, error) { + if id.IsZero() { + return ID{}, errors.New("cannot replace path on a zero ID value") + } + return FromPathf(id.TrustDomain(), format, args...) +} + +// ReplaceSegments returns an ID with the joined path segments in the same +// trust domain. It will fail if called on a zero value. The path segments must +// be valid according to the SPIFFE specification and must not contain path +// separators. 
+// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func (id ID) ReplaceSegments(segments ...string) (ID, error) { + if id.IsZero() { + return ID{}, errors.New("cannot replace path segments on a zero ID value") + } + return FromSegments(id.TrustDomain(), segments...) +} + +// MarshalText returns a text representation of the ID. If the ID is the zero +// value, nil is returned. +func (id ID) MarshalText() ([]byte, error) { + if id.IsZero() { + return nil, nil + } + return []byte(id.String()), nil +} + +// UnmarshalText decodes a text representation of the ID. If the text is empty, +// the ID is set to the zero value. +func (id *ID) UnmarshalText(text []byte) error { + if len(text) == 0 { + *id = ID{} + return nil + } + unmarshaled, err := FromString(string(text)) + if err != nil { + return err + } + *id = unmarshaled + return nil +} + +func makeID(td TrustDomain, path string) (ID, error) { + if td.IsZero() { + return ID{}, errors.New("trust domain is empty") + } + return ID{ + id: schemePrefix + td.name + path, + pathidx: schemePrefixLen + len(td.name), + }, nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/match.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/match.go new file mode 100644 index 00000000000..ae129738071 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/match.go @@ -0,0 +1,47 @@ +package spiffeid + +import "fmt" + +// Matcher is used to match a SPIFFE ID. +type Matcher func(ID) error + +// MatchAny matches any SPIFFE ID. +func MatchAny() Matcher { + return Matcher(func(actual ID) error { + return nil + }) +} + +// MatchID matches a specific SPIFFE ID. +func MatchID(expected ID) Matcher { + return Matcher(func(actual ID) error { + if actual != expected { + return fmt.Errorf("unexpected ID %q", actual) + } + return nil + }) +} + +// MatchOneOf matches any SPIFFE ID in the given list of IDs. 
+func MatchOneOf(expected ...ID) Matcher { + set := make(map[ID]struct{}) + for _, id := range expected { + set[id] = struct{}{} + } + return Matcher(func(actual ID) error { + if _, ok := set[actual]; !ok { + return fmt.Errorf("unexpected ID %q", actual) + } + return nil + }) +} + +// MatchMemberOf matches any SPIFFE ID in the given trust domain. +func MatchMemberOf(expected TrustDomain) Matcher { + return Matcher(func(actual ID) error { + if !actual.MemberOf(expected) { + return fmt.Errorf("unexpected trust domain %q", actual.TrustDomain()) + } + return nil + }) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/path.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/path.go new file mode 100644 index 00000000000..7c75602c22d --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/path.go @@ -0,0 +1,101 @@ +package spiffeid + +import ( + "fmt" + "strings" +) + +// FormatPath builds a path by formatting the given formatting string with +// the given args (i.e. fmt.Sprintf). The resulting path must be valid or +// an error is returned. +func FormatPath(format string, args ...interface{}) (string, error) { + path := fmt.Sprintf(format, args...) + if err := ValidatePath(path); err != nil { + return "", err + } + return path, nil +} + +// JoinPathSegments joins one or more path segments into a slash separated +// path. Segments cannot contain slashes. The resulting path must be valid or +// an error is returned. If no segments are provided, an empty string is +// returned. +func JoinPathSegments(segments ...string) (string, error) { + var builder strings.Builder + for _, segment := range segments { + if err := validatePathSegment(segment); err != nil { + return "", err + } + builder.WriteByte('/') + builder.WriteString(segment) + } + return builder.String(), nil +} + +// ValidatePath validates that a path string is a conformant path for a SPIFFE +// ID. 
+// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#22-path +func ValidatePath(path string) error { + switch { + case path == "": + return nil + case path[0] != '/': + return errNoLeadingSlash + } + + segmentStart := 0 + segmentEnd := 0 + for ; segmentEnd < len(path); segmentEnd++ { + c := path[segmentEnd] + if c == '/' { + switch path[segmentStart:segmentEnd] { + case "/": + return errEmptySegment + case "/.", "/..": + return errDotSegment + } + segmentStart = segmentEnd + continue + } + if !isValidPathSegmentChar(c) { + return errBadPathSegmentChar + } + } + + switch path[segmentStart:segmentEnd] { + case "/": + return errTrailingSlash + case "/.", "/..": + return errDotSegment + } + return nil +} + +func validatePathSegment(segment string) error { + if segment == "" { + return errEmptySegment + } + for i := 0; i < len(segment); i++ { + if !isValidPathSegmentChar(segment[i]) { + return errBadPathSegmentChar + } + } + return nil +} + +func isValidPathSegmentChar(c uint8) bool { + switch { + case c >= 'a' && c <= 'z': + return true + case c >= 'A' && c <= 'Z': + return true + case c >= '0' && c <= '9': + return true + case c == '-', c == '.', c == '_': + return true + case isBackcompatPathChar(c): + return true + default: + return false + } +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/require.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/require.go new file mode 100644 index 00000000000..798b54c1bbc --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/require.go @@ -0,0 +1,103 @@ +package spiffeid + +import ( + "net/url" +) + +// RequireFromPath is similar to FromPath except that instead of returning an +// error on malformed input, it panics. It should only be used when the input +// is statically verifiable. 
+func RequireFromPath(td TrustDomain, path string) ID { + id, err := FromPath(td, path) + panicOnErr(err) + return id +} + +// RequireFromPathf is similar to FromPathf except that instead of returning an +// error on malformed input, it panics. It should only be used when the input +// is statically verifiable. +func RequireFromPathf(td TrustDomain, format string, args ...interface{}) ID { + id, err := FromPathf(td, format, args...) + panicOnErr(err) + return id +} + +// RequireFromSegments is similar to FromSegments except that instead of +// returning an error on malformed input, it panics. It should only be used +// when the input is statically verifiable. +func RequireFromSegments(td TrustDomain, segments ...string) ID { + id, err := FromSegments(td, segments...) + panicOnErr(err) + return id +} + +// RequireFromString is similar to FromString except that instead of returning +// an error on malformed input, it panics. It should only be used when the +// input is statically verifiable. +func RequireFromString(s string) ID { + id, err := FromString(s) + panicOnErr(err) + return id +} + +// RequireFromStringf is similar to FromStringf except that instead of +// returning an error on malformed input, it panics. It should only be used +// when the input is statically verifiable. +func RequireFromStringf(format string, args ...interface{}) ID { + id, err := FromStringf(format, args...) + panicOnErr(err) + return id +} + +// RequireFromURI is similar to FromURI except that instead of returning an +// error on malformed input, it panics. It should only be used when the input is +// statically verifiable. +func RequireFromURI(uri *url.URL) ID { + id, err := FromURI(uri) + panicOnErr(err) + return id +} + +// RequireTrustDomainFromString is similar to TrustDomainFromString except that +// instead of returning an error on malformed input, it panics. It should only +// be used when the input is statically verifiable. 
+func RequireTrustDomainFromString(s string) TrustDomain { + td, err := TrustDomainFromString(s) + panicOnErr(err) + return td +} + +// RequireTrustDomainFromURI is similar to TrustDomainFromURI except that +// instead of returning an error on malformed input, it panics. It should only +// be used when the input is statically verifiable. +func RequireTrustDomainFromURI(uri *url.URL) TrustDomain { + td, err := TrustDomainFromURI(uri) + panicOnErr(err) + return td +} + +// RequireFormatPath builds a path by formatting the given formatting string +// with the given args (i.e. fmt.Sprintf). The resulting path must be valid or +// the function panics. It should only be used when the input is statically +// verifiable. +func RequireFormatPath(format string, args ...interface{}) string { + path, err := FormatPath(format, args...) + panicOnErr(err) + return path +} + +// RequireJoinPathSegments joins one or more path segments into a slash separated +// path. Segments cannot contain slashes. The resulting path must be valid or +// the function panics. It should only be used when the input is statically +// verifiable. +func RequireJoinPathSegments(segments ...string) string { + path, err := JoinPathSegments(segments...) + panicOnErr(err) + return path +} + +func panicOnErr(err error) { + if err != nil { + panic(err) + } +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/trustdomain.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/trustdomain.go new file mode 100644 index 00000000000..4e3157a6931 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffeid/trustdomain.go @@ -0,0 +1,122 @@ +package spiffeid + +import ( + "net/url" + "strings" +) + +// TrustDomain represents the trust domain portion of a SPIFFE ID (e.g. +// example.org). +type TrustDomain struct { + name string +} + +// TrustDomainFromString returns a new TrustDomain from a string. The string +// can either be a trust domain name (e.g. example.org), or a valid SPIFFE ID +// URI (e.g. 
spiffe://example.org), otherwise an error is returned. +// See https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE-ID.md#21-trust-domain. +func TrustDomainFromString(idOrName string) (TrustDomain, error) { + switch { + case idOrName == "": + return TrustDomain{}, errMissingTrustDomain + case strings.Contains(idOrName, ":/"): + // The ID looks like it has something like a scheme separator, let's + // try to parse as an ID. We use :/ instead of :// since the + // diagnostics are better for a bad input like spiffe:/trustdomain. + id, err := FromString(idOrName) + if err != nil { + return TrustDomain{}, err + } + return id.TrustDomain(), nil + default: + for i := 0; i < len(idOrName); i++ { + if !isValidTrustDomainChar(idOrName[i]) { + return TrustDomain{}, errBadTrustDomainChar + } + } + return TrustDomain{name: idOrName}, nil + } +} + +// TrustDomainFromURI returns a new TrustDomain from a URI. The URI must be a +// valid SPIFFE ID (see FromURI) or an error is returned. The trust domain is +// extracted from the host field. +func TrustDomainFromURI(uri *url.URL) (TrustDomain, error) { + id, err := FromURI(uri) + if err != nil { + return TrustDomain{}, err + } + + return id.TrustDomain(), nil +} + +// String returns the trust domain as a string, e.g. example.org. +func (td TrustDomain) String() string { + return td.name +} + +// ID returns the SPIFFE ID of the trust domain. +func (td TrustDomain) ID() ID { + if id, err := makeID(td, ""); err == nil { + return id + } + return ID{} +} + +// IDString returns a string representation of the the SPIFFE ID of the trust +// domain, e.g. "spiffe://example.org". +func (td TrustDomain) IDString() string { + return td.ID().String() +} + +// IsZero returns true if the trust domain is the zero value. +func (td TrustDomain) IsZero() bool { + return td.name == "" +} + +// Compare returns an integer comparing the trust domain to another +// lexicographically. 
The result will be 0 if td==other, -1 if td < other, and +// +1 if td > other. +func (td TrustDomain) Compare(other TrustDomain) int { + return strings.Compare(td.name, other.name) +} + +// MarshalText returns a text representation of the trust domain. If the trust +// domain is the zero value, nil is returned. +func (td TrustDomain) MarshalText() ([]byte, error) { + if td.IsZero() { + return nil, nil + } + return []byte(td.String()), nil +} + +// UnmarshalText decodes a text representation of the trust domain. If the text +// is empty, the trust domain is set to the zero value. +func (td *TrustDomain) UnmarshalText(text []byte) error { + if len(text) == 0 { + *td = TrustDomain{} + return nil + } + + unmarshaled, err := TrustDomainFromString(string(text)) + if err != nil { + return err + } + *td = unmarshaled + return nil +} + +func isValidTrustDomainChar(c uint8) bool { + switch { + case c >= 'a' && c <= 'z': + return true + case c >= '0' && c <= '9': + return true + case c == '-', c == '.', c == '_': + return true + case isBackcompatTrustDomainChar(c): + return true + default: + return false + } +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/authorizer.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/authorizer.go new file mode 100644 index 00000000000..b3f7e7e4ad2 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/authorizer.go @@ -0,0 +1,40 @@ +package tlsconfig + +import ( + "crypto/x509" + + "github.com/spiffe/go-spiffe/v2/spiffeid" +) + +// Authorizer authorizes an X509-SVID given the SPIFFE ID and the chain +// of trust. The certificate chain starts with the X509-SVID certificate back +// to an X.509 root for the trust domain. +type Authorizer func(id spiffeid.ID, verifiedChains [][]*x509.Certificate) error + +// AuthorizeAny allows any SPIFFE ID. +func AuthorizeAny() Authorizer { + return AdaptMatcher(spiffeid.MatchAny()) +} + +// AuthorizeID allows a specific SPIFFE ID. 
+func AuthorizeID(allowed spiffeid.ID) Authorizer { + return AdaptMatcher(spiffeid.MatchID(allowed)) +} + +// AuthorizeOneOf allows any SPIFFE ID in the given list of IDs. +func AuthorizeOneOf(allowed ...spiffeid.ID) Authorizer { + return AdaptMatcher(spiffeid.MatchOneOf(allowed...)) +} + +// AuthorizeMemberOf allows any SPIFFE ID in the given trust domain. +func AuthorizeMemberOf(allowed spiffeid.TrustDomain) Authorizer { + return AdaptMatcher(spiffeid.MatchMemberOf(allowed)) +} + +// AdaptMatcher adapts any spiffeid.Matcher for use as an Authorizer which +// only authorizes the SPIFFE ID but otherwise ignores the verified chains. +func AdaptMatcher(matcher spiffeid.Matcher) Authorizer { + return Authorizer(func(actual spiffeid.ID, verifiedChains [][]*x509.Certificate) error { + return matcher(actual) + }) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/config.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/config.go new file mode 100644 index 00000000000..53b36ed07c2 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/config.go @@ -0,0 +1,245 @@ +package tlsconfig + +import ( + "crypto/tls" + "crypto/x509" + + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" +) + +// TLSClientConfig returns a TLS configuration which verifies and authorizes +// the server X509-SVID. +func TLSClientConfig(bundle x509bundle.Source, authorizer Authorizer, opts ...Option) *tls.Config { + config := new(tls.Config) + HookTLSClientConfig(config, bundle, authorizer, opts...) + return config +} + +// HookTLSClientConfig sets up the TLS configuration to verify and authorize +// the server X509-SVID. If there is an existing callback set for +// VerifyPeerCertificate it will be wrapped by by this package and invoked +// after SPIFFE authentication has completed. 
+func HookTLSClientConfig(config *tls.Config, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) { + resetAuthFields(config) + config.InsecureSkipVerify = true + config.VerifyPeerCertificate = WrapVerifyPeerCertificate(config.VerifyPeerCertificate, bundle, authorizer, opts...) +} + +// A Option changes the defaults used to by mTLS ClientConfig functions. +type Option interface { + apply(*options) +} + +type option func(*options) + +func (fn option) apply(o *options) { fn(o) } + +type options struct { + trace Trace +} + +func newOptions(opts []Option) *options { + out := &options{} + for _, opt := range opts { + opt.apply(out) + } + return out +} + +// WithTrace will use the provided tracing callbacks +// when various TLS config functions gets invoked. +func WithTrace(trace Trace) Option { + return option(func(opts *options) { + opts.trace = trace + }) +} + +// MTLSClientConfig returns a TLS configuration which presents an X509-SVID +// to the server and verifies and authorizes the server X509-SVID. +func MTLSClientConfig(svid x509svid.Source, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) *tls.Config { + config := new(tls.Config) + HookMTLSClientConfig(config, svid, bundle, authorizer, opts...) + return config +} + +// HookMTLSClientConfig sets up the TLS configuration to present an X509-SVID +// to the server and verify and authorize the server X509-SVID. If there is an +// existing callback set for VerifyPeerCertificate it will be wrapped by by +// this package and invoked after SPIFFE authentication has completed. +func HookMTLSClientConfig(config *tls.Config, svid x509svid.Source, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) { + resetAuthFields(config) + config.GetClientCertificate = GetClientCertificate(svid, opts...) + config.InsecureSkipVerify = true + config.VerifyPeerCertificate = WrapVerifyPeerCertificate(config.VerifyPeerCertificate, bundle, authorizer, opts...) 
+} + +// MTLSWebClientConfig returns a TLS configuration which presents an X509-SVID +// to the server and verifies the server certificate using provided roots (or +// the system roots if nil). +func MTLSWebClientConfig(svid x509svid.Source, roots *x509.CertPool, opts ...Option) *tls.Config { + config := new(tls.Config) + HookMTLSWebClientConfig(config, svid, roots, opts...) + return config +} + +// HookMTLSWebClientConfig sets up the TLS configuration to present an +// X509-SVID to the server and verifies the server certificate using the +// provided roots (or the system roots if nil). +func HookMTLSWebClientConfig(config *tls.Config, svid x509svid.Source, roots *x509.CertPool, opts ...Option) { + resetAuthFields(config) + config.GetClientCertificate = GetClientCertificate(svid, opts...) + config.RootCAs = roots +} + +// TLSServerConfig returns a TLS configuration which presents an X509-SVID +// to the client and does not require or verify client certificates. +func TLSServerConfig(svid x509svid.Source, opts ...Option) *tls.Config { + config := new(tls.Config) + HookTLSServerConfig(config, svid, opts...) + return config +} + +// HookTLSServerConfig sets up the TLS configuration to present an X509-SVID +// to the client and to not require or verify client certificates. +func HookTLSServerConfig(config *tls.Config, svid x509svid.Source, opts ...Option) { + resetAuthFields(config) + config.GetCertificate = GetCertificate(svid, opts...) +} + +// MTLSServerConfig returns a TLS configuration which presents an X509-SVID +// to the client and requires, verifies, and authorizes client X509-SVIDs. +func MTLSServerConfig(svid x509svid.Source, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) *tls.Config { + config := new(tls.Config) + HookMTLSServerConfig(config, svid, bundle, authorizer, opts...) 
+ return config +} + +// HookMTLSServerConfig sets up the TLS configuration to present an X509-SVID +// to the client and require, verify, and authorize the client X509-SVID. If +// there is an existing callback set for VerifyPeerCertificate it will be +// wrapped by by this package and invoked after SPIFFE authentication has +// completed. +func HookMTLSServerConfig(config *tls.Config, svid x509svid.Source, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) { + resetAuthFields(config) + config.ClientAuth = tls.RequireAnyClientCert + config.GetCertificate = GetCertificate(svid, opts...) + config.VerifyPeerCertificate = WrapVerifyPeerCertificate(config.VerifyPeerCertificate, bundle, authorizer, opts...) +} + +// MTLSWebServerConfig returns a TLS configuration which presents a web +// server certificate to the client and requires, verifies, and authorizes +// client X509-SVIDs. +func MTLSWebServerConfig(cert *tls.Certificate, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) *tls.Config { + config := new(tls.Config) + HookMTLSWebServerConfig(config, cert, bundle, authorizer, opts...) + return config +} + +// HookMTLSWebServerConfig sets up the TLS configuration to presents a web +// server certificate to the client and require, verify, and authorize client +// X509-SVIDs. If there is an existing callback set for VerifyPeerCertificate +// it will be wrapped by by this package and invoked after SPIFFE +// authentication has completed. +func HookMTLSWebServerConfig(config *tls.Config, cert *tls.Certificate, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) { + resetAuthFields(config) + config.ClientAuth = tls.RequireAnyClientCert + config.Certificates = []tls.Certificate{*cert} + config.VerifyPeerCertificate = WrapVerifyPeerCertificate(config.VerifyPeerCertificate, bundle, authorizer, opts...) +} + +// GetCertificate returns a GetCertificate callback for tls.Config. 
It uses the +// given X509-SVID getter to obtain a server X509-SVID for the TLS handshake. +func GetCertificate(svid x509svid.Source, opts ...Option) func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + opt := newOptions(opts) + return func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + return getTLSCertificate(svid, opt.trace) + } +} + +// GetClientCertificate returns a GetClientCertificate callback for tls.Config. +// It uses the given X509-SVID getter to obtain a client X509-SVID for the TLS +// handshake. +func GetClientCertificate(svid x509svid.Source, opts ...Option) func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + opt := newOptions(opts) + return func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + return getTLSCertificate(svid, opt.trace) + } +} + +// VerifyPeerCertificate returns a VerifyPeerCertificate callback for +// tls.Config. It uses the given bundle source and authorizer to verify and +// authorize X509-SVIDs provided by peers during the TLS handshake. +func VerifyPeerCertificate(bundle x509bundle.Source, authorizer Authorizer, opts ...Option) func([][]byte, [][]*x509.Certificate) error { + return func(raw [][]byte, _ [][]*x509.Certificate) error { + id, certs, err := x509svid.ParseAndVerify(raw, bundle) + if err != nil { + return err + } + + return authorizer(id, certs) + } +} + +// WrapVerifyPeerCertificate wraps a VeriyPeerCertificate callback, performing +// SPIFFE authentication against the peer certificates using the given bundle and +// authorizer. The wrapped callback will be passed the verified chains. +// Note: TLS clients must set `InsecureSkipVerify` when doing SPIFFE authentication to disable hostname verification. +func WrapVerifyPeerCertificate(wrapped func([][]byte, [][]*x509.Certificate) error, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) func([][]byte, [][]*x509.Certificate) error { + if wrapped == nil { + return VerifyPeerCertificate(bundle, authorizer, opts...) 
+ } + + return func(raw [][]byte, _ [][]*x509.Certificate) error { + id, certs, err := x509svid.ParseAndVerify(raw, bundle) + if err != nil { + return err + } + + if err := authorizer(id, certs); err != nil { + return err + } + + return wrapped(raw, certs) + } +} + +func getTLSCertificate(svid x509svid.Source, trace Trace) (*tls.Certificate, error) { + var traceVal interface{} + if trace.GetCertificate != nil { + traceVal = trace.GetCertificate(GetCertificateInfo{}) + } + + s, err := svid.GetX509SVID() + if err != nil { + if trace.GotCertificate != nil { + trace.GotCertificate(GotCertificateInfo{Err: err}, traceVal) + } + return nil, err + } + + cert := &tls.Certificate{ + Certificate: make([][]byte, 0, len(s.Certificates)), + PrivateKey: s.PrivateKey, + } + + for _, svidCert := range s.Certificates { + cert.Certificate = append(cert.Certificate, svidCert.Raw) + } + + if trace.GotCertificate != nil { + trace.GotCertificate(GotCertificateInfo{Cert: cert}, traceVal) + } + + return cert, nil +} + +func resetAuthFields(config *tls.Config) { + config.Certificates = nil + config.ClientAuth = tls.NoClientCert + config.GetCertificate = nil + config.GetClientCertificate = nil + config.InsecureSkipVerify = false + config.NameToCertificate = nil //nolint:staticcheck // setting to nil is OK + config.RootCAs = nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/trace.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/trace.go new file mode 100644 index 00000000000..954d3945d3f --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/trace.go @@ -0,0 +1,22 @@ +package tlsconfig + +import ( + "crypto/tls" +) + +// GetCertificateInfo is an empty placeholder for future expansion +type GetCertificateInfo struct { +} + +// GotCertificateInfo provides err and TLS certificate info to Trace +type GotCertificateInfo struct { + Cert *tls.Certificate + Err error +} + +// Trace is the interface to define what functions are triggered 
when functions +// in tlsconfig are called +type Trace struct { + GetCertificate func(GetCertificateInfo) interface{} + GotCertificate func(GotCertificateInfo, interface{}) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/source.go b/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/source.go new file mode 100644 index 00000000000..ae2fda3d571 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/source.go @@ -0,0 +1,21 @@ +package jwtsvid + +import ( + "context" + + "github.com/spiffe/go-spiffe/v2/spiffeid" +) + +// Params are JWT-SVID parameters used when fetching a new JWT-SVID. +type Params struct { + Audience string + ExtraAudiences []string + Subject spiffeid.ID +} + +// Source represents a source of JWT-SVIDs. +type Source interface { + // FetchJWTSVID fetches a JWT-SVID from the source with the given + // parameters. + FetchJWTSVID(ctx context.Context, params Params) (*SVID, error) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go b/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go new file mode 100644 index 00000000000..a3e0fe73f8c --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/svid/jwtsvid/svid.go @@ -0,0 +1,167 @@ +package jwtsvid + +import ( + "fmt" + "time" + + "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/zeebo/errs" + "gopkg.in/square/go-jose.v2" + "gopkg.in/square/go-jose.v2/jwt" +) + +var ( + jwtsvidErr = errs.Class("jwtsvid") +) + +// tokenValidator validates the token and returns the claims +type tokenValidator = func(*jwt.JSONWebToken, spiffeid.TrustDomain) (map[string]interface{}, error) + +// SVID represents a JWT-SVID. 
+type SVID struct { + // ID is the SPIFFE ID of the JWT-SVID as present in the 'sub' claim + ID spiffeid.ID + // Audience is the intended recipients of JWT-SVID as present in the 'aud' claim + Audience []string + // Expiry is the expiration time of JWT-SVID as present in 'exp' claim + Expiry time.Time + // Claims is the parsed claims from token + Claims map[string]interface{} + + // token is the serialized JWT token + token string +} + +// ParseAndValidate parses and validates a JWT-SVID token and returns the +// JWT-SVID. The JWT-SVID signature is verified using the JWT bundle source. +func ParseAndValidate(token string, bundles jwtbundle.Source, audience []string) (*SVID, error) { + return parse(token, audience, func(tok *jwt.JSONWebToken, trustDomain spiffeid.TrustDomain) (map[string]interface{}, error) { + // Obtain the key ID from the header + keyID := tok.Headers[0].KeyID + if keyID == "" { + return nil, jwtsvidErr.New("token header missing key id") + } + + // Get JWT Bundle + bundle, err := bundles.GetJWTBundleForTrustDomain(trustDomain) + if err != nil { + return nil, jwtsvidErr.New("no bundle found for trust domain %q", trustDomain) + } + + // Find JWT authority using the key ID from the token header + authority, ok := bundle.FindJWTAuthority(keyID) + if !ok { + return nil, jwtsvidErr.New("no JWT authority %q found for trust domain %q", keyID, trustDomain) + } + + // Obtain and verify the token claims using the obtained JWT authority + claimsMap := make(map[string]interface{}) + if err := tok.Claims(authority, &claimsMap); err != nil { + return nil, jwtsvidErr.New("unable to get claims from token: %v", err) + } + + return claimsMap, nil + }) +} + +// ParseInsecure parses and validates a JWT-SVID token and returns the +// JWT-SVID. The JWT-SVID signature is not verified. 
+func ParseInsecure(token string, audience []string) (*SVID, error) { + return parse(token, audience, func(tok *jwt.JSONWebToken, td spiffeid.TrustDomain) (map[string]interface{}, error) { + // Obtain the token claims insecurely, i.e. without signature verification + claimsMap := make(map[string]interface{}) + if err := tok.UnsafeClaimsWithoutVerification(&claimsMap); err != nil { + return nil, jwtsvidErr.New("unable to get claims from token: %v", err) + } + + return claimsMap, nil + }) +} + +// Marshal returns the JWT-SVID marshaled to a string. The returned value is +// the same token value originally passed to ParseAndValidate. +func (svid *SVID) Marshal() string { + return svid.token +} + +func parse(token string, audience []string, getClaims tokenValidator) (*SVID, error) { + // Parse serialized token + tok, err := jwt.ParseSigned(token) + if err != nil { + return nil, jwtsvidErr.New("unable to parse JWT token") + } + + // Validates supported token signed algorithm + if err := validateTokenAlgorithm(tok); err != nil { + return nil, err + } + + // Parse out the unverified claims. We need to look up the key by the trust + // domain of the SPIFFE ID. + var claims jwt.Claims + if err := tok.UnsafeClaimsWithoutVerification(&claims); err != nil { + return nil, jwtsvidErr.New("unable to get claims from token: %v", err) + } + + switch { + case claims.Subject == "": + return nil, jwtsvidErr.New("token missing subject claim") + case claims.Expiry == nil: + return nil, jwtsvidErr.New("token missing exp claim") + } + + spiffeID, err := spiffeid.FromString(claims.Subject) + if err != nil { + return nil, jwtsvidErr.New("token has an invalid subject claim: %v", err) + } + + // Create generic map of claims + claimsMap, err := getClaims(tok, spiffeID.TrustDomain()) + if err != nil { + return nil, err + } + + // Validate the standard claims. 
+ if err := claims.Validate(jwt.Expected{ + Audience: audience, + Time: time.Now(), + }); err != nil { + // Convert expected validation errors for pretty errors + switch err { + case jwt.ErrExpired: + err = jwtsvidErr.New("token has expired") + case jwt.ErrInvalidAudience: + err = jwtsvidErr.New("expected audience in %q (audience=%q)", audience, claims.Audience) + } + return nil, err + } + + return &SVID{ + ID: spiffeID, + Audience: claims.Audience, + Expiry: claims.Expiry.Time().UTC(), + Claims: claimsMap, + token: token, + }, nil +} + +// validateTokenAlgorithm json web token have only one header, and it is signed for a supported algorithm +func validateTokenAlgorithm(tok *jwt.JSONWebToken) error { + // Only one header is expected + if len(tok.Headers) != 1 { + return fmt.Errorf("expected a single token header; got %d", len(tok.Headers)) + } + + // Make sure it has an algorithm supported by JWT-SVID + alg := tok.Headers[0].Algorithm + switch jose.SignatureAlgorithm(alg) { + case jose.RS256, jose.RS384, jose.RS512, + jose.ES256, jose.ES384, jose.ES512, + jose.PS256, jose.PS384, jose.PS512: + default: + return jwtsvidErr.New("unsupported token signature algorithm %q", alg) + } + + return nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/source.go b/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/source.go new file mode 100644 index 00000000000..df34ed77772 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/source.go @@ -0,0 +1,7 @@ +package x509svid + +// Source represents a source of X509-SVIDs. +type Source interface { + // GetX509SVID returns an X509-SVID from the source. 
+ GetX509SVID() (*SVID, error) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go b/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go new file mode 100644 index 00000000000..5fecffe8fb8 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/svid.go @@ -0,0 +1,239 @@ +package x509svid + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "io/ioutil" + + "github.com/spiffe/go-spiffe/v2/internal/pemutil" + "github.com/spiffe/go-spiffe/v2/internal/x509util" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/zeebo/errs" +) + +// SVID represents a SPIFFE X509-SVID. +type SVID struct { + // ID is the SPIFFE ID of the X509-SVID. + ID spiffeid.ID + + // Certificates are the X.509 certificates of the X509-SVID. The leaf + // certificate is the X509-SVID certificate. Any remaining certificates ( + // if any) chain the X509-SVID certificate back to a X.509 root for the + // trust domain. + Certificates []*x509.Certificate + + // PrivateKey is the private key for the X509-SVID. + PrivateKey crypto.Signer +} + +// Load loads the X509-SVID from PEM encoded files on disk. certFile and +// keyFile may be the same file. +func Load(certFile, keyFile string) (*SVID, error) { + certBytes, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, x509svidErr.New("cannot read certificate file: %w", err) + } + + keyBytes, err := ioutil.ReadFile(keyFile) + if err != nil { + return nil, x509svidErr.New("cannot read key file: %w", err) + } + + return Parse(certBytes, keyBytes) +} + +// Parse parses the X509-SVID from PEM blocks containing certificate and key +// bytes. The certificate must be one or more PEM blocks with ASN.1 DER. The +// key must be a PEM block with PKCS#8 ASN.1 DER. 
+func Parse(certBytes, keyBytes []byte) (*SVID, error) { + certs, err := pemutil.ParseCertificates(certBytes) + if err != nil { + return nil, x509svidErr.New("cannot parse PEM encoded certificate: %v", err) + } + + privateKey, err := pemutil.ParsePrivateKey(keyBytes) + if err != nil { + return nil, x509svidErr.New("cannot parse PEM encoded private key: %v", err) + } + + return newSVID(certs, privateKey) +} + +// ParseRaw parses the X509-SVID from certificate and key bytes. The +// certificate must be ASN.1 DER (concatenated with no intermediate +// padding if there are more than one certificate). The key must be a PKCS#8 +// ASN.1 DER. +func ParseRaw(certBytes, keyBytes []byte) (*SVID, error) { + certificates, err := x509.ParseCertificates(certBytes) + if err != nil { + return nil, x509svidErr.New("cannot parse DER encoded certificate: %v", err) + } + + privateKey, err := x509.ParsePKCS8PrivateKey(keyBytes) + if err != nil { + return nil, x509svidErr.New("cannot parse DER encoded private key: %v", err) + } + + return newSVID(certificates, privateKey) +} + +// Marshal marshals the X509-SVID and returns PEM encoded blocks for the SVID +// and private key. +func (s *SVID) Marshal() ([]byte, []byte, error) { + if len(s.Certificates) == 0 { + return nil, nil, x509svidErr.New("no certificates to marshal") + } + certBytes := pemutil.EncodeCertificates(s.Certificates) + keyBytes, err := pemutil.EncodePKCS8PrivateKey(s.PrivateKey) + if err != nil { + return nil, nil, x509svidErr.New("cannot encode private key: %v", err) + } + + return certBytes, keyBytes, nil +} + +// MarshalRaw marshals the X509-SVID and returns ASN.1 DER for the certificates +// (concatenated with no intermediate padding) and PKCS8 ASN1.DER for the +// private key. 
+func (s *SVID) MarshalRaw() ([]byte, []byte, error) { + key, err := x509.MarshalPKCS8PrivateKey(s.PrivateKey) + if err != nil { + return nil, nil, x509svidErr.New("cannot marshal private key: %v", err) + } + + if len(s.Certificates) == 0 { + return nil, nil, x509svidErr.New("no certificates to marshal") + } + + certs := x509util.ConcatRawCertsFromCerts(s.Certificates) + return certs, key, nil +} + +// GetX509SVID returns the X509-SVID. It implements the Source interface. +func (s *SVID) GetX509SVID() (*SVID, error) { + return s, nil +} + +func newSVID(certificates []*x509.Certificate, privateKey crypto.PrivateKey) (*SVID, error) { + spiffeID, err := validateCertificates(certificates) + if err != nil { + return nil, x509svidErr.New("certificate validation failed: %v", err) + } + + signer, err := validatePrivateKey(privateKey, certificates[0]) + if err != nil { + return nil, x509svidErr.New("private key validation failed: %v", err) + } + + return &SVID{ + Certificates: certificates, + PrivateKey: signer, + ID: *spiffeID, + }, nil +} + +// validate the slice of certificates constitutes a valid SVID chain according +// to the spiffe standard and returns the spiffe id of the leaf certificate +func validateCertificates(certificates []*x509.Certificate) (*spiffeid.ID, error) { + if len(certificates) == 0 { + return nil, errs.New("no certificates found") + } + + leafID, err := validateLeafCertificate(certificates[0]) + if err != nil { + return nil, err + } + + err = validateSigningCertificates(certificates[1:]) + if err != nil { + return nil, err + } + + return leafID, nil +} + +func validateLeafCertificate(leaf *x509.Certificate) (*spiffeid.ID, error) { + leafID, err := IDFromCert(leaf) + if err != nil { + return nil, errs.New("cannot get leaf certificate SPIFFE ID: %v", err) + } + if leaf.IsCA { + return nil, errs.New("leaf certificate must not have CA flag set to true") + } + + err = validateKeyUsage(leaf) + if err != nil { + return nil, err + } + + return &leafID, err 
+} + +func validateSigningCertificates(signingCerts []*x509.Certificate) error { + for _, cert := range signingCerts { + if !cert.IsCA { + return errs.New("signing certificate must have CA flag set to true") + } + if cert.KeyUsage&x509.KeyUsageCertSign == 0 { + return errs.New("signing certificate must have 'keyCertSign' set as key usage") + } + } + + return nil +} + +func validateKeyUsage(leaf *x509.Certificate) error { + switch { + case leaf.KeyUsage&x509.KeyUsageDigitalSignature == 0: + return errs.New("leaf certificate must have 'digitalSignature' set as key usage") + case leaf.KeyUsage&x509.KeyUsageCertSign > 0: + return errs.New("leaf certificate must not have 'keyCertSign' set as key usage") + case leaf.KeyUsage&x509.KeyUsageCRLSign > 0: + return errs.New("leaf certificate must not have 'cRLSign' set as key usage") + } + return nil +} + +func validatePrivateKey(privateKey crypto.PrivateKey, leaf *x509.Certificate) (crypto.Signer, error) { + if privateKey == nil { + return nil, errs.New("no private key found") + } + + matched, err := keyMatches(privateKey, leaf.PublicKey) + if err != nil { + return nil, err + } + if !matched { + return nil, errs.New("leaf certificate does not match private key") + } + + signer, ok := privateKey.(crypto.Signer) + if !ok { + return nil, errs.New("expected crypto.Signer; got %T", privateKey) + } + + return signer, nil +} + +func keyMatches(privateKey crypto.PrivateKey, publicKey crypto.PublicKey) (bool, error) { + switch privateKey := privateKey.(type) { + case *rsa.PrivateKey: + rsaPublicKey, ok := publicKey.(*rsa.PublicKey) + return ok && rsaPublicKeyEqual(&privateKey.PublicKey, rsaPublicKey), nil + case *ecdsa.PrivateKey: + ecdsaPublicKey, ok := publicKey.(*ecdsa.PublicKey) + return ok && ecdsaPublicKeyEqual(&privateKey.PublicKey, ecdsaPublicKey), nil + default: + return false, errs.New("unsupported private key type %T", privateKey) + } +} + +func rsaPublicKeyEqual(a, b *rsa.PublicKey) bool { + return a.E == b.E && 
a.N.Cmp(b.N) == 0 +} + +func ecdsaPublicKeyEqual(a, b *ecdsa.PublicKey) bool { + return a.Curve == b.Curve && a.X.Cmp(b.X) == 0 && a.Y.Cmp(b.Y) == 0 +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/verify.go b/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/verify.go new file mode 100644 index 00000000000..681d2844a13 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/svid/x509svid/verify.go @@ -0,0 +1,113 @@ +package x509svid + +import ( + "crypto/x509" + "time" + + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/internal/x509util" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/zeebo/errs" +) + +var x509svidErr = errs.Class("x509svid") + +// VerifyOption is an option used when verifying X509-SVIDs. +type VerifyOption interface { + apply(config *verifyConfig) +} + +// WithTime sets the time used when verifying validity periods on the X509-SVID. +// If not used, the current time will be used. +func WithTime(now time.Time) VerifyOption { + return verifyOption(func(config *verifyConfig) { + config.now = now + }) +} + +// Verify verifies an X509-SVID chain using the X.509 bundle source. It +// returns the SPIFFE ID of the X509-SVID and one or more chains back to a root +// in the bundle. 
+func Verify(certs []*x509.Certificate, bundleSource x509bundle.Source, opts ...VerifyOption) (spiffeid.ID, [][]*x509.Certificate, error) { + config := &verifyConfig{} + for _, opt := range opts { + opt.apply(config) + } + + switch { + case len(certs) == 0: + return spiffeid.ID{}, nil, x509svidErr.New("empty certificates chain") + case bundleSource == nil: + return spiffeid.ID{}, nil, x509svidErr.New("bundleSource is required") + } + + leaf := certs[0] + id, err := IDFromCert(leaf) + if err != nil { + return spiffeid.ID{}, nil, x509svidErr.New("could not get leaf SPIFFE ID: %w", err) + } + + switch { + case leaf.IsCA: + return id, nil, x509svidErr.New("leaf certificate with CA flag set to true") + case leaf.KeyUsage&x509.KeyUsageCertSign > 0: + return id, nil, x509svidErr.New("leaf certificate with KeyCertSign key usage") + case leaf.KeyUsage&x509.KeyUsageCRLSign > 0: + return id, nil, x509svidErr.New("leaf certificate with KeyCrlSign key usage") + } + + bundle, err := bundleSource.GetX509BundleForTrustDomain(id.TrustDomain()) + if err != nil { + return id, nil, x509svidErr.New("could not get X509 bundle: %w", err) + } + + verifiedChains, err := leaf.Verify(x509.VerifyOptions{ + Roots: x509util.NewCertPool(bundle.X509Authorities()), + Intermediates: x509util.NewCertPool(certs[1:]), + KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageAny}, + CurrentTime: config.now, + }) + if err != nil { + return id, nil, x509svidErr.New("could not verify leaf certificate: %w", err) + } + + return id, verifiedChains, nil +} + +// ParseAndVerify parses and verifies an X509-SVID chain using the X.509 +// bundle source. It returns the SPIFFE ID of the X509-SVID and one or more +// chains back to a root in the bundle. 
+func ParseAndVerify(rawCerts [][]byte, bundleSource x509bundle.Source, opts ...VerifyOption) (spiffeid.ID, [][]*x509.Certificate, error) { + var certs []*x509.Certificate + for _, rawCert := range rawCerts { + cert, err := x509.ParseCertificate(rawCert) + if err != nil { + return spiffeid.ID{}, nil, x509svidErr.New("unable to parse certificate: %w", err) + } + certs = append(certs, cert) + } + return Verify(certs, bundleSource, opts...) +} + +// IDFromCert extracts the SPIFFE ID from the URI SAN of the provided +// certificate. It will return an error if the certificate does not have +// exactly one URI SAN with a well-formed SPIFFE ID. +func IDFromCert(cert *x509.Certificate) (spiffeid.ID, error) { + switch { + case len(cert.URIs) == 0: + return spiffeid.ID{}, errs.New("certificate contains no URI SAN") + case len(cert.URIs) > 1: + return spiffeid.ID{}, errs.New("certificate contains more than one URI SAN") + } + return spiffeid.FromURI(cert.URIs[0]) +} + +type verifyConfig struct { + now time.Time +} + +type verifyOption func(config *verifyConfig) + +func (fn verifyOption) apply(config *verifyConfig) { + fn(config) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go new file mode 100644 index 00000000000..6ce0238fe45 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr.go @@ -0,0 +1,69 @@ +package workloadapi + +import ( + "errors" + "net" + "net/url" + "os" +) + +const ( + // SocketEnv is the environment variable holding the default Workload API + // address. + SocketEnv = "SPIFFE_ENDPOINT_SOCKET" +) + +func GetDefaultAddress() (string, bool) { + return os.LookupEnv(SocketEnv) +} + +// ValidateAddress validates that the provided address +// can be parsed to a gRPC target string for dialing +// a Workload API endpoint exposed as either a Unix +// Domain Socket or TCP socket. 
+func ValidateAddress(addr string) error { + _, err := parseTargetFromStringAddr(addr) + return err +} + +// parseTargetFromStringAddr parses the endpoint address and returns a gRPC target +// string for dialing. +func parseTargetFromStringAddr(addr string) (string, error) { + u, err := url.Parse(addr) + if err != nil { + return "", errors.New("workload endpoint socket is not a valid URI: " + err.Error()) + } + return parseTargetFromURLAddr(u) +} + +func parseTargetFromURLAddr(u *url.URL) (string, error) { + if u.Scheme == "tcp" { + switch { + case u.Opaque != "": + return "", errors.New("workload endpoint tcp socket URI must not be opaque") + case u.User != nil: + return "", errors.New("workload endpoint tcp socket URI must not include user info") + case u.Host == "": + return "", errors.New("workload endpoint tcp socket URI must include a host") + case u.Path != "": + return "", errors.New("workload endpoint tcp socket URI must not include a path") + case u.RawQuery != "": + return "", errors.New("workload endpoint tcp socket URI must not include query values") + case u.Fragment != "": + return "", errors.New("workload endpoint tcp socket URI must not include a fragment") + } + + ip := net.ParseIP(u.Hostname()) + if ip == nil { + return "", errors.New("workload endpoint tcp socket URI host component must be an IP:port") + } + port := u.Port() + if port == "" { + return "", errors.New("workload endpoint tcp socket URI host component must include a port") + } + + return net.JoinHostPort(ip.String(), port), nil + } + + return parseTargetFromURLAddrOS(u) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr_posix.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr_posix.go new file mode 100644 index 00000000000..0fa3f56b780 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr_posix.go @@ -0,0 +1,34 @@ +//go:build !windows +// +build !windows + +package workloadapi + +import ( + "errors" + "net/url" +) + +var ( + 
ErrInvalidEndpointScheme = errors.New("workload endpoint socket URI must have a \"tcp\" or \"unix\" scheme") +) + +func parseTargetFromURLAddrOS(u *url.URL) (string, error) { + switch u.Scheme { + case "unix": + switch { + case u.Opaque != "": + return "", errors.New("workload endpoint unix socket URI must not be opaque") + case u.User != nil: + return "", errors.New("workload endpoint unix socket URI must not include user info") + case u.Host == "" && u.Path == "": + return "", errors.New("workload endpoint unix socket URI must include a path") + case u.RawQuery != "": + return "", errors.New("workload endpoint unix socket URI must not include query values") + case u.Fragment != "": + return "", errors.New("workload endpoint unix socket URI must not include a fragment") + } + return u.String(), nil + default: + return "", ErrInvalidEndpointScheme + } +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr_windows.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr_windows.go new file mode 100644 index 00000000000..4f9f2c3528e --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/addr_windows.go @@ -0,0 +1,33 @@ +//go:build windows +// +build windows + +package workloadapi + +import ( + "errors" + "net/url" +) + +var ( + ErrInvalidEndpointScheme = errors.New("workload endpoint socket URI must have a \"tcp\" or \"npipe\" scheme") +) + +func parseTargetFromURLAddrOS(u *url.URL) (string, error) { + switch u.Scheme { + case "npipe": + switch { + case u.Opaque == "" && u.Host != "": + return "", errors.New("workload endpoint named pipe URI must be opaque") + case u.Opaque == "": + return "", errors.New("workload endpoint named pipe URI must include an opaque part") + case u.RawQuery != "": + return "", errors.New("workload endpoint named pipe URI must not include query values") + case u.Fragment != "": + return "", errors.New("workload endpoint named pipe URI must not include a fragment") + } + + return namedPipeTarget(u.Opaque), 
nil + default: + return "", ErrInvalidEndpointScheme + } +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go new file mode 100644 index 00000000000..b6ef1ed53fd --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/backoff.go @@ -0,0 +1,34 @@ +package workloadapi + +import ( + "math" + "time" +) + +// backoff defines a linear backoff policy. +type backoff struct { + InitialDelay time.Duration + MaxDelay time.Duration + n int +} + +func newBackoff() *backoff { + return &backoff{ + InitialDelay: time.Second, + MaxDelay: 30 * time.Second, + n: 0, + } +} + +// Duration returns the next wait period for the backoff. Not goroutine-safe. +func (b *backoff) Duration() time.Duration { + backoff := float64(b.n) + 1 + d := math.Min(b.InitialDelay.Seconds()*backoff, b.MaxDelay.Seconds()) + b.n++ + return time.Duration(d) * time.Second +} + +// Reset resets the backoff's state. +func (b *backoff) Reset() { + b.n = 0 +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/bundlesource.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/bundlesource.go new file mode 100644 index 00000000000..2a17dc98710 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/bundlesource.go @@ -0,0 +1,187 @@ +package workloadapi + +import ( + "context" + "crypto" + "crypto/x509" + "sync" + + "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" + "github.com/spiffe/go-spiffe/v2/bundle/spiffebundle" + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/zeebo/errs" +) + +var bundlesourceErr = errs.Class("bundlesource") + +// BundleSource is a source of SPIFFE bundles maintained via the Workload API. 
+type BundleSource struct { + watcher *watcher + + mtx sync.RWMutex + x509Authorities map[spiffeid.TrustDomain][]*x509.Certificate + jwtAuthorities map[spiffeid.TrustDomain]map[string]crypto.PublicKey + + closeMtx sync.RWMutex + closed bool +} + +// NewBundleSource creates a new BundleSource. It blocks until the initial +// update has been received from the Workload API. +func NewBundleSource(ctx context.Context, options ...BundleSourceOption) (_ *BundleSource, err error) { + config := &bundleSourceConfig{} + for _, option := range options { + option.configureBundleSource(config) + } + + s := &BundleSource{ + x509Authorities: make(map[spiffeid.TrustDomain][]*x509.Certificate), + jwtAuthorities: make(map[spiffeid.TrustDomain]map[string]crypto.PublicKey), + } + + s.watcher, err = newWatcher(ctx, config.watcher, s.setX509Context, s.setJWTBundles) + if err != nil { + return nil, err + } + + return s, nil +} + +// Close closes the source, dropping the connection to the Workload API. +// Other source methods will return an error after Close has been called. +// The underlying Workload API client will also be closed if it is owned by +// the BundleSource (i.e. not provided via the WithClient option). +func (s *BundleSource) Close() error { + s.closeMtx.Lock() + s.closed = true + s.closeMtx.Unlock() + + return s.watcher.Close() +} + +// GetBundleForTrustDomain returns the SPIFFE bundle for the given trust +// domain. It implements the spiffebundle.Source interface. 
+func (s *BundleSource) GetBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*spiffebundle.Bundle, error) { + if err := s.checkClosed(); err != nil { + return nil, err + } + s.mtx.RLock() + defer s.mtx.RUnlock() + + x509Authorities, hasX509Authorities := s.x509Authorities[trustDomain] + jwtAuthorities, hasJWTAuthorities := s.jwtAuthorities[trustDomain] + if !hasX509Authorities && !hasJWTAuthorities { + return nil, bundlesourceErr.New("no SPIFFE bundle for trust domain %q", trustDomain) + } + bundle := spiffebundle.New(trustDomain) + if hasX509Authorities { + bundle.SetX509Authorities(x509Authorities) + } + if hasJWTAuthorities { + bundle.SetJWTAuthorities(jwtAuthorities) + } + return bundle, nil +} + +// GetX509BundleForTrustDomain returns the X.509 bundle for the given trust +// domain. It implements the x509bundle.Source interface. +func (s *BundleSource) GetX509BundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*x509bundle.Bundle, error) { + if err := s.checkClosed(); err != nil { + return nil, err + } + s.mtx.RLock() + defer s.mtx.RUnlock() + + x509Authorities, hasX509Authorities := s.x509Authorities[trustDomain] + if !hasX509Authorities { + return nil, bundlesourceErr.New("no X.509 bundle for trust domain %q", trustDomain) + } + return x509bundle.FromX509Authorities(trustDomain, x509Authorities), nil +} + +// GetJWTBundleForTrustDomain returns the JWT bundle for the given trust +// domain. It implements the jwtbundle.Source interface. 
+func (s *BundleSource) GetJWTBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*jwtbundle.Bundle, error) { + if err := s.checkClosed(); err != nil { + return nil, err + } + s.mtx.RLock() + defer s.mtx.RUnlock() + + jwtAuthorities, hasJWTAuthorities := s.jwtAuthorities[trustDomain] + if !hasJWTAuthorities { + return nil, bundlesourceErr.New("no JWT bundle for trust domain %q", trustDomain) + } + return jwtbundle.FromJWTAuthorities(trustDomain, jwtAuthorities), nil +} + +// WaitUntilUpdated waits until the source is updated or the context is done, +// in which case ctx.Err() is returned. +func (s *BundleSource) WaitUntilUpdated(ctx context.Context) error { + return s.watcher.WaitUntilUpdated(ctx) +} + +// Updated returns a channel that is sent on whenever the source is updated. +func (s *BundleSource) Updated() <-chan struct{} { + return s.watcher.Updated() +} + +func (s *BundleSource) setX509Context(x509Context *X509Context) { + s.mtx.Lock() + defer s.mtx.Unlock() + + newBundles := x509Context.Bundles.Bundles() + + // Add/replace the X.509 authorities from the X.509 context. Track the trust + // domains represented in the new X.509 context so we can determine which + // existing trust domains are no longer represented. + trustDomains := make(map[spiffeid.TrustDomain]struct{}, len(newBundles)) + for _, newBundle := range newBundles { + trustDomains[newBundle.TrustDomain()] = struct{}{} + s.x509Authorities[newBundle.TrustDomain()] = newBundle.X509Authorities() + } + + // Remove the X.509 authority entries for trust domains no longer + // represented in the X.509 context. + for existingTD := range s.x509Authorities { + if _, ok := trustDomains[existingTD]; ok { + continue + } + delete(s.x509Authorities, existingTD) + } +} + +func (s *BundleSource) setJWTBundles(bundles *jwtbundle.Set) { + s.mtx.Lock() + defer s.mtx.Unlock() + + newBundles := bundles.Bundles() + + // Add/replace the JWT authorities from the JWT bundles. 
Track the trust + // domains represented in the new JWT bundles so we can determine which + // existing trust domains are no longer represented. + trustDomains := make(map[spiffeid.TrustDomain]struct{}, len(newBundles)) + for _, newBundle := range newBundles { + trustDomains[newBundle.TrustDomain()] = struct{}{} + s.jwtAuthorities[newBundle.TrustDomain()] = newBundle.JWTAuthorities() + } + + // Remove the JWT authority entries for trust domains no longer represented + // in the JWT bundles. + for existingTD := range s.jwtAuthorities { + if _, ok := trustDomains[existingTD]; ok { + continue + } + delete(s.jwtAuthorities, existingTD) + } +} + +func (s *BundleSource) checkClosed() error { + s.closeMtx.RLock() + defer s.closeMtx.RUnlock() + if s.closed { + return bundlesourceErr.New("source is closed") + } + return nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go new file mode 100644 index 00000000000..4ea4c147d61 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client.go @@ -0,0 +1,548 @@ +package workloadapi + +import ( + "context" + "crypto/x509" + "errors" + "fmt" + "time" + + "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/logger" + "github.com/spiffe/go-spiffe/v2/proto/spiffe/workload" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Client is a Workload API client. +type Client struct { + conn *grpc.ClientConn + wlClient workload.SpiffeWorkloadAPIClient + config clientConfig +} + +// New dials the Workload API and returns a client. 
+func New(ctx context.Context, options ...ClientOption) (*Client, error) { + c := &Client{ + config: defaultClientConfig(), + } + for _, opt := range options { + opt.configureClient(&c.config) + } + + err := c.setAddress() + if err != nil { + return nil, err + } + + c.conn, err = c.newConn(ctx) + if err != nil { + return nil, err + } + + c.wlClient = workload.NewSpiffeWorkloadAPIClient(c.conn) + return c, nil +} + +// Close closes the client. +func (c *Client) Close() error { + return c.conn.Close() +} + +// FetchX509SVID fetches the default X509-SVID, i.e. the first in the list +// returned by the Workload API. +func (c *Client) FetchX509SVID(ctx context.Context) (*x509svid.SVID, error) { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + stream, err := c.wlClient.FetchX509SVID(ctx, &workload.X509SVIDRequest{}) + if err != nil { + return nil, err + } + + resp, err := stream.Recv() + if err != nil { + return nil, err + } + + svids, err := parseX509SVIDs(resp, true) + if err != nil { + return nil, err + } + + return svids[0], nil +} + +// FetchX509SVIDs fetches all X509-SVIDs. +func (c *Client) FetchX509SVIDs(ctx context.Context) ([]*x509svid.SVID, error) { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + stream, err := c.wlClient.FetchX509SVID(ctx, &workload.X509SVIDRequest{}) + if err != nil { + return nil, err + } + + resp, err := stream.Recv() + if err != nil { + return nil, err + } + + return parseX509SVIDs(resp, false) +} + +// FetchX509Bundles fetches the X.509 bundles. 
+func (c *Client) FetchX509Bundles(ctx context.Context) (*x509bundle.Set, error) { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + stream, err := c.wlClient.FetchX509Bundles(ctx, &workload.X509BundlesRequest{}) + if err != nil { + return nil, err + } + resp, err := stream.Recv() + if err != nil { + return nil, err + } + + return parseX509BundlesResponse(resp) +} + +// WatchX509Bundles watches for changes to the X.509 bundles. The watcher receives +// the updated X.509 bundles. +func (c *Client) WatchX509Bundles(ctx context.Context, watcher X509BundleWatcher) error { + backoff := newBackoff() + for { + err := c.watchX509Bundles(ctx, watcher, backoff) + watcher.OnX509BundlesWatchError(err) + err = c.handleWatchError(ctx, err, backoff) + if err != nil { + return err + } + } +} + +// FetchX509Context fetches the X.509 context, which contains both X509-SVIDs +// and X.509 bundles. +func (c *Client) FetchX509Context(ctx context.Context) (*X509Context, error) { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + stream, err := c.wlClient.FetchX509SVID(ctx, &workload.X509SVIDRequest{}) + if err != nil { + return nil, err + } + + resp, err := stream.Recv() + if err != nil { + return nil, err + } + + return parseX509Context(resp) +} + +// WatchX509Context watches for updates to the X.509 context. The watcher +// receives the updated X.509 context. +func (c *Client) WatchX509Context(ctx context.Context, watcher X509ContextWatcher) error { + backoff := newBackoff() + for { + err := c.watchX509Context(ctx, watcher, backoff) + watcher.OnX509ContextWatchError(err) + err = c.handleWatchError(ctx, err, backoff) + if err != nil { + return err + } + } +} + +// FetchJWTSVID fetches a JWT-SVID. +func (c *Client) FetchJWTSVID(ctx context.Context, params jwtsvid.Params) (*jwtsvid.SVID, error) { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + audience := append([]string{params.Audience}, params.ExtraAudiences...) 
+ resp, err := c.wlClient.FetchJWTSVID(ctx, &workload.JWTSVIDRequest{ + SpiffeId: params.Subject.String(), + Audience: audience, + }) + if err != nil { + return nil, err + } + + svids, err := parseJWTSVIDs(resp, audience, true) + if err != nil { + return nil, err + } + + return svids[0], nil +} + +// FetchJWTSVIDs fetches all JWT-SVIDs. +func (c *Client) FetchJWTSVIDs(ctx context.Context, params jwtsvid.Params) ([]*jwtsvid.SVID, error) { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + audience := append([]string{params.Audience}, params.ExtraAudiences...) + resp, err := c.wlClient.FetchJWTSVID(ctx, &workload.JWTSVIDRequest{ + SpiffeId: params.Subject.String(), + Audience: audience, + }) + if err != nil { + return nil, err + } + + return parseJWTSVIDs(resp, audience, false) +} + +// FetchJWTBundles fetches the JWT bundles for JWT-SVID validation, keyed +// by a SPIFFE ID of the trust domain to which they belong. +func (c *Client) FetchJWTBundles(ctx context.Context) (*jwtbundle.Set, error) { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + stream, err := c.wlClient.FetchJWTBundles(ctx, &workload.JWTBundlesRequest{}) + if err != nil { + return nil, err + } + + resp, err := stream.Recv() + if err != nil { + return nil, err + } + + return parseJWTSVIDBundles(resp) +} + +// WatchJWTBundles watches for changes to the JWT bundles. The watcher receives +// the updated JWT bundles. +func (c *Client) WatchJWTBundles(ctx context.Context, watcher JWTBundleWatcher) error { + backoff := newBackoff() + for { + err := c.watchJWTBundles(ctx, watcher, backoff) + watcher.OnJWTBundlesWatchError(err) + err = c.handleWatchError(ctx, err, backoff) + if err != nil { + return err + } + } +} + +// ValidateJWTSVID validates the JWT-SVID token. The parsed and validated +// JWT-SVID is returned. 
+func (c *Client) ValidateJWTSVID(ctx context.Context, token, audience string) (*jwtsvid.SVID, error) { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + _, err := c.wlClient.ValidateJWTSVID(ctx, &workload.ValidateJWTSVIDRequest{ + Svid: token, + Audience: audience, + }) + if err != nil { + return nil, err + } + + return jwtsvid.ParseInsecure(token, []string{audience}) +} + +func (c *Client) newConn(ctx context.Context) (*grpc.ClientConn, error) { + c.config.dialOptions = append(c.config.dialOptions, grpc.WithTransportCredentials(insecure.NewCredentials())) + c.appendDialOptionsOS() + return grpc.DialContext(ctx, c.config.address, c.config.dialOptions...) +} + +func (c *Client) handleWatchError(ctx context.Context, err error, backoff *backoff) error { + code := status.Code(err) + if code == codes.Canceled { + return err + } + + if code == codes.InvalidArgument { + c.config.log.Errorf("Canceling watch: %v", err) + return err + } + + c.config.log.Errorf("Failed to watch the Workload API: %v", err) + retryAfter := backoff.Duration() + c.config.log.Debugf("Retrying watch in %s", retryAfter) + select { + case <-time.After(retryAfter): + return nil + + case <-ctx.Done(): + return ctx.Err() + } +} + +func (c *Client) watchX509Context(ctx context.Context, watcher X509ContextWatcher, backoff *backoff) error { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + c.config.log.Debugf("Watching X.509 contexts") + stream, err := c.wlClient.FetchX509SVID(ctx, &workload.X509SVIDRequest{}) + if err != nil { + return err + } + + for { + resp, err := stream.Recv() + if err != nil { + return err + } + + backoff.Reset() + x509Context, err := parseX509Context(resp) + if err != nil { + c.config.log.Errorf("Failed to parse X509-SVID response: %v", err) + watcher.OnX509ContextWatchError(err) + continue + } + watcher.OnX509ContextUpdate(x509Context) + } +} + +func (c *Client) watchJWTBundles(ctx context.Context, watcher JWTBundleWatcher, backoff 
*backoff) error { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + c.config.log.Debugf("Watching JWT bundles") + stream, err := c.wlClient.FetchJWTBundles(ctx, &workload.JWTBundlesRequest{}) + if err != nil { + return err + } + + for { + resp, err := stream.Recv() + if err != nil { + return err + } + + backoff.Reset() + jwtbundleSet, err := parseJWTSVIDBundles(resp) + if err != nil { + c.config.log.Errorf("Failed to parse JWT bundle response: %v", err) + watcher.OnJWTBundlesWatchError(err) + continue + } + watcher.OnJWTBundlesUpdate(jwtbundleSet) + } +} + +func (c *Client) watchX509Bundles(ctx context.Context, watcher X509BundleWatcher, backoff *backoff) error { + ctx, cancel := context.WithCancel(withHeader(ctx)) + defer cancel() + + c.config.log.Debugf("Watching X.509 bundles") + stream, err := c.wlClient.FetchX509Bundles(ctx, &workload.X509BundlesRequest{}) + if err != nil { + return err + } + + for { + resp, err := stream.Recv() + if err != nil { + return err + } + + backoff.Reset() + x509bundleSet, err := parseX509BundlesResponse(resp) + if err != nil { + c.config.log.Errorf("Failed to parse X.509 bundle response: %v", err) + watcher.OnX509BundlesWatchError(err) + continue + } + watcher.OnX509BundlesUpdate(x509bundleSet) + } +} + +// X509ContextWatcher receives X509Context updates from the Workload API. +type X509ContextWatcher interface { + // OnX509ContextUpdate is called with the latest X.509 context retrieved + // from the Workload API. + OnX509ContextUpdate(*X509Context) + + // OnX509ContextWatchError is called when there is a problem establishing + // or maintaining connectivity with the Workload API. + OnX509ContextWatchError(error) +} + +// JWTBundleWatcher receives JWT bundle updates from the Workload API. +type JWTBundleWatcher interface { + // OnJWTBundlesUpdate is called with the latest JWT bundle set retrieved + // from the Workload API. 
+ OnJWTBundlesUpdate(*jwtbundle.Set) + + // OnJWTBundlesWatchError is called when there is a problem establishing + // or maintaining connectivity with the Workload API. + OnJWTBundlesWatchError(error) +} + +// X509BundleWatcher receives X.509 bundle updates from the Workload API. +type X509BundleWatcher interface { + // OnX509BundlesUpdate is called with the latest X.509 bundle set retrieved + // from the Workload API. + OnX509BundlesUpdate(*x509bundle.Set) + + // OnX509BundlesWatchError is called when there is a problem establishing + // or maintaining connectivity with the Workload API. + OnX509BundlesWatchError(error) +} + +func withHeader(ctx context.Context) context.Context { + header := metadata.Pairs("workload.spiffe.io", "true") + return metadata.NewOutgoingContext(ctx, header) +} + +func defaultClientConfig() clientConfig { + return clientConfig{ + log: logger.Null, + } +} + +func parseX509Context(resp *workload.X509SVIDResponse) (*X509Context, error) { + svids, err := parseX509SVIDs(resp, false) + if err != nil { + return nil, err + } + + bundles, err := parseX509Bundles(resp) + if err != nil { + return nil, err + } + + return &X509Context{ + SVIDs: svids, + Bundles: bundles, + }, nil +} + +// parseX509SVIDs parses one or all of the SVIDs in the response. If firstOnly +// is true, then only the first SVID in the response is parsed and returned. +// Otherwise all SVIDs are parsed and returned. 
+func parseX509SVIDs(resp *workload.X509SVIDResponse, firstOnly bool) ([]*x509svid.SVID, error) { + n := len(resp.Svids) + if n == 0 { + return nil, errors.New("no SVIDs in response") + } + if firstOnly { + n = 1 + } + + svids := make([]*x509svid.SVID, 0, n) + for i := 0; i < n; i++ { + svid := resp.Svids[i] + s, err := x509svid.ParseRaw(svid.X509Svid, svid.X509SvidKey) + if err != nil { + return nil, err + } + svids = append(svids, s) + } + + return svids, nil +} + +func parseX509Bundles(resp *workload.X509SVIDResponse) (*x509bundle.Set, error) { + bundles := []*x509bundle.Bundle{} + for _, svid := range resp.Svids { + b, err := parseX509Bundle(svid.SpiffeId, svid.Bundle) + if err != nil { + return nil, err + } + bundles = append(bundles, b) + } + + for tdID, bundle := range resp.FederatedBundles { + b, err := parseX509Bundle(tdID, bundle) + if err != nil { + return nil, err + } + bundles = append(bundles, b) + } + + return x509bundle.NewSet(bundles...), nil +} + +func parseX509Bundle(spiffeID string, bundle []byte) (*x509bundle.Bundle, error) { + td, err := spiffeid.TrustDomainFromString(spiffeID) + if err != nil { + return nil, err + } + certs, err := x509.ParseCertificates(bundle) + if err != nil { + return nil, err + } + if len(certs) == 0 { + return nil, fmt.Errorf("empty X.509 bundle for trust domain %q", td) + } + return x509bundle.FromX509Authorities(td, certs), nil +} + +func parseX509BundlesResponse(resp *workload.X509BundlesResponse) (*x509bundle.Set, error) { + bundles := []*x509bundle.Bundle{} + + for tdID, b := range resp.Bundles { + td, err := spiffeid.TrustDomainFromString(tdID) + if err != nil { + return nil, err + } + + b, err := x509bundle.ParseRaw(td, b) + if err != nil { + return nil, err + } + bundles = append(bundles, b) + } + + return x509bundle.NewSet(bundles...), nil +} + +// parseJWTSVIDs parses one or all of the SVIDs in the response. If firstOnly +// is true, then only the first SVID in the response is parsed and returned. 
+// Otherwise all SVIDs are parsed and returned. +func parseJWTSVIDs(resp *workload.JWTSVIDResponse, audience []string, firstOnly bool) ([]*jwtsvid.SVID, error) { + n := len(resp.Svids) + if n == 0 { + return nil, errors.New("there were no SVIDs in the response") + } + if firstOnly { + n = 1 + } + + svids := make([]*jwtsvid.SVID, 0, n) + for i := 0; i < n; i++ { + svid := resp.Svids[i] + s, err := jwtsvid.ParseInsecure(svid.Svid, audience) + if err != nil { + return nil, err + } + svids = append(svids, s) + } + + return svids, nil +} + +func parseJWTSVIDBundles(resp *workload.JWTBundlesResponse) (*jwtbundle.Set, error) { + bundles := []*jwtbundle.Bundle{} + + for tdID, b := range resp.Bundles { + td, err := spiffeid.TrustDomainFromString(tdID) + if err != nil { + return nil, err + } + + b, err := jwtbundle.Parse(td, b) + if err != nil { + return nil, err + } + bundles = append(bundles, b) + } + + return jwtbundle.NewSet(bundles...), nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go new file mode 100644 index 00000000000..8e91a28fa4c --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_posix.go @@ -0,0 +1,29 @@ +//go:build !windows +// +build !windows + +package workloadapi + +import "errors" + +// appendDialOptionsOS appends OS specific dial options +func (c *Client) appendDialOptionsOS() { + // No options to add in this platform +} +func (c *Client) setAddress() error { + if c.config.namedPipeName != "" { + // Purely defensive. This should never happen. 
+ return errors.New("named pipes not supported in this platform") + } + + if c.config.address == "" { + var ok bool + c.config.address, ok = GetDefaultAddress() + if !ok { + return errors.New("workload endpoint socket address is not configured") + } + } + + var err error + c.config.address, err = parseTargetFromStringAddr(c.config.address) + return err +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go new file mode 100644 index 00000000000..fb628fccc1d --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/client_windows.go @@ -0,0 +1,57 @@ +//go:build windows +// +build windows + +package workloadapi + +import ( + "errors" + "path/filepath" + "strings" + + "github.com/Microsoft/go-winio" + "google.golang.org/grpc" +) + +// appendDialOptionsOS appends OS specific dial options +func (c *Client) appendDialOptionsOS() { + if c.config.namedPipeName != "" { + // Use the dialer to connect to named pipes only if a named pipe + // is defined (i.e. WithNamedPipeName is used). 
+ c.config.dialOptions = append(c.config.dialOptions, grpc.WithContextDialer(winio.DialPipeContext)) + } +} + +func (c *Client) setAddress() error { + var err error + if c.config.namedPipeName != "" { + if c.config.address != "" { + return errors.New("only one of WithAddr or WithNamedPipeName options can be used, not both") + } + c.config.address = namedPipeTarget(c.config.namedPipeName) + return nil + } + + if c.config.address == "" { + var ok bool + c.config.address, ok = GetDefaultAddress() + if !ok { + return errors.New("workload endpoint socket address is not configured") + } + } + + if strings.HasPrefix(c.config.address, "npipe:") { + // Use the dialer to connect to named pipes only if the gRPC target + // string has the "npipe" scheme + c.config.dialOptions = append(c.config.dialOptions, grpc.WithContextDialer(winio.DialPipeContext)) + } + + c.config.address, err = parseTargetFromStringAddr(c.config.address) + return err +} + +// namedPipeTarget returns a target string suitable for +// dialing the endpoint address based on the provided +// pipe name. +func namedPipeTarget(pipeName string) string { + return `\\.\` + filepath.Join("pipe", pipeName) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/convenience.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/convenience.go new file mode 100644 index 00000000000..f42c226fa1b --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/convenience.go @@ -0,0 +1,124 @@ +package workloadapi + +import ( + "context" + + "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" +) + +// FetchX509SVID fetches the default X509-SVID, i.e. the first in the list +// returned by the Workload API. +func FetchX509SVID(ctx context.Context, options ...ClientOption) (*x509svid.SVID, error) { + c, err := New(ctx, options...) 
+ if err != nil { + return nil, err + } + defer c.Close() + return c.FetchX509SVID(ctx) +} + +// FetchX509SVIDs fetches all X509-SVIDs. +func FetchX509SVIDs(ctx context.Context, options ...ClientOption) ([]*x509svid.SVID, error) { + c, err := New(ctx, options...) + if err != nil { + return nil, err + } + defer c.Close() + return c.FetchX509SVIDs(ctx) +} + +// FetchX509Bundle fetches the X.509 bundles. +func FetchX509Bundles(ctx context.Context, options ...ClientOption) (*x509bundle.Set, error) { + c, err := New(ctx, options...) + if err != nil { + return nil, err + } + defer c.Close() + return c.FetchX509Bundles(ctx) +} + +// FetchX509Context fetches the X.509 context, which contains both X509-SVIDs +// and X.509 bundles. +func FetchX509Context(ctx context.Context, options ...ClientOption) (*X509Context, error) { + c, err := New(ctx, options...) + if err != nil { + return nil, err + } + defer c.Close() + return c.FetchX509Context(ctx) +} + +// WatchX509Context watches for updates to the X.509 context. +func WatchX509Context(ctx context.Context, watcher X509ContextWatcher, options ...ClientOption) error { + c, err := New(ctx, options...) + if err != nil { + return err + } + defer c.Close() + return c.WatchX509Context(ctx, watcher) +} + +// FetchJWTSVID fetches a JWT-SVID. +func FetchJWTSVID(ctx context.Context, params jwtsvid.Params, options ...ClientOption) (*jwtsvid.SVID, error) { + c, err := New(ctx, options...) + if err != nil { + return nil, err + } + defer c.Close() + return c.FetchJWTSVID(ctx, params) +} + +// FetchJWTSVID fetches all JWT-SVIDs. +func FetchJWTSVIDs(ctx context.Context, params jwtsvid.Params, options ...ClientOption) ([]*jwtsvid.SVID, error) { + c, err := New(ctx, options...) + if err != nil { + return nil, err + } + defer c.Close() + return c.FetchJWTSVIDs(ctx, params) +} + +// FetchJWTBundles fetches the JWT bundles for JWT-SVID validation, keyed +// by a SPIFFE ID of the trust domain to which they belong. 
+func FetchJWTBundles(ctx context.Context, options ...ClientOption) (*jwtbundle.Set, error) { + c, err := New(ctx, options...) + if err != nil { + return nil, err + } + defer c.Close() + return c.FetchJWTBundles(ctx) +} + +// WatchJWTBundles watches for changes to the JWT bundles. +func WatchJWTBundles(ctx context.Context, watcher JWTBundleWatcher, options ...ClientOption) error { + c, err := New(ctx, options...) + if err != nil { + return err + } + defer c.Close() + return c.WatchJWTBundles(ctx, watcher) +} + +// WatchX509Bundles watches for changes to the X.509 bundles. +func WatchX509Bundles(ctx context.Context, watcher X509BundleWatcher, options ...ClientOption) error { + c, err := New(ctx, options...) + if err != nil { + return err + } + defer c.Close() + return c.WatchX509Bundles(ctx, watcher) +} + +// ValidateJWTSVID validates the JWT-SVID token. The parsed and validated +// JWT-SVID is returned. +func ValidateJWTSVID(ctx context.Context, token, audience string, options ...ClientOption) (*jwtsvid.SVID, error) { + c, err := New(ctx, options...) + if err != nil { + return nil, err + } + defer c.Close() + return c.ValidateJWTSVID(ctx, token, audience) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go new file mode 100644 index 00000000000..6bfe06e469c --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/jwtsource.go @@ -0,0 +1,108 @@ +package workloadapi + +import ( + "context" + "sync" + + "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" + "github.com/zeebo/errs" +) + +var jwtsourceErr = errs.Class("jwtsource") + +// JWTSource is a source of JWT-SVID and JWT bundles maintained via the +// Workload API. 
+type JWTSource struct { + watcher *watcher + + mtx sync.RWMutex + bundles *jwtbundle.Set + + closeMtx sync.RWMutex + closed bool +} + +// NewJWTSource creates a new JWTSource. It blocks until the initial update +// has been received from the Workload API. +func NewJWTSource(ctx context.Context, options ...JWTSourceOption) (_ *JWTSource, err error) { + config := &jwtSourceConfig{} + for _, option := range options { + option.configureJWTSource(config) + } + + s := &JWTSource{} + + s.watcher, err = newWatcher(ctx, config.watcher, nil, s.setJWTBundles) + if err != nil { + return nil, err + } + + return s, nil +} + +// Close closes the source, dropping the connection to the Workload API. +// Other source methods will return an error after Close has been called. +// The underlying Workload API client will also be closed if it is owned by +// the JWTSource (i.e. not provided via the WithClient option). +func (s *JWTSource) Close() error { + s.closeMtx.Lock() + s.closed = true + s.closeMtx.Unlock() + + return s.watcher.Close() +} + +// FetchJWTSVID fetches a JWT-SVID from the source with the given parameters. +// It implements the jwtsvid.Source interface. +func (s *JWTSource) FetchJWTSVID(ctx context.Context, params jwtsvid.Params) (*jwtsvid.SVID, error) { + if err := s.checkClosed(); err != nil { + return nil, err + } + return s.watcher.client.FetchJWTSVID(ctx, params) +} + +// FetchJWTSVIDs fetches all JWT-SVIDs from the source with the given parameters. +// It implements the jwtsvid.Source interface. +func (s *JWTSource) FetchJWTSVIDs(ctx context.Context, params jwtsvid.Params) ([]*jwtsvid.SVID, error) { + if err := s.checkClosed(); err != nil { + return nil, err + } + return s.watcher.client.FetchJWTSVIDs(ctx, params) +} + +// GetJWTBundleForTrustDomain returns the JWT bundle for the given trust +// domain. It implements the jwtbundle.Source interface. 
+func (s *JWTSource) GetJWTBundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*jwtbundle.Bundle, error) { + if err := s.checkClosed(); err != nil { + return nil, err + } + return s.bundles.GetJWTBundleForTrustDomain(trustDomain) +} + +// WaitUntilUpdated waits until the source is updated or the context is done, +// in which case ctx.Err() is returned. +func (s *JWTSource) WaitUntilUpdated(ctx context.Context) error { + return s.watcher.WaitUntilUpdated(ctx) +} + +// Updated returns a channel that is sent on whenever the source is updated. +func (s *JWTSource) Updated() <-chan struct{} { + return s.watcher.Updated() +} + +func (s *JWTSource) setJWTBundles(bundles *jwtbundle.Set) { + s.mtx.Lock() + defer s.mtx.Unlock() + s.bundles = bundles +} + +func (s *JWTSource) checkClosed() error { + s.closeMtx.RLock() + defer s.closeMtx.RUnlock() + if s.closed { + return jwtsourceErr.New("source is closed") + } + return nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go new file mode 100644 index 00000000000..00cab7d16ce --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option.go @@ -0,0 +1,147 @@ +package workloadapi + +import ( + "github.com/spiffe/go-spiffe/v2/logger" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "google.golang.org/grpc" +) + +// ClientOption is an option used when creating a new Client. +type ClientOption interface { + configureClient(*clientConfig) +} + +// WithAddr provides an address for the Workload API. The value of the +// SPIFFE_ENDPOINT_SOCKET environment variable will be used if the option +// is unused. +func WithAddr(addr string) ClientOption { + return clientOption(func(c *clientConfig) { + c.address = addr + }) +} + +// WithDialOptions provides extra GRPC dialing options when dialing the +// Workload API. 
+func WithDialOptions(options ...grpc.DialOption) ClientOption { + return clientOption(func(c *clientConfig) { + c.dialOptions = append(c.dialOptions, options...) + }) +} + +// WithLogger provides a logger to the Client. +func WithLogger(logger logger.Logger) ClientOption { + return clientOption(func(c *clientConfig) { + c.log = logger + }) +} + +// SourceOption are options that are shared among all option types. +type SourceOption interface { + configureX509Source(*x509SourceConfig) + configureJWTSource(*jwtSourceConfig) + configureBundleSource(*bundleSourceConfig) +} + +// WithClient provides a Client for the source to use. If unset, a new Client +// will be created. +func WithClient(client *Client) SourceOption { + return withClient{client: client} +} + +// WithClientOptions controls the options used to create a new Client for the +// source. This option will be ignored if WithClient is used. +func WithClientOptions(options ...ClientOption) SourceOption { + return withClientOptions{options: options} +} + +// X509SourceOption is an option for the X509Source. A SourceOption is also an +// X509SourceOption. +type X509SourceOption interface { + configureX509Source(*x509SourceConfig) +} + +// WithDefaultX509SVIDPicker provides a function that is used to determine the +// default X509-SVID when more than one is provided by the Workload API. By +// default, the first X509-SVID in the list returned by the Workload API is +// used. +func WithDefaultX509SVIDPicker(picker func([]*x509svid.SVID) *x509svid.SVID) X509SourceOption { + return withDefaultX509SVIDPicker{picker: picker} +} + +// JWTSourceOption is an option for the JWTSource. A SourceOption is also a +// JWTSourceOption. +type JWTSourceOption interface { + configureJWTSource(*jwtSourceConfig) +} + +// BundleSourceOption is an option for the BundleSource. A SourceOption is also +// a BundleSourceOption. 
+type BundleSourceOption interface { + configureBundleSource(*bundleSourceConfig) +} + +type clientConfig struct { + address string + namedPipeName string + dialOptions []grpc.DialOption + log logger.Logger +} + +type clientOption func(*clientConfig) + +func (fn clientOption) configureClient(config *clientConfig) { + fn(config) +} + +type x509SourceConfig struct { + watcher watcherConfig + picker func([]*x509svid.SVID) *x509svid.SVID +} + +type jwtSourceConfig struct { + watcher watcherConfig +} + +type bundleSourceConfig struct { + watcher watcherConfig +} + +type withClient struct { + client *Client +} + +func (o withClient) configureX509Source(config *x509SourceConfig) { + config.watcher.client = o.client +} + +func (o withClient) configureJWTSource(config *jwtSourceConfig) { + config.watcher.client = o.client +} + +func (o withClient) configureBundleSource(config *bundleSourceConfig) { + config.watcher.client = o.client +} + +type withClientOptions struct { + options []ClientOption +} + +func (o withClientOptions) configureX509Source(config *x509SourceConfig) { + config.watcher.clientOptions = o.options +} + +func (o withClientOptions) configureJWTSource(config *jwtSourceConfig) { + config.watcher.clientOptions = o.options +} + +func (o withClientOptions) configureBundleSource(config *bundleSourceConfig) { + config.watcher.clientOptions = o.options +} + +type withDefaultX509SVIDPicker struct { + picker func([]*x509svid.SVID) *x509svid.SVID +} + +func (o withDefaultX509SVIDPicker) configureX509Source(config *x509SourceConfig) { + config.picker = o.picker +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option_windows.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option_windows.go new file mode 100644 index 00000000000..c06e5338ffe --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/option_windows.go @@ -0,0 +1,12 @@ +//go:build windows +// +build windows + +package workloadapi + +// WithNamedPipeName provides a Pipe 
Name for the Workload API +// endpoint in the form \\.\pipe\. +func WithNamedPipeName(pipeName string) ClientOption { + return clientOption(func(c *clientConfig) { + c.namedPipeName = pipeName + }) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/watcher.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/watcher.go new file mode 100644 index 00000000000..f110e07386b --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/watcher.go @@ -0,0 +1,191 @@ +package workloadapi + +import ( + "context" + "sync" + + "github.com/spiffe/go-spiffe/v2/bundle/jwtbundle" + "github.com/spiffe/go-spiffe/v2/svid/jwtsvid" + "github.com/zeebo/errs" +) + +type sourceClient interface { + WatchX509Context(context.Context, X509ContextWatcher) error + WatchJWTBundles(context.Context, JWTBundleWatcher) error + FetchJWTSVID(context.Context, jwtsvid.Params) (*jwtsvid.SVID, error) + FetchJWTSVIDs(context.Context, jwtsvid.Params) ([]*jwtsvid.SVID, error) + Close() error +} + +type watcherConfig struct { + client sourceClient + clientOptions []ClientOption +} + +type watcher struct { + updatedCh chan struct{} + + client sourceClient + ownsClient bool + + cancel func() + wg sync.WaitGroup + + closeMtx sync.Mutex + closed bool + closeErr error + + x509ContextFn func(*X509Context) + x509ContextSet chan struct{} + x509ContextSetOnce sync.Once + + jwtBundlesFn func(*jwtbundle.Set) + jwtBundlesSet chan struct{} + jwtBundlesSetOnce sync.Once +} + +func newWatcher(ctx context.Context, config watcherConfig, x509ContextFn func(*X509Context), jwtBundlesFn func(*jwtbundle.Set)) (_ *watcher, err error) { + w := &watcher{ + updatedCh: make(chan struct{}, 1), + client: config.client, + cancel: func() {}, + x509ContextFn: x509ContextFn, + x509ContextSet: make(chan struct{}), + jwtBundlesFn: jwtBundlesFn, + jwtBundlesSet: make(chan struct{}), + } + + // If this function fails, we need to clean up the source. 
+ defer func() { + if err != nil { + err = errs.Combine(err, w.Close()) + } + }() + + // Initialize a new client unless one is provided by the options + if w.client == nil { + client, err := New(ctx, config.clientOptions...) + if err != nil { + return nil, err + } + w.client = client + w.ownsClient = true + } + + errCh := make(chan error, 2) + waitFor := func(has <-chan struct{}) error { + select { + case <-has: + return nil + case err := <-errCh: + return err + case <-ctx.Done(): + return ctx.Err() + } + } + + // Kick up a background goroutine that watches the Workload API for + // updates. + var watchCtx context.Context + watchCtx, w.cancel = context.WithCancel(context.Background()) + + if w.x509ContextFn != nil { + w.wg.Add(1) + go func() { + defer w.wg.Done() + errCh <- w.client.WatchX509Context(watchCtx, w) + }() + if err := waitFor(w.x509ContextSet); err != nil { + return nil, err + } + } + + if w.jwtBundlesFn != nil { + w.wg.Add(1) + go func() { + defer w.wg.Done() + errCh <- w.client.WatchJWTBundles(watchCtx, w) + }() + if err := waitFor(w.jwtBundlesSet); err != nil { + return nil, err + } + } + + // Drain the update channel since this function blocks until an update and + // don't want callers to think there was an update on the source right + // after it was initialized. If we ever allow the watcher to be initialzed + // without waiting, this reset should be removed. + w.drainUpdated() + + return w, nil +} + +// Close closes the watcher, dropping the connection to the Workload API. +func (w *watcher) Close() error { + w.closeMtx.Lock() + defer w.closeMtx.Unlock() + + if !w.closed { + w.cancel() + w.wg.Wait() + + // Close() can be called by New() to close a partially intialized source. + // Only close the client if it has been set and the source owns it. 
+ if w.client != nil && w.ownsClient { + w.closeErr = w.client.Close() + } + w.closed = true + } + return w.closeErr +} + +func (w *watcher) OnX509ContextUpdate(x509Context *X509Context) { + w.x509ContextFn(x509Context) + w.x509ContextSetOnce.Do(func() { + close(w.x509ContextSet) + }) + w.triggerUpdated() +} + +func (w *watcher) OnX509ContextWatchError(err error) { + // The watcher doesn't do anything special with the error. If logging is + // desired, it should be provided to the Workload API client. +} + +func (w *watcher) OnJWTBundlesUpdate(jwtBundles *jwtbundle.Set) { + w.jwtBundlesFn(jwtBundles) + w.jwtBundlesSetOnce.Do(func() { + close(w.jwtBundlesSet) + }) + w.triggerUpdated() +} + +func (w *watcher) OnJWTBundlesWatchError(error) { + // The watcher doesn't do anything special with the error. If logging is + // desired, it should be provided to the Workload API client. +} + +func (w *watcher) WaitUntilUpdated(ctx context.Context) error { + select { + case <-w.updatedCh: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (w *watcher) Updated() <-chan struct{} { + return w.updatedCh +} + +func (w *watcher) drainUpdated() { + select { + case <-w.updatedCh: + default: + } +} + +func (w *watcher) triggerUpdated() { + w.drainUpdated() + w.updatedCh <- struct{}{} +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/x509context.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/x509context.go new file mode 100644 index 00000000000..94a9392b459 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/x509context.go @@ -0,0 +1,23 @@ +package workloadapi + +import ( + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" +) + +// X509Context conveys X.509 materials from the Workload API. +type X509Context struct { + // SVIDs is a list of workload X509-SVIDs. + SVIDs []*x509svid.SVID + + // Bundles is a set of X.509 bundles. 
+ Bundles *x509bundle.Set +} + +// Default returns the default X509-SVID (the first in the list). +// +// See the SPIFFE Workload API standard Section 5.3. +// (https://github.com/spiffe/spiffe/blob/main/standards/SPIFFE_Workload_API.md#53-default-identity) +func (x *X509Context) DefaultSVID() *x509svid.SVID { + return x.SVIDs[0] +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/x509source.go b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/x509source.go new file mode 100644 index 00000000000..fd2824fd39a --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/workloadapi/x509source.go @@ -0,0 +1,123 @@ +package workloadapi + +import ( + "context" + "sync" + + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/spiffeid" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/zeebo/errs" +) + +var x509sourceErr = errs.Class("x509source") + +// X509Source is a source of X509-SVIDs and X.509 bundles maintained via the +// Workload API. +type X509Source struct { + watcher *watcher + picker func([]*x509svid.SVID) *x509svid.SVID + + mtx sync.RWMutex + svid *x509svid.SVID + bundles *x509bundle.Set + + closeMtx sync.RWMutex + closed bool +} + +// NewX509Source creates a new X509Source. It blocks until the initial update +// has been received from the Workload API. +func NewX509Source(ctx context.Context, options ...X509SourceOption) (_ *X509Source, err error) { + config := &x509SourceConfig{} + for _, option := range options { + option.configureX509Source(config) + } + + s := &X509Source{ + picker: config.picker, + } + + s.watcher, err = newWatcher(ctx, config.watcher, s.setX509Context, nil) + if err != nil { + return nil, err + } + + return s, nil +} + +// Close closes the source, dropping the connection to the Workload API. +// Other source methods will return an error after Close has been called. +// The underlying Workload API client will also be closed if it is owned by +// the X509Source (i.e. 
not provided via the WithClient option). +func (s *X509Source) Close() (err error) { + s.closeMtx.Lock() + s.closed = true + s.closeMtx.Unlock() + + return s.watcher.Close() +} + +// GetX509SVID returns an X509-SVID from the source. It implements the +// x509svid.Source interface. +func (s *X509Source) GetX509SVID() (*x509svid.SVID, error) { + if err := s.checkClosed(); err != nil { + return nil, err + } + + s.mtx.RLock() + svid := s.svid + s.mtx.RUnlock() + + if svid == nil { + // This is a defensive check and should be unreachable since the source + // waits for the initial Workload API update before returning from + // New(). + return nil, x509sourceErr.New("missing X509-SVID") + } + return svid, nil +} + +// GetX509BundleForTrustDomain returns the X.509 bundle for the given trust +// domain. It implements the x509bundle.Source interface. +func (s *X509Source) GetX509BundleForTrustDomain(trustDomain spiffeid.TrustDomain) (*x509bundle.Bundle, error) { + if err := s.checkClosed(); err != nil { + return nil, err + } + + return s.bundles.GetX509BundleForTrustDomain(trustDomain) +} + +// WaitUntilUpdated waits until the source is updated or the context is done, +// in which case ctx.Err() is returned. +func (s *X509Source) WaitUntilUpdated(ctx context.Context) error { + return s.watcher.WaitUntilUpdated(ctx) +} + +// Updated returns a channel that is sent on whenever the source is updated. 
+func (s *X509Source) Updated() <-chan struct{} { + return s.watcher.Updated() +} + +func (s *X509Source) setX509Context(x509Context *X509Context) { + var svid *x509svid.SVID + if s.picker == nil { + svid = x509Context.DefaultSVID() + } else { + svid = s.picker(x509Context.SVIDs) + } + + s.mtx.Lock() + defer s.mtx.Unlock() + s.svid = svid + s.bundles = x509Context.Bundles +} + +func (s *X509Source) checkClosed() error { + s.closeMtx.RLock() + defer s.closeMtx.RUnlock() + if s.closed { + return x509sourceErr.New("source is closed") + } + return nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/LICENSE b/vendor/github.com/spiffe/spire-api-sdk/LICENSE new file mode 100644 index 00000000000..261eeb9e9f8 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go new file mode 100644 index 00000000000..b148b782ca5 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go @@ -0,0 +1,1473 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/server/entry/v1/entry.proto + +package entryv1 + +import ( + types "github.com/spiffe/spire-api-sdk/proto/spire/api/types" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CountEntriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CountEntriesRequest) Reset() { + *x = CountEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CountEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CountEntriesRequest) ProtoMessage() {} + +func (x *CountEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CountEntriesRequest.ProtoReflect.Descriptor instead. 
+func (*CountEntriesRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{0} +} + +type CountEntriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *CountEntriesResponse) Reset() { + *x = CountEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CountEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CountEntriesResponse) ProtoMessage() {} + +func (x *CountEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CountEntriesResponse.ProtoReflect.Descriptor instead. +func (*CountEntriesResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{1} +} + +func (x *CountEntriesResponse) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +type ListEntriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Filters the entries returned in the response. + Filter *ListEntriesRequest_Filter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // An output mask indicating the entry fields set in the response. + OutputMask *types.EntryMask `protobuf:"bytes,2,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` + // The maximum number of results to return. 
The server may further + // constrain this value, or if zero, choose its own. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The next_page_token value returned from a previous request, if any. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListEntriesRequest) Reset() { + *x = ListEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesRequest) ProtoMessage() {} + +func (x *ListEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEntriesRequest.ProtoReflect.Descriptor instead. +func (*ListEntriesRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{2} +} + +func (x *ListEntriesRequest) GetFilter() *ListEntriesRequest_Filter { + if x != nil { + return x.Filter + } + return nil +} + +func (x *ListEntriesRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +func (x *ListEntriesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListEntriesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +type ListEntriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of entries. 
+ Entries []*types.Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // The page token for the next request. Empty if there are no more results. + // This field should be checked by clients even when a page_size was not + // requested, since the server may choose its own (see page_size). + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListEntriesResponse) Reset() { + *x = ListEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesResponse) ProtoMessage() {} + +func (x *ListEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEntriesResponse.ProtoReflect.Descriptor instead. +func (*ListEntriesResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{3} +} + +func (x *ListEntriesResponse) GetEntries() []*types.Entry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *ListEntriesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +type GetEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. ID of the entry to get. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // An output mask indicating the entry fields set in the response. 
+ OutputMask *types.EntryMask `protobuf:"bytes,2,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` +} + +func (x *GetEntryRequest) Reset() { + *x = GetEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEntryRequest) ProtoMessage() {} + +func (x *GetEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEntryRequest.ProtoReflect.Descriptor instead. +func (*GetEntryRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{4} +} + +func (x *GetEntryRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *GetEntryRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +type BatchCreateEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The entries to be created. The entry ID field is output only, and will + // be ignored here. + Entries []*types.Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // An output mask indicating the entry fields set in the response. 
+ OutputMask *types.EntryMask `protobuf:"bytes,2,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` +} + +func (x *BatchCreateEntryRequest) Reset() { + *x = BatchCreateEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchCreateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchCreateEntryRequest) ProtoMessage() {} + +func (x *BatchCreateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchCreateEntryRequest.ProtoReflect.Descriptor instead. +func (*BatchCreateEntryRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{5} +} + +func (x *BatchCreateEntryRequest) GetEntries() []*types.Entry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *BatchCreateEntryRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +type BatchCreateEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Result for each entry in the request (order is maintained). 
+ Results []*BatchCreateEntryResponse_Result `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *BatchCreateEntryResponse) Reset() { + *x = BatchCreateEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchCreateEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchCreateEntryResponse) ProtoMessage() {} + +func (x *BatchCreateEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchCreateEntryResponse.ProtoReflect.Descriptor instead. +func (*BatchCreateEntryResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{6} +} + +func (x *BatchCreateEntryResponse) GetResults() []*BatchCreateEntryResponse_Result { + if x != nil { + return x.Results + } + return nil +} + +type BatchUpdateEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The entries to be updated. + Entries []*types.Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // An input mask indicating what entry fields should be updated. + InputMask *types.EntryMask `protobuf:"bytes,2,opt,name=input_mask,json=inputMask,proto3" json:"input_mask,omitempty"` + // An output mask indicating what entry fields are set in the response. 
+ OutputMask *types.EntryMask `protobuf:"bytes,3,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` +} + +func (x *BatchUpdateEntryRequest) Reset() { + *x = BatchUpdateEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchUpdateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchUpdateEntryRequest) ProtoMessage() {} + +func (x *BatchUpdateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchUpdateEntryRequest.ProtoReflect.Descriptor instead. +func (*BatchUpdateEntryRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{7} +} + +func (x *BatchUpdateEntryRequest) GetEntries() []*types.Entry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *BatchUpdateEntryRequest) GetInputMask() *types.EntryMask { + if x != nil { + return x.InputMask + } + return nil +} + +func (x *BatchUpdateEntryRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +type BatchUpdateEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Result for each entry in the request (order is maintained). 
+ Results []*BatchUpdateEntryResponse_Result `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *BatchUpdateEntryResponse) Reset() { + *x = BatchUpdateEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchUpdateEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchUpdateEntryResponse) ProtoMessage() {} + +func (x *BatchUpdateEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchUpdateEntryResponse.ProtoReflect.Descriptor instead. +func (*BatchUpdateEntryResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{8} +} + +func (x *BatchUpdateEntryResponse) GetResults() []*BatchUpdateEntryResponse_Result { + if x != nil { + return x.Results + } + return nil +} + +type BatchDeleteEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // IDs of the entries to delete. 
+ Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` +} + +func (x *BatchDeleteEntryRequest) Reset() { + *x = BatchDeleteEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchDeleteEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchDeleteEntryRequest) ProtoMessage() {} + +func (x *BatchDeleteEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchDeleteEntryRequest.ProtoReflect.Descriptor instead. +func (*BatchDeleteEntryRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{9} +} + +func (x *BatchDeleteEntryRequest) GetIds() []string { + if x != nil { + return x.Ids + } + return nil +} + +type BatchDeleteEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Result for each entry ID in the request (order is maintained). 
+ Results []*BatchDeleteEntryResponse_Result `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *BatchDeleteEntryResponse) Reset() { + *x = BatchDeleteEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchDeleteEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchDeleteEntryResponse) ProtoMessage() {} + +func (x *BatchDeleteEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchDeleteEntryResponse.ProtoReflect.Descriptor instead. +func (*BatchDeleteEntryResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{10} +} + +func (x *BatchDeleteEntryResponse) GetResults() []*BatchDeleteEntryResponse_Result { + if x != nil { + return x.Results + } + return nil +} + +type GetAuthorizedEntriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An output mask indicating which fields are set in the response. 
+ OutputMask *types.EntryMask `protobuf:"bytes,1,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` +} + +func (x *GetAuthorizedEntriesRequest) Reset() { + *x = GetAuthorizedEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAuthorizedEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAuthorizedEntriesRequest) ProtoMessage() {} + +func (x *GetAuthorizedEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAuthorizedEntriesRequest.ProtoReflect.Descriptor instead. +func (*GetAuthorizedEntriesRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{11} +} + +func (x *GetAuthorizedEntriesRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +type GetAuthorizedEntriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The authorized entries. 
+ Entries []*types.Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *GetAuthorizedEntriesResponse) Reset() { + *x = GetAuthorizedEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAuthorizedEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAuthorizedEntriesResponse) ProtoMessage() {} + +func (x *GetAuthorizedEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAuthorizedEntriesResponse.ProtoReflect.Descriptor instead. +func (*GetAuthorizedEntriesResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{12} +} + +func (x *GetAuthorizedEntriesResponse) GetEntries() []*types.Entry { + if x != nil { + return x.Entries + } + return nil +} + +type ListEntriesRequest_Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BySpiffeId *types.SPIFFEID `protobuf:"bytes,1,opt,name=by_spiffe_id,json=bySpiffeId,proto3" json:"by_spiffe_id,omitempty"` + ByParentId *types.SPIFFEID `protobuf:"bytes,2,opt,name=by_parent_id,json=byParentId,proto3" json:"by_parent_id,omitempty"` + BySelectors *types.SelectorMatch `protobuf:"bytes,3,opt,name=by_selectors,json=bySelectors,proto3" json:"by_selectors,omitempty"` + ByFederatesWith *types.FederatesWithMatch `protobuf:"bytes,4,opt,name=by_federates_with,json=byFederatesWith,proto3" json:"by_federates_with,omitempty"` +} + +func (x *ListEntriesRequest_Filter) Reset() { + *x = 
ListEntriesRequest_Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesRequest_Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesRequest_Filter) ProtoMessage() {} + +func (x *ListEntriesRequest_Filter) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEntriesRequest_Filter.ProtoReflect.Descriptor instead. +func (*ListEntriesRequest_Filter) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *ListEntriesRequest_Filter) GetBySpiffeId() *types.SPIFFEID { + if x != nil { + return x.BySpiffeId + } + return nil +} + +func (x *ListEntriesRequest_Filter) GetByParentId() *types.SPIFFEID { + if x != nil { + return x.ByParentId + } + return nil +} + +func (x *ListEntriesRequest_Filter) GetBySelectors() *types.SelectorMatch { + if x != nil { + return x.BySelectors + } + return nil +} + +func (x *ListEntriesRequest_Filter) GetByFederatesWith() *types.FederatesWithMatch { + if x != nil { + return x.ByFederatesWith + } + return nil +} + +type BatchCreateEntryResponse_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status of creating the entry. If status code will be + // ALREADY_EXISTS if a similar entry already exists. An entry is + // similar if it has the same spiffe_id, parent_id, and selectors. 
+ Status *types.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // The entry that was created (.e.g status code is OK) or that already + // exists (i.e. status code is ALREADY_EXISTS). + // + // If the status code is any other value, this field will not be set. + Entry *types.Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *BatchCreateEntryResponse_Result) Reset() { + *x = BatchCreateEntryResponse_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchCreateEntryResponse_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchCreateEntryResponse_Result) ProtoMessage() {} + +func (x *BatchCreateEntryResponse_Result) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchCreateEntryResponse_Result.ProtoReflect.Descriptor instead. +func (*BatchCreateEntryResponse_Result) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *BatchCreateEntryResponse_Result) GetStatus() *types.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *BatchCreateEntryResponse_Result) GetEntry() *types.Entry { + if x != nil { + return x.Entry + } + return nil +} + +type BatchUpdateEntryResponse_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status of creating the entry. + Status *types.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // The entry that was updated. 
If the status is OK, it will be the + // entry that was updated. If the status is any other value, this field + // will not be set. + Entry *types.Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *BatchUpdateEntryResponse_Result) Reset() { + *x = BatchUpdateEntryResponse_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchUpdateEntryResponse_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchUpdateEntryResponse_Result) ProtoMessage() {} + +func (x *BatchUpdateEntryResponse_Result) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchUpdateEntryResponse_Result.ProtoReflect.Descriptor instead. +func (*BatchUpdateEntryResponse_Result) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *BatchUpdateEntryResponse_Result) GetStatus() *types.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *BatchUpdateEntryResponse_Result) GetEntry() *types.Entry { + if x != nil { + return x.Entry + } + return nil +} + +type BatchDeleteEntryResponse_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status of creating the entry. + Status *types.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // The ID of the entry that was deleted. 
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *BatchDeleteEntryResponse_Result) Reset() { + *x = BatchDeleteEntryResponse_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchDeleteEntryResponse_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchDeleteEntryResponse_Result) ProtoMessage() {} + +func (x *BatchDeleteEntryResponse_Result) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchDeleteEntryResponse_Result.ProtoReflect.Descriptor instead. +func (*BatchDeleteEntryResponse_Result) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{10, 0} +} + +func (x *BatchDeleteEntryResponse_Result) GetStatus() *types.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *BatchDeleteEntryResponse_Result) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +var File_spire_api_server_entry_v1_entry_proto protoreflect.FileDescriptor + +var file_spire_api_server_entry_v1_entry_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x76, 0x31, 0x1a, 0x1b, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, + 
0x70, 0x65, 0x73, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x77, 0x69, 0x74, 0x68, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2c, 0x0a, 0x14, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xf4, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4c, + 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, + 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x0b, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 
0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, + 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x96, 0x02, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x3b, 0x0a, 0x0c, 0x62, 0x79, 0x5f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, + 0x44, 0x52, 0x0a, 0x62, 0x79, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x3b, 0x0a, + 0x0c, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x52, 0x0a, + 0x62, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x41, 0x0a, 0x0c, 0x62, 0x79, + 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x52, 0x0b, 0x62, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x4f, 0x0a, + 0x11, 0x62, 0x79, 0x5f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 
0x73, 0x5f, 0x77, 0x69, + 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x46, 0x65, 0x64, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x0f, 0x62, + 0x79, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x22, 0x6f, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x5e, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, + 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x22, + 0x88, 0x01, 0x0a, 0x17, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x07, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, + 0x70, 0x69, 
0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x3b, 0x0a, + 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xd9, 0x01, 0x0a, 0x18, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x67, 0x0a, + 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0xc3, 0x01, 0x0a, 0x17, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 
0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x30, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x12, + 0x3b, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x73, 0x6b, + 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xd9, 0x01, 0x0a, + 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, 0x70, 0x69, + 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, + 0x67, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 
0x69, 0x72, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x2b, 0x0a, 0x17, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0xbb, 0x01, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x54, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x49, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x22, 0x5a, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x64, 
0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, + 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x22, + 0x50, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x30, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x32, 0xb7, 0x06, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x6f, 0x0a, 0x0c, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x0b, + 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 
0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x73, 0x70, 0x69, + 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x08, 0x47, 0x65, + 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x7b, 0x0a, 0x10, 0x42, 0x61, + 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x32, + 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x10, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x32, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, + 
0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x33, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x10, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x32, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x87, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x36, 0x2e, 0x73, 0x70, 0x69, + 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x49, 0x5a, 0x47, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, + 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x3b, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_server_entry_v1_entry_proto_rawDescOnce sync.Once + file_spire_api_server_entry_v1_entry_proto_rawDescData = file_spire_api_server_entry_v1_entry_proto_rawDesc +) + +func file_spire_api_server_entry_v1_entry_proto_rawDescGZIP() []byte { + file_spire_api_server_entry_v1_entry_proto_rawDescOnce.Do(func() { + file_spire_api_server_entry_v1_entry_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_server_entry_v1_entry_proto_rawDescData) + }) + return file_spire_api_server_entry_v1_entry_proto_rawDescData +} + +var file_spire_api_server_entry_v1_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_spire_api_server_entry_v1_entry_proto_goTypes = []interface{}{ + (*CountEntriesRequest)(nil), // 0: spire.api.server.entry.v1.CountEntriesRequest + (*CountEntriesResponse)(nil), // 1: spire.api.server.entry.v1.CountEntriesResponse + (*ListEntriesRequest)(nil), // 2: spire.api.server.entry.v1.ListEntriesRequest + (*ListEntriesResponse)(nil), // 3: spire.api.server.entry.v1.ListEntriesResponse + (*GetEntryRequest)(nil), // 4: spire.api.server.entry.v1.GetEntryRequest + (*BatchCreateEntryRequest)(nil), // 5: spire.api.server.entry.v1.BatchCreateEntryRequest + (*BatchCreateEntryResponse)(nil), // 6: spire.api.server.entry.v1.BatchCreateEntryResponse + (*BatchUpdateEntryRequest)(nil), // 7: spire.api.server.entry.v1.BatchUpdateEntryRequest + (*BatchUpdateEntryResponse)(nil), // 8: 
spire.api.server.entry.v1.BatchUpdateEntryResponse + (*BatchDeleteEntryRequest)(nil), // 9: spire.api.server.entry.v1.BatchDeleteEntryRequest + (*BatchDeleteEntryResponse)(nil), // 10: spire.api.server.entry.v1.BatchDeleteEntryResponse + (*GetAuthorizedEntriesRequest)(nil), // 11: spire.api.server.entry.v1.GetAuthorizedEntriesRequest + (*GetAuthorizedEntriesResponse)(nil), // 12: spire.api.server.entry.v1.GetAuthorizedEntriesResponse + (*ListEntriesRequest_Filter)(nil), // 13: spire.api.server.entry.v1.ListEntriesRequest.Filter + (*BatchCreateEntryResponse_Result)(nil), // 14: spire.api.server.entry.v1.BatchCreateEntryResponse.Result + (*BatchUpdateEntryResponse_Result)(nil), // 15: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result + (*BatchDeleteEntryResponse_Result)(nil), // 16: spire.api.server.entry.v1.BatchDeleteEntryResponse.Result + (*types.EntryMask)(nil), // 17: spire.api.types.EntryMask + (*types.Entry)(nil), // 18: spire.api.types.Entry + (*types.SPIFFEID)(nil), // 19: spire.api.types.SPIFFEID + (*types.SelectorMatch)(nil), // 20: spire.api.types.SelectorMatch + (*types.FederatesWithMatch)(nil), // 21: spire.api.types.FederatesWithMatch + (*types.Status)(nil), // 22: spire.api.types.Status +} +var file_spire_api_server_entry_v1_entry_proto_depIdxs = []int32{ + 13, // 0: spire.api.server.entry.v1.ListEntriesRequest.filter:type_name -> spire.api.server.entry.v1.ListEntriesRequest.Filter + 17, // 1: spire.api.server.entry.v1.ListEntriesRequest.output_mask:type_name -> spire.api.types.EntryMask + 18, // 2: spire.api.server.entry.v1.ListEntriesResponse.entries:type_name -> spire.api.types.Entry + 17, // 3: spire.api.server.entry.v1.GetEntryRequest.output_mask:type_name -> spire.api.types.EntryMask + 18, // 4: spire.api.server.entry.v1.BatchCreateEntryRequest.entries:type_name -> spire.api.types.Entry + 17, // 5: spire.api.server.entry.v1.BatchCreateEntryRequest.output_mask:type_name -> spire.api.types.EntryMask + 14, // 6: 
spire.api.server.entry.v1.BatchCreateEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchCreateEntryResponse.Result + 18, // 7: spire.api.server.entry.v1.BatchUpdateEntryRequest.entries:type_name -> spire.api.types.Entry + 17, // 8: spire.api.server.entry.v1.BatchUpdateEntryRequest.input_mask:type_name -> spire.api.types.EntryMask + 17, // 9: spire.api.server.entry.v1.BatchUpdateEntryRequest.output_mask:type_name -> spire.api.types.EntryMask + 15, // 10: spire.api.server.entry.v1.BatchUpdateEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchUpdateEntryResponse.Result + 16, // 11: spire.api.server.entry.v1.BatchDeleteEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchDeleteEntryResponse.Result + 17, // 12: spire.api.server.entry.v1.GetAuthorizedEntriesRequest.output_mask:type_name -> spire.api.types.EntryMask + 18, // 13: spire.api.server.entry.v1.GetAuthorizedEntriesResponse.entries:type_name -> spire.api.types.Entry + 19, // 14: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_spiffe_id:type_name -> spire.api.types.SPIFFEID + 19, // 15: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_parent_id:type_name -> spire.api.types.SPIFFEID + 20, // 16: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_selectors:type_name -> spire.api.types.SelectorMatch + 21, // 17: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_federates_with:type_name -> spire.api.types.FederatesWithMatch + 22, // 18: spire.api.server.entry.v1.BatchCreateEntryResponse.Result.status:type_name -> spire.api.types.Status + 18, // 19: spire.api.server.entry.v1.BatchCreateEntryResponse.Result.entry:type_name -> spire.api.types.Entry + 22, // 20: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result.status:type_name -> spire.api.types.Status + 18, // 21: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result.entry:type_name -> spire.api.types.Entry + 22, // 22: spire.api.server.entry.v1.BatchDeleteEntryResponse.Result.status:type_name 
-> spire.api.types.Status + 0, // 23: spire.api.server.entry.v1.Entry.CountEntries:input_type -> spire.api.server.entry.v1.CountEntriesRequest + 2, // 24: spire.api.server.entry.v1.Entry.ListEntries:input_type -> spire.api.server.entry.v1.ListEntriesRequest + 4, // 25: spire.api.server.entry.v1.Entry.GetEntry:input_type -> spire.api.server.entry.v1.GetEntryRequest + 5, // 26: spire.api.server.entry.v1.Entry.BatchCreateEntry:input_type -> spire.api.server.entry.v1.BatchCreateEntryRequest + 7, // 27: spire.api.server.entry.v1.Entry.BatchUpdateEntry:input_type -> spire.api.server.entry.v1.BatchUpdateEntryRequest + 9, // 28: spire.api.server.entry.v1.Entry.BatchDeleteEntry:input_type -> spire.api.server.entry.v1.BatchDeleteEntryRequest + 11, // 29: spire.api.server.entry.v1.Entry.GetAuthorizedEntries:input_type -> spire.api.server.entry.v1.GetAuthorizedEntriesRequest + 1, // 30: spire.api.server.entry.v1.Entry.CountEntries:output_type -> spire.api.server.entry.v1.CountEntriesResponse + 3, // 31: spire.api.server.entry.v1.Entry.ListEntries:output_type -> spire.api.server.entry.v1.ListEntriesResponse + 18, // 32: spire.api.server.entry.v1.Entry.GetEntry:output_type -> spire.api.types.Entry + 6, // 33: spire.api.server.entry.v1.Entry.BatchCreateEntry:output_type -> spire.api.server.entry.v1.BatchCreateEntryResponse + 8, // 34: spire.api.server.entry.v1.Entry.BatchUpdateEntry:output_type -> spire.api.server.entry.v1.BatchUpdateEntryResponse + 10, // 35: spire.api.server.entry.v1.Entry.BatchDeleteEntry:output_type -> spire.api.server.entry.v1.BatchDeleteEntryResponse + 12, // 36: spire.api.server.entry.v1.Entry.GetAuthorizedEntries:output_type -> spire.api.server.entry.v1.GetAuthorizedEntriesResponse + 30, // [30:37] is the sub-list for method output_type + 23, // [23:30] is the sub-list for method input_type + 23, // [23:23] is the sub-list for extension type_name + 23, // [23:23] is the sub-list for extension extendee + 0, // [0:23] is the sub-list for field type_name +} 
+ +func init() { file_spire_api_server_entry_v1_entry_proto_init() } +func file_spire_api_server_entry_v1_entry_proto_init() { + if File_spire_api_server_entry_v1_entry_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_server_entry_v1_entry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CountEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CountEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchCreateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[6].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*BatchCreateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchUpdateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchUpdateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAuthorizedEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAuthorizedEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_spire_api_server_entry_v1_entry_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesRequest_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchCreateEntryResponse_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchUpdateEntryResponse_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteEntryResponse_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_server_entry_v1_entry_proto_rawDesc, + NumEnums: 0, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_spire_api_server_entry_v1_entry_proto_goTypes, + DependencyIndexes: file_spire_api_server_entry_v1_entry_proto_depIdxs, + MessageInfos: file_spire_api_server_entry_v1_entry_proto_msgTypes, + }.Build() + File_spire_api_server_entry_v1_entry_proto = out.File + file_spire_api_server_entry_v1_entry_proto_rawDesc = nil + file_spire_api_server_entry_v1_entry_proto_goTypes = nil + file_spire_api_server_entry_v1_entry_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto 
b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto new file mode 100644 index 00000000000..28fb498bfba --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto @@ -0,0 +1,176 @@ +syntax = "proto3"; +package spire.api.server.entry.v1; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1;entryv1"; + +import "spire/api/types/entry.proto"; +import "spire/api/types/federateswith.proto"; +import "spire/api/types/selector.proto"; +import "spire/api/types/spiffeid.proto"; +import "spire/api/types/status.proto"; + +// Manages registration entries stored by the SPIRE Server. +service Entry { + // Count entries. + // + // The caller must be local or present an admin X509-SVID. + rpc CountEntries(CountEntriesRequest) returns (CountEntriesResponse); + + // Lists entries. + // + // The caller must be local or present an admin X509-SVID. + rpc ListEntries(ListEntriesRequest) returns (ListEntriesResponse); + + // Gets an entry. If the entry does not exist, NOT_FOUND is returned. + // + // The caller must be local or present an admin X509-SVID. + rpc GetEntry(GetEntryRequest) returns (spire.api.types.Entry); + + // Batch creates one or more entries. + // + // The caller must be local or present an admin X509-SVID. + rpc BatchCreateEntry(BatchCreateEntryRequest) returns (BatchCreateEntryResponse); + + // Batch updates one or more entries. + // + // The caller must be local or present an admin X509-SVID. + rpc BatchUpdateEntry(BatchUpdateEntryRequest) returns (BatchUpdateEntryResponse); + + // Batch deletes one or more entries. + // + // The caller must be local or present an admin X509-SVID. + rpc BatchDeleteEntry(BatchDeleteEntryRequest) returns (BatchDeleteEntryResponse); + + // Gets the entries the caller is authorized for. + // + // The caller must present an active agent X509-SVID. See the Agent + // AttestAgent/RenewAgent RPCs. 
+ rpc GetAuthorizedEntries(GetAuthorizedEntriesRequest) returns (GetAuthorizedEntriesResponse); +} + +message CountEntriesRequest { +} + +message CountEntriesResponse { + int32 count = 1; +} + +message ListEntriesRequest { + message Filter { + spire.api.types.SPIFFEID by_spiffe_id = 1; + spire.api.types.SPIFFEID by_parent_id = 2; + spire.api.types.SelectorMatch by_selectors = 3; + spire.api.types.FederatesWithMatch by_federates_with = 4; + } + + // Filters the entries returned in the response. + Filter filter = 1; + + // An output mask indicating the entry fields set in the response. + spire.api.types.EntryMask output_mask = 2; + + // The maximum number of results to return. The server may further + // constrain this value, or if zero, choose its own. + int32 page_size = 3; + + // The next_page_token value returned from a previous request, if any. + string page_token = 4; +} + +message ListEntriesResponse { + // The list of entries. + repeated spire.api.types.Entry entries = 1; + + // The page token for the next request. Empty if there are no more results. + // This field should be checked by clients even when a page_size was not + // requested, since the server may choose its own (see page_size). + string next_page_token = 2; +} + +message GetEntryRequest { + // Required. ID of the entry to get. + string id = 1; + + // An output mask indicating the entry fields set in the response. + spire.api.types.EntryMask output_mask = 2; +} + +message BatchCreateEntryRequest { + // The entries to be created. The entry ID field is output only, and will + // be ignored here. + repeated spire.api.types.Entry entries = 1; + + // An output mask indicating the entry fields set in the response. + spire.api.types.EntryMask output_mask = 2; +} + +message BatchCreateEntryResponse { + message Result { + // The status of creating the entry. If status code will be + // ALREADY_EXISTS if a similar entry already exists. 
An entry is + // similar if it has the same spiffe_id, parent_id, and selectors. + spire.api.types.Status status = 1; + + // The entry that was created (.e.g status code is OK) or that already + // exists (i.e. status code is ALREADY_EXISTS). + // + // If the status code is any other value, this field will not be set. + spire.api.types.Entry entry = 2; + } + + // Result for each entry in the request (order is maintained). + repeated Result results = 1; +} + +message BatchUpdateEntryRequest { + // The entries to be updated. + repeated spire.api.types.Entry entries = 1; + + // An input mask indicating what entry fields should be updated. + spire.api.types.EntryMask input_mask = 2; + + // An output mask indicating what entry fields are set in the response. + spire.api.types.EntryMask output_mask = 3; +} + +message BatchUpdateEntryResponse { + message Result { + // The status of creating the entry. + spire.api.types.Status status = 1; + + // The entry that was updated. If the status is OK, it will be the + // entry that was updated. If the status is any other value, this field + // will not be set. + spire.api.types.Entry entry = 2; + } + + // Result for each entry in the request (order is maintained). + repeated Result results = 1; +} + +message BatchDeleteEntryRequest { + // IDs of the entries to delete. + repeated string ids = 1; +} + +message BatchDeleteEntryResponse { + message Result { + // The status of creating the entry. + spire.api.types.Status status = 1; + + // The ID of the entry that was deleted. + string id = 2; + } + + // Result for each entry ID in the request (order is maintained). + repeated Result results = 1; +} + +message GetAuthorizedEntriesRequest { + // An output mask indicating which fields are set in the response. + spire.api.types.EntryMask output_mask = 1; +} + +message GetAuthorizedEntriesResponse { + // The authorized entries. 
+ repeated spire.api.types.Entry entries = 1; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go new file mode 100644 index 00000000000..d0f6b9d0f64 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go @@ -0,0 +1,358 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package entryv1 + +import ( + context "context" + types "github.com/spiffe/spire-api-sdk/proto/spire/api/types" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion7 + +// EntryClient is the client API for Entry service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type EntryClient interface { + // Count entries. + // + // The caller must be local or present an admin X509-SVID. + CountEntries(ctx context.Context, in *CountEntriesRequest, opts ...grpc.CallOption) (*CountEntriesResponse, error) + // Lists entries. + // + // The caller must be local or present an admin X509-SVID. + ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) + // Gets an entry. If the entry does not exist, NOT_FOUND is returned. + // + // The caller must be local or present an admin X509-SVID. + GetEntry(ctx context.Context, in *GetEntryRequest, opts ...grpc.CallOption) (*types.Entry, error) + // Batch creates one or more entries. + // + // The caller must be local or present an admin X509-SVID. 
+ BatchCreateEntry(ctx context.Context, in *BatchCreateEntryRequest, opts ...grpc.CallOption) (*BatchCreateEntryResponse, error) + // Batch updates one or more entries. + // + // The caller must be local or present an admin X509-SVID. + BatchUpdateEntry(ctx context.Context, in *BatchUpdateEntryRequest, opts ...grpc.CallOption) (*BatchUpdateEntryResponse, error) + // Batch deletes one or more entries. + // + // The caller must be local or present an admin X509-SVID. + BatchDeleteEntry(ctx context.Context, in *BatchDeleteEntryRequest, opts ...grpc.CallOption) (*BatchDeleteEntryResponse, error) + // Gets the entries the caller is authorized for. + // + // The caller must present an active agent X509-SVID. See the Agent + // AttestAgent/RenewAgent RPCs. + GetAuthorizedEntries(ctx context.Context, in *GetAuthorizedEntriesRequest, opts ...grpc.CallOption) (*GetAuthorizedEntriesResponse, error) +} + +type entryClient struct { + cc grpc.ClientConnInterface +} + +func NewEntryClient(cc grpc.ClientConnInterface) EntryClient { + return &entryClient{cc} +} + +func (c *entryClient) CountEntries(ctx context.Context, in *CountEntriesRequest, opts ...grpc.CallOption) (*CountEntriesResponse, error) { + out := new(CountEntriesResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/CountEntries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) { + out := new(ListEntriesResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/ListEntries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) GetEntry(ctx context.Context, in *GetEntryRequest, opts ...grpc.CallOption) (*types.Entry, error) { + out := new(types.Entry) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/GetEntry", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) BatchCreateEntry(ctx context.Context, in *BatchCreateEntryRequest, opts ...grpc.CallOption) (*BatchCreateEntryResponse, error) { + out := new(BatchCreateEntryResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/BatchCreateEntry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) BatchUpdateEntry(ctx context.Context, in *BatchUpdateEntryRequest, opts ...grpc.CallOption) (*BatchUpdateEntryResponse, error) { + out := new(BatchUpdateEntryResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/BatchUpdateEntry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) BatchDeleteEntry(ctx context.Context, in *BatchDeleteEntryRequest, opts ...grpc.CallOption) (*BatchDeleteEntryResponse, error) { + out := new(BatchDeleteEntryResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/BatchDeleteEntry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) GetAuthorizedEntries(ctx context.Context, in *GetAuthorizedEntriesRequest, opts ...grpc.CallOption) (*GetAuthorizedEntriesResponse, error) { + out := new(GetAuthorizedEntriesResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/GetAuthorizedEntries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EntryServer is the server API for Entry service. +// All implementations must embed UnimplementedEntryServer +// for forward compatibility +type EntryServer interface { + // Count entries. + // + // The caller must be local or present an admin X509-SVID. + CountEntries(context.Context, *CountEntriesRequest) (*CountEntriesResponse, error) + // Lists entries. + // + // The caller must be local or present an admin X509-SVID. 
+ ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error) + // Gets an entry. If the entry does not exist, NOT_FOUND is returned. + // + // The caller must be local or present an admin X509-SVID. + GetEntry(context.Context, *GetEntryRequest) (*types.Entry, error) + // Batch creates one or more entries. + // + // The caller must be local or present an admin X509-SVID. + BatchCreateEntry(context.Context, *BatchCreateEntryRequest) (*BatchCreateEntryResponse, error) + // Batch updates one or more entries. + // + // The caller must be local or present an admin X509-SVID. + BatchUpdateEntry(context.Context, *BatchUpdateEntryRequest) (*BatchUpdateEntryResponse, error) + // Batch deletes one or more entries. + // + // The caller must be local or present an admin X509-SVID. + BatchDeleteEntry(context.Context, *BatchDeleteEntryRequest) (*BatchDeleteEntryResponse, error) + // Gets the entries the caller is authorized for. + // + // The caller must present an active agent X509-SVID. See the Agent + // AttestAgent/RenewAgent RPCs. + GetAuthorizedEntries(context.Context, *GetAuthorizedEntriesRequest) (*GetAuthorizedEntriesResponse, error) + mustEmbedUnimplementedEntryServer() +} + +// UnimplementedEntryServer must be embedded to have forward compatible implementations. 
+type UnimplementedEntryServer struct { +} + +func (UnimplementedEntryServer) CountEntries(context.Context, *CountEntriesRequest) (*CountEntriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CountEntries not implemented") +} +func (UnimplementedEntryServer) ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListEntries not implemented") +} +func (UnimplementedEntryServer) GetEntry(context.Context, *GetEntryRequest) (*types.Entry, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEntry not implemented") +} +func (UnimplementedEntryServer) BatchCreateEntry(context.Context, *BatchCreateEntryRequest) (*BatchCreateEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchCreateEntry not implemented") +} +func (UnimplementedEntryServer) BatchUpdateEntry(context.Context, *BatchUpdateEntryRequest) (*BatchUpdateEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchUpdateEntry not implemented") +} +func (UnimplementedEntryServer) BatchDeleteEntry(context.Context, *BatchDeleteEntryRequest) (*BatchDeleteEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchDeleteEntry not implemented") +} +func (UnimplementedEntryServer) GetAuthorizedEntries(context.Context, *GetAuthorizedEntriesRequest) (*GetAuthorizedEntriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizedEntries not implemented") +} +func (UnimplementedEntryServer) mustEmbedUnimplementedEntryServer() {} + +// UnsafeEntryServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to EntryServer will +// result in compilation errors. 
+type UnsafeEntryServer interface { + mustEmbedUnimplementedEntryServer() +} + +func RegisterEntryServer(s grpc.ServiceRegistrar, srv EntryServer) { + s.RegisterService(&_Entry_serviceDesc, srv) +} + +func _Entry_CountEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CountEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).CountEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/CountEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).CountEntries(ctx, req.(*CountEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_ListEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).ListEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/ListEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).ListEntries(ctx, req.(*ListEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_GetEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).GetEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/GetEntry", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(EntryServer).GetEntry(ctx, req.(*GetEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_BatchCreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchCreateEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).BatchCreateEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/BatchCreateEntry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).BatchCreateEntry(ctx, req.(*BatchCreateEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_BatchUpdateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchUpdateEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).BatchUpdateEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/BatchUpdateEntry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).BatchUpdateEntry(ctx, req.(*BatchUpdateEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_BatchDeleteEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchDeleteEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).BatchDeleteEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/BatchDeleteEntry", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).BatchDeleteEntry(ctx, req.(*BatchDeleteEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_GetAuthorizedEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAuthorizedEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).GetAuthorizedEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/GetAuthorizedEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).GetAuthorizedEntries(ctx, req.(*GetAuthorizedEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Entry_serviceDesc = grpc.ServiceDesc{ + ServiceName: "spire.api.server.entry.v1.Entry", + HandlerType: (*EntryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CountEntries", + Handler: _Entry_CountEntries_Handler, + }, + { + MethodName: "ListEntries", + Handler: _Entry_ListEntries_Handler, + }, + { + MethodName: "GetEntry", + Handler: _Entry_GetEntry_Handler, + }, + { + MethodName: "BatchCreateEntry", + Handler: _Entry_BatchCreateEntry_Handler, + }, + { + MethodName: "BatchUpdateEntry", + Handler: _Entry_BatchUpdateEntry_Handler, + }, + { + MethodName: "BatchDeleteEntry", + Handler: _Entry_BatchDeleteEntry_Handler, + }, + { + MethodName: "GetAuthorizedEntries", + Handler: _Entry_GetAuthorizedEntries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "spire/api/server/entry/v1/entry.proto", +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.pb.go new file mode 100644 index 00000000000..cf7b5998107 --- /dev/null +++ 
b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.pb.go @@ -0,0 +1,327 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/agent.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Agent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. SPIFFE ID of the agent. + Id *SPIFFEID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Output only. The method by which the agent attested. + AttestationType string `protobuf:"bytes,2,opt,name=attestation_type,json=attestationType,proto3" json:"attestation_type,omitempty"` + // Output only. The X509-SVID serial number. + X509SvidSerialNumber string `protobuf:"bytes,3,opt,name=x509svid_serial_number,json=x509svidSerialNumber,proto3" json:"x509svid_serial_number,omitempty"` + // Output only. The X509-SVID expiration (seconds since Unix epoch). + X509SvidExpiresAt int64 `protobuf:"varint,4,opt,name=x509svid_expires_at,json=x509svidExpiresAt,proto3" json:"x509svid_expires_at,omitempty"` + // Output only. The selectors attributed to the agent during attestation. + Selectors []*Selector `protobuf:"bytes,5,rep,name=selectors,proto3" json:"selectors,omitempty"` + // Output only. Whether or not the agent is banned. 
+ Banned bool `protobuf:"varint,6,opt,name=banned,proto3" json:"banned,omitempty"` +} + +func (x *Agent) Reset() { + *x = Agent{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_agent_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Agent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Agent) ProtoMessage() {} + +func (x *Agent) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_agent_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Agent.ProtoReflect.Descriptor instead. +func (*Agent) Descriptor() ([]byte, []int) { + return file_spire_api_types_agent_proto_rawDescGZIP(), []int{0} +} + +func (x *Agent) GetId() *SPIFFEID { + if x != nil { + return x.Id + } + return nil +} + +func (x *Agent) GetAttestationType() string { + if x != nil { + return x.AttestationType + } + return "" +} + +func (x *Agent) GetX509SvidSerialNumber() string { + if x != nil { + return x.X509SvidSerialNumber + } + return "" +} + +func (x *Agent) GetX509SvidExpiresAt() int64 { + if x != nil { + return x.X509SvidExpiresAt + } + return 0 +} + +func (x *Agent) GetSelectors() []*Selector { + if x != nil { + return x.Selectors + } + return nil +} + +func (x *Agent) GetBanned() bool { + if x != nil { + return x.Banned + } + return false +} + +type AgentMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // attestation_type field mask + AttestationType bool `protobuf:"varint,2,opt,name=attestation_type,json=attestationType,proto3" json:"attestation_type,omitempty"` + // x509svid_serial_number field mask + X509SvidSerialNumber bool `protobuf:"varint,3,opt,name=x509svid_serial_number,json=x509svidSerialNumber,proto3" 
json:"x509svid_serial_number,omitempty"` + // x509svid_expires_at field mask + X509SvidExpiresAt bool `protobuf:"varint,4,opt,name=x509svid_expires_at,json=x509svidExpiresAt,proto3" json:"x509svid_expires_at,omitempty"` + // selectors field mask + Selectors bool `protobuf:"varint,5,opt,name=selectors,proto3" json:"selectors,omitempty"` + // banned field mask + Banned bool `protobuf:"varint,6,opt,name=banned,proto3" json:"banned,omitempty"` +} + +func (x *AgentMask) Reset() { + *x = AgentMask{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_agent_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AgentMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentMask) ProtoMessage() {} + +func (x *AgentMask) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_agent_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentMask.ProtoReflect.Descriptor instead. 
+func (*AgentMask) Descriptor() ([]byte, []int) { + return file_spire_api_types_agent_proto_rawDescGZIP(), []int{1} +} + +func (x *AgentMask) GetAttestationType() bool { + if x != nil { + return x.AttestationType + } + return false +} + +func (x *AgentMask) GetX509SvidSerialNumber() bool { + if x != nil { + return x.X509SvidSerialNumber + } + return false +} + +func (x *AgentMask) GetX509SvidExpiresAt() bool { + if x != nil { + return x.X509SvidExpiresAt + } + return false +} + +func (x *AgentMask) GetSelectors() bool { + if x != nil { + return x.Selectors + } + return false +} + +func (x *AgentMask) GetBanned() bool { + if x != nil { + return x.Banned + } + return false +} + +var File_spire_api_types_agent_proto protoreflect.FileDescriptor + +var file_spire_api_types_agent_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x1e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, + 0x02, 0x0a, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, + 0x74, 0x74, 
0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, + 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x11, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x45, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, + 0x06, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x62, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x22, 0xd2, 0x01, 0x0a, 0x09, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4d, + 0x61, 0x73, 0x6b, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, + 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, + 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 
0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x11, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x45, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x41, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x06, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_agent_proto_rawDescOnce sync.Once + file_spire_api_types_agent_proto_rawDescData = file_spire_api_types_agent_proto_rawDesc +) + +func file_spire_api_types_agent_proto_rawDescGZIP() []byte { + file_spire_api_types_agent_proto_rawDescOnce.Do(func() { + file_spire_api_types_agent_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_agent_proto_rawDescData) + }) + return file_spire_api_types_agent_proto_rawDescData +} + +var file_spire_api_types_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_spire_api_types_agent_proto_goTypes = []interface{}{ + (*Agent)(nil), // 0: spire.api.types.Agent + (*AgentMask)(nil), // 1: spire.api.types.AgentMask + (*SPIFFEID)(nil), // 2: spire.api.types.SPIFFEID + (*Selector)(nil), // 3: spire.api.types.Selector +} +var file_spire_api_types_agent_proto_depIdxs = []int32{ + 2, // 0: spire.api.types.Agent.id:type_name -> spire.api.types.SPIFFEID + 3, // 1: spire.api.types.Agent.selectors:type_name -> spire.api.types.Selector + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is 
the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_spire_api_types_agent_proto_init() } +func file_spire_api_types_agent_proto_init() { + if File_spire_api_types_agent_proto != nil { + return + } + file_spire_api_types_selector_proto_init() + file_spire_api_types_spiffeid_proto_init() + if !protoimpl.UnsafeEnabled { + file_spire_api_types_agent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Agent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_agent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AgentMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_agent_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_agent_proto_goTypes, + DependencyIndexes: file_spire_api_types_agent_proto_depIdxs, + MessageInfos: file_spire_api_types_agent_proto_msgTypes, + }.Build() + File_spire_api_types_agent_proto = out.File + file_spire_api_types_agent_proto_rawDesc = nil + file_spire_api_types_agent_proto_goTypes = nil + file_spire_api_types_agent_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.proto new file mode 100644 index 00000000000..c490bb98a53 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; 
+package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +import "spire/api/types/selector.proto"; +import "spire/api/types/spiffeid.proto"; + +message Agent { + // Output only. SPIFFE ID of the agent. + spire.api.types.SPIFFEID id = 1; + + // Output only. The method by which the agent attested. + string attestation_type = 2; + + // Output only. The X509-SVID serial number. + string x509svid_serial_number = 3; + + // Output only. The X509-SVID expiration (seconds since Unix epoch). + int64 x509svid_expires_at = 4; + + // Output only. The selectors attributed to the agent during attestation. + repeated spire.api.types.Selector selectors = 5; + + // Output only. Whether or not the agent is banned. + bool banned = 6; +} + +message AgentMask { + // attestation_type field mask + bool attestation_type = 2; + + // x509svid_serial_number field mask + bool x509svid_serial_number = 3; + + // x509svid_expires_at field mask + bool x509svid_expires_at = 4; + + // selectors field mask + bool selectors = 5; + + // banned field mask + bool banned = 6; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.pb.go new file mode 100644 index 00000000000..4683df13215 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/attestation.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AttestationData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of attestation data. This is typically the name of the plugin + // that produced that data. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The attestation data payload. + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *AttestationData) Reset() { + *x = AttestationData{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_attestation_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttestationData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttestationData) ProtoMessage() {} + +func (x *AttestationData) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_attestation_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttestationData.ProtoReflect.Descriptor instead. 
+func (*AttestationData) Descriptor() ([]byte, []int) { + return file_spire_api_types_attestation_proto_rawDescGZIP(), []int{0} +} + +func (x *AttestationData) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *AttestationData) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +var File_spire_api_types_attestation_proto protoreflect.FileDescriptor + +var file_spire_api_types_attestation_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_attestation_proto_rawDescOnce sync.Once + file_spire_api_types_attestation_proto_rawDescData = file_spire_api_types_attestation_proto_rawDesc +) + +func file_spire_api_types_attestation_proto_rawDescGZIP() []byte { + file_spire_api_types_attestation_proto_rawDescOnce.Do(func() { + file_spire_api_types_attestation_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_spire_api_types_attestation_proto_rawDescData) + }) + return file_spire_api_types_attestation_proto_rawDescData +} + +var file_spire_api_types_attestation_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_attestation_proto_goTypes = []interface{}{ + (*AttestationData)(nil), // 0: spire.api.types.AttestationData +} +var file_spire_api_types_attestation_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_spire_api_types_attestation_proto_init() } +func file_spire_api_types_attestation_proto_init() { + if File_spire_api_types_attestation_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_attestation_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttestationData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_attestation_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_attestation_proto_goTypes, + DependencyIndexes: file_spire_api_types_attestation_proto_depIdxs, + MessageInfos: file_spire_api_types_attestation_proto_msgTypes, + }.Build() + File_spire_api_types_attestation_proto = out.File + file_spire_api_types_attestation_proto_rawDesc = nil + file_spire_api_types_attestation_proto_goTypes = nil + file_spire_api_types_attestation_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.proto 
b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.proto new file mode 100644 index 00000000000..4c2677f62a5 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message AttestationData { + // The type of attestation data. This is typically the name of the plugin + // that produced that data. + string type = 1; + + // The attestation data payload. + bytes payload = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.pb.go new file mode 100644 index 00000000000..541638e00a7 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.pb.go @@ -0,0 +1,448 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/bundle.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Bundle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the trust domain the bundle belongs to (e.g., "example.org"). + TrustDomain string `protobuf:"bytes,1,opt,name=trust_domain,json=trustDomain,proto3" json:"trust_domain,omitempty"` + // X.509 authorities for authenticating X509-SVIDs. 
+ X509Authorities []*X509Certificate `protobuf:"bytes,2,rep,name=x509_authorities,json=x509Authorities,proto3" json:"x509_authorities,omitempty"` + // JWT authorities for authenticating JWT-SVIDs. + JwtAuthorities []*JWTKey `protobuf:"bytes,3,rep,name=jwt_authorities,json=jwtAuthorities,proto3" json:"jwt_authorities,omitempty"` + // A hint on how often the bundle should be refreshed from the bundle + // provider, in seconds. Can be zero (meaning no hint available). + RefreshHint int64 `protobuf:"varint,4,opt,name=refresh_hint,json=refreshHint,proto3" json:"refresh_hint,omitempty"` + // The sequence number of the bundle. + SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` +} + +func (x *Bundle) Reset() { + *x = Bundle{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_bundle_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bundle) ProtoMessage() {} + +func (x *Bundle) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_bundle_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bundle.ProtoReflect.Descriptor instead. 
+func (*Bundle) Descriptor() ([]byte, []int) { + return file_spire_api_types_bundle_proto_rawDescGZIP(), []int{0} +} + +func (x *Bundle) GetTrustDomain() string { + if x != nil { + return x.TrustDomain + } + return "" +} + +func (x *Bundle) GetX509Authorities() []*X509Certificate { + if x != nil { + return x.X509Authorities + } + return nil +} + +func (x *Bundle) GetJwtAuthorities() []*JWTKey { + if x != nil { + return x.JwtAuthorities + } + return nil +} + +func (x *Bundle) GetRefreshHint() int64 { + if x != nil { + return x.RefreshHint + } + return 0 +} + +func (x *Bundle) GetSequenceNumber() uint64 { + if x != nil { + return x.SequenceNumber + } + return 0 +} + +type X509Certificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ASN.1 DER encoded bytes of the X.509 certificate. + Asn1 []byte `protobuf:"bytes,1,opt,name=asn1,proto3" json:"asn1,omitempty"` +} + +func (x *X509Certificate) Reset() { + *x = X509Certificate{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_bundle_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *X509Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509Certificate) ProtoMessage() {} + +func (x *X509Certificate) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_bundle_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509Certificate.ProtoReflect.Descriptor instead. 
+func (*X509Certificate) Descriptor() ([]byte, []int) { + return file_spire_api_types_bundle_proto_rawDescGZIP(), []int{1} +} + +func (x *X509Certificate) GetAsn1() []byte { + if x != nil { + return x.Asn1 + } + return nil +} + +type JWTKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The PKIX encoded public key. + PublicKey []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // The key identifier. + KeyId string `protobuf:"bytes,2,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + // When the key expires (seconds since Unix epoch). If zero, the key does + // not expire. + ExpiresAt int64 `protobuf:"varint,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` +} + +func (x *JWTKey) Reset() { + *x = JWTKey{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_bundle_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JWTKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JWTKey) ProtoMessage() {} + +func (x *JWTKey) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_bundle_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JWTKey.ProtoReflect.Descriptor instead. 
+func (*JWTKey) Descriptor() ([]byte, []int) { + return file_spire_api_types_bundle_proto_rawDescGZIP(), []int{2} +} + +func (x *JWTKey) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *JWTKey) GetKeyId() string { + if x != nil { + return x.KeyId + } + return "" +} + +func (x *JWTKey) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +type BundleMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // x509_authorities field mask. + X509Authorities bool `protobuf:"varint,2,opt,name=x509_authorities,json=x509Authorities,proto3" json:"x509_authorities,omitempty"` + // jwt_authorities field mask. + JwtAuthorities bool `protobuf:"varint,3,opt,name=jwt_authorities,json=jwtAuthorities,proto3" json:"jwt_authorities,omitempty"` + // refresh_hint field mask. + RefreshHint bool `protobuf:"varint,4,opt,name=refresh_hint,json=refreshHint,proto3" json:"refresh_hint,omitempty"` + // sequence_number field mask. + SequenceNumber bool `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` +} + +func (x *BundleMask) Reset() { + *x = BundleMask{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_bundle_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BundleMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BundleMask) ProtoMessage() {} + +func (x *BundleMask) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_bundle_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BundleMask.ProtoReflect.Descriptor instead. 
+func (*BundleMask) Descriptor() ([]byte, []int) { + return file_spire_api_types_bundle_proto_rawDescGZIP(), []int{3} +} + +func (x *BundleMask) GetX509Authorities() bool { + if x != nil { + return x.X509Authorities + } + return false +} + +func (x *BundleMask) GetJwtAuthorities() bool { + if x != nil { + return x.JwtAuthorities + } + return false +} + +func (x *BundleMask) GetRefreshHint() bool { + if x != nil { + return x.RefreshHint + } + return false +} + +func (x *BundleMask) GetSequenceNumber() bool { + if x != nil { + return x.SequenceNumber + } + return false +} + +var File_spire_api_types_bundle_proto protoreflect.FileDescriptor + +var file_spire_api_types_bundle_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, + 0x86, 0x02, 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x72, + 0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x4b, 0x0a, + 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x6a, 0x77, + 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 
0x65, 0x73, 0x2e, 0x4a, 0x57, 0x54, 0x4b, 0x65, 0x79, 0x52, 0x0e, 0x6a, 0x77, + 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, + 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x48, 0x69, 0x6e, 0x74, 0x12, + 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, + 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x25, 0x0a, 0x0f, 0x58, 0x35, 0x30, 0x39, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x61, + 0x73, 0x6e, 0x31, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x73, 0x6e, 0x31, 0x22, + 0x5d, 0x0a, 0x06, 0x4a, 0x57, 0x54, 0x4b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x22, 0xac, + 0x01, 0x0a, 0x0a, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x29, 0x0a, + 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6a, 0x77, 0x74, 0x5f, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x6a, 0x77, 0x74, 0x41, 0x75, 0x74, 
0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x68, 0x69, 0x6e, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x48, 0x69, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, + 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, + 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 0x37, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_bundle_proto_rawDescOnce sync.Once + file_spire_api_types_bundle_proto_rawDescData = file_spire_api_types_bundle_proto_rawDesc +) + +func file_spire_api_types_bundle_proto_rawDescGZIP() []byte { + file_spire_api_types_bundle_proto_rawDescOnce.Do(func() { + file_spire_api_types_bundle_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_bundle_proto_rawDescData) + }) + return file_spire_api_types_bundle_proto_rawDescData +} + +var file_spire_api_types_bundle_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_spire_api_types_bundle_proto_goTypes = []interface{}{ + (*Bundle)(nil), // 0: spire.api.types.Bundle + (*X509Certificate)(nil), // 1: spire.api.types.X509Certificate + (*JWTKey)(nil), // 2: spire.api.types.JWTKey + (*BundleMask)(nil), // 3: spire.api.types.BundleMask +} +var file_spire_api_types_bundle_proto_depIdxs = []int32{ + 1, // 0: spire.api.types.Bundle.x509_authorities:type_name -> spire.api.types.X509Certificate + 2, // 1: spire.api.types.Bundle.jwt_authorities:type_name -> spire.api.types.JWTKey + 2, 
// [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_spire_api_types_bundle_proto_init() } +func file_spire_api_types_bundle_proto_init() { + if File_spire_api_types_bundle_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_bundle_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bundle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_bundle_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509Certificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_bundle_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JWTKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_bundle_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BundleMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_bundle_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_bundle_proto_goTypes, + DependencyIndexes: file_spire_api_types_bundle_proto_depIdxs, + MessageInfos: file_spire_api_types_bundle_proto_msgTypes, + }.Build() + 
File_spire_api_types_bundle_proto = out.File + file_spire_api_types_bundle_proto_rawDesc = nil + file_spire_api_types_bundle_proto_goTypes = nil + file_spire_api_types_bundle_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.proto new file mode 100644 index 00000000000..435f33958c5 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message Bundle { + // The name of the trust domain the bundle belongs to (e.g., "example.org"). + string trust_domain = 1; + + // X.509 authorities for authenticating X509-SVIDs. + repeated X509Certificate x509_authorities = 2; + + // JWT authorities for authenticating JWT-SVIDs. + repeated JWTKey jwt_authorities = 3; + + // A hint on how often the bundle should be refreshed from the bundle + // provider, in seconds. Can be zero (meaning no hint available). + int64 refresh_hint = 4; + + // The sequence number of the bundle. + uint64 sequence_number = 5; +} + +message X509Certificate { + // The ASN.1 DER encoded bytes of the X.509 certificate. + bytes asn1 = 1; +} + +message JWTKey { + // The PKIX encoded public key. + bytes public_key = 1; + + // The key identifier. + string key_id = 2; + + // When the key expires (seconds since Unix epoch). If zero, the key does + // not expire. + int64 expires_at = 3; +} + +message BundleMask { + // x509_authorities field mask. + bool x509_authorities = 2; + + // jwt_authorities field mask. + bool jwt_authorities = 3; + + // refresh_hint field mask. + bool refresh_hint = 4; + + // sequence_number field mask. 
+ bool sequence_number = 5; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.pb.go new file mode 100644 index 00000000000..383024ad000 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.pb.go @@ -0,0 +1,467 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/entry.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Globally unique ID for the entry. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // The SPIFFE ID of the identity described by this entry. + SpiffeId *SPIFFEID `protobuf:"bytes,2,opt,name=spiffe_id,json=spiffeId,proto3" json:"spiffe_id,omitempty"` + // Who the entry is delegated to. If the entry describes a node, this is + // set to the SPIFFE ID of the SPIRE server of the trust domain (e.g. + // spiffe://example.org/spire/server). Otherwise, it will be set to a node + // SPIFFE ID. + ParentId *SPIFFEID `protobuf:"bytes,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` + // The selectors which identify which entities match this entry. If this is + // an entry for a node, these selectors represent selectors produced by + // node attestation. Otherwise, these selectors represent those produced by + // workload attestation. 
+ Selectors []*Selector `protobuf:"bytes,4,rep,name=selectors,proto3" json:"selectors,omitempty"` + // The time to live for identities issued for this entry (in seconds). + Ttl int32 `protobuf:"varint,5,opt,name=ttl,proto3" json:"ttl,omitempty"` + // The names of trust domains the identity described by this entry + // federates with. + FederatesWith []string `protobuf:"bytes,6,rep,name=federates_with,json=federatesWith,proto3" json:"federates_with,omitempty"` + // Whether or not the identity described by this entry is an administrative + // workload. Administrative workloads are granted additional access to + // various managerial server APIs, such as entry registration. + Admin bool `protobuf:"varint,7,opt,name=admin,proto3" json:"admin,omitempty"` + // Whether or not the identity described by this entry represents a + // downstream SPIRE server. Downstream SPIRE servers have additional access + // to various signing APIs, such as those used to sign X.509 CA + // certificates and publish JWT signing keys. + Downstream bool `protobuf:"varint,8,opt,name=downstream,proto3" json:"downstream,omitempty"` + // When the entry expires (seconds since Unix epoch). + ExpiresAt int64 `protobuf:"varint,9,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + // A list of DNS names associated with the identity described by this entry. 
+ DnsNames []string `protobuf:"bytes,10,rep,name=dns_names,json=dnsNames,proto3" json:"dns_names,omitempty"` + // Revision number is bumped every time the entry is updated + RevisionNumber int64 `protobuf:"varint,11,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + // Determines if the issued identity is exportable to a store + StoreSvid bool `protobuf:"varint,12,opt,name=store_svid,json=storeSvid,proto3" json:"store_svid,omitempty"` +} + +func (x *Entry) Reset() { + *x = Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_entry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entry) ProtoMessage() {} + +func (x *Entry) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_entry_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Entry.ProtoReflect.Descriptor instead. 
+func (*Entry) Descriptor() ([]byte, []int) { + return file_spire_api_types_entry_proto_rawDescGZIP(), []int{0} +} + +func (x *Entry) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Entry) GetSpiffeId() *SPIFFEID { + if x != nil { + return x.SpiffeId + } + return nil +} + +func (x *Entry) GetParentId() *SPIFFEID { + if x != nil { + return x.ParentId + } + return nil +} + +func (x *Entry) GetSelectors() []*Selector { + if x != nil { + return x.Selectors + } + return nil +} + +func (x *Entry) GetTtl() int32 { + if x != nil { + return x.Ttl + } + return 0 +} + +func (x *Entry) GetFederatesWith() []string { + if x != nil { + return x.FederatesWith + } + return nil +} + +func (x *Entry) GetAdmin() bool { + if x != nil { + return x.Admin + } + return false +} + +func (x *Entry) GetDownstream() bool { + if x != nil { + return x.Downstream + } + return false +} + +func (x *Entry) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +func (x *Entry) GetDnsNames() []string { + if x != nil { + return x.DnsNames + } + return nil +} + +func (x *Entry) GetRevisionNumber() int64 { + if x != nil { + return x.RevisionNumber + } + return 0 +} + +func (x *Entry) GetStoreSvid() bool { + if x != nil { + return x.StoreSvid + } + return false +} + +// Field mask for Entry fields +type EntryMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // spiffe_id field mask + SpiffeId bool `protobuf:"varint,2,opt,name=spiffe_id,json=spiffeId,proto3" json:"spiffe_id,omitempty"` + // parent_id field mask + ParentId bool `protobuf:"varint,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` + // selectors field mask + Selectors bool `protobuf:"varint,4,opt,name=selectors,proto3" json:"selectors,omitempty"` + // ttl field mask + Ttl bool `protobuf:"varint,5,opt,name=ttl,proto3" json:"ttl,omitempty"` + // federates_with field mask + FederatesWith bool 
`protobuf:"varint,6,opt,name=federates_with,json=federatesWith,proto3" json:"federates_with,omitempty"` + // admin field mask + Admin bool `protobuf:"varint,7,opt,name=admin,proto3" json:"admin,omitempty"` + // downstream field mask + Downstream bool `protobuf:"varint,8,opt,name=downstream,proto3" json:"downstream,omitempty"` + // expires_at field mask + ExpiresAt bool `protobuf:"varint,9,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + // dns_names field mask + DnsNames bool `protobuf:"varint,10,opt,name=dns_names,json=dnsNames,proto3" json:"dns_names,omitempty"` + // revision_number field mask + RevisionNumber bool `protobuf:"varint,11,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + // store_svid field mask + StoreSvid bool `protobuf:"varint,12,opt,name=store_svid,json=storeSvid,proto3" json:"store_svid,omitempty"` +} + +func (x *EntryMask) Reset() { + *x = EntryMask{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_entry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntryMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntryMask) ProtoMessage() {} + +func (x *EntryMask) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_entry_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntryMask.ProtoReflect.Descriptor instead. 
+func (*EntryMask) Descriptor() ([]byte, []int) { + return file_spire_api_types_entry_proto_rawDescGZIP(), []int{1} +} + +func (x *EntryMask) GetSpiffeId() bool { + if x != nil { + return x.SpiffeId + } + return false +} + +func (x *EntryMask) GetParentId() bool { + if x != nil { + return x.ParentId + } + return false +} + +func (x *EntryMask) GetSelectors() bool { + if x != nil { + return x.Selectors + } + return false +} + +func (x *EntryMask) GetTtl() bool { + if x != nil { + return x.Ttl + } + return false +} + +func (x *EntryMask) GetFederatesWith() bool { + if x != nil { + return x.FederatesWith + } + return false +} + +func (x *EntryMask) GetAdmin() bool { + if x != nil { + return x.Admin + } + return false +} + +func (x *EntryMask) GetDownstream() bool { + if x != nil { + return x.Downstream + } + return false +} + +func (x *EntryMask) GetExpiresAt() bool { + if x != nil { + return x.ExpiresAt + } + return false +} + +func (x *EntryMask) GetDnsNames() bool { + if x != nil { + return x.DnsNames + } + return false +} + +func (x *EntryMask) GetRevisionNumber() bool { + if x != nil { + return x.RevisionNumber + } + return false +} + +func (x *EntryMask) GetStoreSvid() bool { + if x != nil { + return x.StoreSvid + } + return false +} + +var File_spire_api_types_entry_proto protoreflect.FileDescriptor + +var file_spire_api_types_entry_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x1e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x73, 0x70, 0x69, 0x66, 
0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, + 0x03, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, + 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, + 0x12, 0x36, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x52, 0x08, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x74, 0x74, 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, + 0x5f, 0x77, 0x69, 0x74, 0x68, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x66, 0x65, 0x64, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 
0x73, 0x5f, 0x61, 0x74, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, + 0x76, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x53, 0x76, 0x69, 0x64, 0x22, 0xd6, 0x02, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, 0x61, + 0x73, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x25, 0x0a, 0x0e, + 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x57, + 0x69, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x6f, 0x77, + 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, + 
0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x6e, 0x73, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x76, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x76, 0x69, 0x64, 0x42, 0x37, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_entry_proto_rawDescOnce sync.Once + file_spire_api_types_entry_proto_rawDescData = file_spire_api_types_entry_proto_rawDesc +) + +func file_spire_api_types_entry_proto_rawDescGZIP() []byte { + file_spire_api_types_entry_proto_rawDescOnce.Do(func() { + file_spire_api_types_entry_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_entry_proto_rawDescData) + }) + return file_spire_api_types_entry_proto_rawDescData +} + +var file_spire_api_types_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_spire_api_types_entry_proto_goTypes = []interface{}{ + (*Entry)(nil), // 0: spire.api.types.Entry + (*EntryMask)(nil), // 1: spire.api.types.EntryMask + (*SPIFFEID)(nil), // 2: 
spire.api.types.SPIFFEID + (*Selector)(nil), // 3: spire.api.types.Selector +} +var file_spire_api_types_entry_proto_depIdxs = []int32{ + 2, // 0: spire.api.types.Entry.spiffe_id:type_name -> spire.api.types.SPIFFEID + 2, // 1: spire.api.types.Entry.parent_id:type_name -> spire.api.types.SPIFFEID + 3, // 2: spire.api.types.Entry.selectors:type_name -> spire.api.types.Selector + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_spire_api_types_entry_proto_init() } +func file_spire_api_types_entry_proto_init() { + if File_spire_api_types_entry_proto != nil { + return + } + file_spire_api_types_selector_proto_init() + file_spire_api_types_spiffeid_proto_init() + if !protoimpl.UnsafeEnabled { + file_spire_api_types_entry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_entry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntryMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_entry_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_entry_proto_goTypes, + DependencyIndexes: file_spire_api_types_entry_proto_depIdxs, + MessageInfos: file_spire_api_types_entry_proto_msgTypes, + }.Build() + File_spire_api_types_entry_proto = out.File + file_spire_api_types_entry_proto_rawDesc = 
nil + file_spire_api_types_entry_proto_goTypes = nil + file_spire_api_types_entry_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.proto new file mode 100644 index 00000000000..49fa4671729 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.proto @@ -0,0 +1,92 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +import "spire/api/types/selector.proto"; +import "spire/api/types/spiffeid.proto"; + +message Entry { + // Globally unique ID for the entry. + string id = 1; + + // The SPIFFE ID of the identity described by this entry. + spire.api.types.SPIFFEID spiffe_id = 2; + + // Who the entry is delegated to. If the entry describes a node, this is + // set to the SPIFFE ID of the SPIRE server of the trust domain (e.g. + // spiffe://example.org/spire/server). Otherwise, it will be set to a node + // SPIFFE ID. + spire.api.types.SPIFFEID parent_id = 3; + + // The selectors which identify which entities match this entry. If this is + // an entry for a node, these selectors represent selectors produced by + // node attestation. Otherwise, these selectors represent those produced by + // workload attestation. + repeated spire.api.types.Selector selectors = 4; + + // The time to live for identities issued for this entry (in seconds). + int32 ttl = 5; + + // The names of trust domains the identity described by this entry + // federates with. + repeated string federates_with = 6; + + // Whether or not the identity described by this entry is an administrative + // workload. Administrative workloads are granted additional access to + // various managerial server APIs, such as entry registration. + bool admin = 7; + + // Whether or not the identity described by this entry represents a + // downstream SPIRE server. 
Downstream SPIRE servers have additional access + // to various signing APIs, such as those used to sign X.509 CA + // certificates and publish JWT signing keys. + bool downstream = 8; + + // When the entry expires (seconds since Unix epoch). + int64 expires_at = 9; + + // A list of DNS names associated with the identity described by this entry. + repeated string dns_names = 10; + + // Revision number is bumped every time the entry is updated + int64 revision_number = 11; + + // Determines if the issued identity is exportable to a store + bool store_svid = 12; +} + +// Field mask for Entry fields +message EntryMask { + // spiffe_id field mask + bool spiffe_id = 2; + + // parent_id field mask + bool parent_id = 3; + + // selectors field mask + bool selectors = 4; + + // ttl field mask + bool ttl = 5; + + // federates_with field mask + bool federates_with = 6; + + // admin field mask + bool admin = 7; + + // downstream field mask + bool downstream = 8; + + // expires_at field mask + bool expires_at = 9; + + // dns_names field mask + bool dns_names = 10; + + // revision_number field mask + bool revision_number = 11; + + // store_svid field mask + bool store_svid = 12; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.pb.go new file mode 100644 index 00000000000..ad5cfff8819 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.pb.go @@ -0,0 +1,271 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/federateswith.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FederatesWithMatch_MatchBehavior int32 + +const ( + // Indicates that the federated trust domains in this match are + // equal to the candidate trust domains, independent of ordering. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_EXACT ["spiffe://td1", "spiffe://td2"] + // Entries that match: + // - 'e2' + FederatesWithMatch_MATCH_EXACT FederatesWithMatch_MatchBehavior = 0 + // Indicates that all candidates which have a non-empty subset + // of the provided set of trust domains will match. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_SUBSET ["spiffe://td1"] + // Entries that match: + // - 'e1' + FederatesWithMatch_MATCH_SUBSET FederatesWithMatch_MatchBehavior = 1 + // Indicate that all candidates which are a superset + // of the provided set of trust domains will match. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_SUPERSET ["spiffe://td1", "spiffe://td2"] + // Entries that match: + // - 'e1' + // - 'e2' + FederatesWithMatch_MATCH_SUPERSET FederatesWithMatch_MatchBehavior = 2 + // Indicates that all candidates which have at least one + // of the provided set of trust domains will match. 
+ // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_ANY ["spiffe://td1"] + // Entries that match: + // - 'e1' + // - 'e2' + // - 'e3' + FederatesWithMatch_MATCH_ANY FederatesWithMatch_MatchBehavior = 3 +) + +// Enum value maps for FederatesWithMatch_MatchBehavior. +var ( + FederatesWithMatch_MatchBehavior_name = map[int32]string{ + 0: "MATCH_EXACT", + 1: "MATCH_SUBSET", + 2: "MATCH_SUPERSET", + 3: "MATCH_ANY", + } + FederatesWithMatch_MatchBehavior_value = map[string]int32{ + "MATCH_EXACT": 0, + "MATCH_SUBSET": 1, + "MATCH_SUPERSET": 2, + "MATCH_ANY": 3, + } +) + +func (x FederatesWithMatch_MatchBehavior) Enum() *FederatesWithMatch_MatchBehavior { + p := new(FederatesWithMatch_MatchBehavior) + *p = x + return p +} + +func (x FederatesWithMatch_MatchBehavior) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FederatesWithMatch_MatchBehavior) Descriptor() protoreflect.EnumDescriptor { + return file_spire_api_types_federateswith_proto_enumTypes[0].Descriptor() +} + +func (FederatesWithMatch_MatchBehavior) Type() protoreflect.EnumType { + return &file_spire_api_types_federateswith_proto_enumTypes[0] +} + +func (x FederatesWithMatch_MatchBehavior) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FederatesWithMatch_MatchBehavior.Descriptor instead. +func (FederatesWithMatch_MatchBehavior) EnumDescriptor() ([]byte, []int) { + return file_spire_api_types_federateswith_proto_rawDescGZIP(), []int{0, 0} +} + +type FederatesWithMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of trust domain names to match on (e.g., "example.org"). 
+ TrustDomains []string `protobuf:"bytes,1,rep,name=trust_domains,json=trustDomains,proto3" json:"trust_domains,omitempty"` + // How to match the trust domains. + Match FederatesWithMatch_MatchBehavior `protobuf:"varint,2,opt,name=match,proto3,enum=spire.api.types.FederatesWithMatch_MatchBehavior" json:"match,omitempty"` +} + +func (x *FederatesWithMatch) Reset() { + *x = FederatesWithMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_federateswith_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FederatesWithMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FederatesWithMatch) ProtoMessage() {} + +func (x *FederatesWithMatch) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_federateswith_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FederatesWithMatch.ProtoReflect.Descriptor instead. 
+func (*FederatesWithMatch) Descriptor() ([]byte, []int) { + return file_spire_api_types_federateswith_proto_rawDescGZIP(), []int{0} +} + +func (x *FederatesWithMatch) GetTrustDomains() []string { + if x != nil { + return x.TrustDomains + } + return nil +} + +func (x *FederatesWithMatch) GetMatch() FederatesWithMatch_MatchBehavior { + if x != nil { + return x.Match + } + return FederatesWithMatch_MATCH_EXACT +} + +var File_spire_api_types_federateswith_proto protoreflect.FileDescriptor + +var file_spire_api_types_federateswith_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x77, 0x69, 0x74, 0x68, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0xd9, 0x01, 0x0a, 0x12, 0x46, 0x65, 0x64, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x23, 0x0a, + 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x57, 0x69, 0x74, + 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x55, 0x0a, 0x0d, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x0f, 0x0a, 0x0b, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x00, 0x12, 0x10, 0x0a, + 0x0c, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x55, 0x42, 0x53, 
0x45, 0x54, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x55, 0x50, 0x45, 0x52, 0x53, 0x45, + 0x54, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x41, 0x4e, 0x59, + 0x10, 0x03, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, + 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_federateswith_proto_rawDescOnce sync.Once + file_spire_api_types_federateswith_proto_rawDescData = file_spire_api_types_federateswith_proto_rawDesc +) + +func file_spire_api_types_federateswith_proto_rawDescGZIP() []byte { + file_spire_api_types_federateswith_proto_rawDescOnce.Do(func() { + file_spire_api_types_federateswith_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_federateswith_proto_rawDescData) + }) + return file_spire_api_types_federateswith_proto_rawDescData +} + +var file_spire_api_types_federateswith_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_spire_api_types_federateswith_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_federateswith_proto_goTypes = []interface{}{ + (FederatesWithMatch_MatchBehavior)(0), // 0: spire.api.types.FederatesWithMatch.MatchBehavior + (*FederatesWithMatch)(nil), // 1: spire.api.types.FederatesWithMatch +} +var file_spire_api_types_federateswith_proto_depIdxs = []int32{ + 0, // 0: spire.api.types.FederatesWithMatch.match:type_name -> spire.api.types.FederatesWithMatch.MatchBehavior + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is 
the sub-list for field type_name +} + +func init() { file_spire_api_types_federateswith_proto_init() } +func file_spire_api_types_federateswith_proto_init() { + if File_spire_api_types_federateswith_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_federateswith_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FederatesWithMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_federateswith_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_federateswith_proto_goTypes, + DependencyIndexes: file_spire_api_types_federateswith_proto_depIdxs, + EnumInfos: file_spire_api_types_federateswith_proto_enumTypes, + MessageInfos: file_spire_api_types_federateswith_proto_msgTypes, + }.Build() + File_spire_api_types_federateswith_proto = out.File + file_spire_api_types_federateswith_proto_rawDesc = nil + file_spire_api_types_federateswith_proto_goTypes = nil + file_spire_api_types_federateswith_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.proto new file mode 100644 index 00000000000..a5be5f85b20 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.proto @@ -0,0 +1,68 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message FederatesWithMatch { + enum MatchBehavior { + // Indicates that the federated trust domains in this match are + // equal to the candidate trust domains, independent of ordering. 
+ // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_EXACT ["spiffe://td1", "spiffe://td2"] + // Entries that match: + // - 'e2' + MATCH_EXACT = 0; + + // Indicates that all candidates which have a non-empty subset + // of the provided set of trust domains will match. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_SUBSET ["spiffe://td1"] + // Entries that match: + // - 'e1' + MATCH_SUBSET = 1; + + // Indicate that all candidates which are a superset + // of the provided set of trust domains will match. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_SUPERSET ["spiffe://td1", "spiffe://td2"] + // Entries that match: + // - 'e1' + // - 'e2' + MATCH_SUPERSET = 2; + + // Indicates that all candidates which have at least one + // of the provided set of trust domains will match. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_ANY ["spiffe://td1"] + // Entries that match: + // - 'e1' + // - 'e2' + // - 'e3' + MATCH_ANY = 3; + } + + // The set of trust domain names to match on (e.g., "example.org"). + repeated string trust_domains = 1; + + // How to match the trust domains. 
+ MatchBehavior match = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.pb.go new file mode 100644 index 00000000000..ee4b0a57a72 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.pb.go @@ -0,0 +1,464 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/federationrelationship.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FederationRelationship struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The trust domain name (e.g., "example.org") to federate with. + TrustDomain string `protobuf:"bytes,1,opt,name=trust_domain,json=trustDomain,proto3" json:"trust_domain,omitempty"` + // Required. URL of the SPIFFE bundle endpoint that provides the trust + // bundle to federate with. Must use the HTTPS protocol. + BundleEndpointUrl string `protobuf:"bytes,2,opt,name=bundle_endpoint_url,json=bundleEndpointUrl,proto3" json:"bundle_endpoint_url,omitempty"` + // Required. The endpoint profile type. + // + // Types that are assignable to BundleEndpointProfile: + // *FederationRelationship_HttpsWeb + // *FederationRelationship_HttpsSpiffe + BundleEndpointProfile isFederationRelationship_BundleEndpointProfile `protobuf_oneof:"bundle_endpoint_profile"` + // Optional. The bundle for the trust domain. 
This field can be used to + // create or replace the referenced trust domains' bundle when the + // relationship is created or updated. When the relationship is retrieved, + // it will be set to the referenced trust domain's latest bundle (if + // available). Please note that the `https_spiffe` profile requires an + // existing trust domain bundle in order to function correctly. The + // required bundle must match the trust domain specified in the bundle + // endpoint SPIFFE ID. If the bundle endpoint SPIFFE ID resides in the same + // trust domain that you're trying to federate with, you may optionally + // specify that trust domain bundle here. If the bundle endpoint SPIFFE ID + // _does not_ reside in the same trust domain that you're federating with, + // please ensure that the trust domain bundle for that trust domain has + // been configured separately (e.g. configured via another federation + // relationship or manually set via the Bundle API). + TrustDomainBundle *Bundle `protobuf:"bytes,5,opt,name=trust_domain_bundle,json=trustDomainBundle,proto3" json:"trust_domain_bundle,omitempty"` +} + +func (x *FederationRelationship) Reset() { + *x = FederationRelationship{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FederationRelationship) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FederationRelationship) ProtoMessage() {} + +func (x *FederationRelationship) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FederationRelationship.ProtoReflect.Descriptor instead. 
+func (*FederationRelationship) Descriptor() ([]byte, []int) { + return file_spire_api_types_federationrelationship_proto_rawDescGZIP(), []int{0} +} + +func (x *FederationRelationship) GetTrustDomain() string { + if x != nil { + return x.TrustDomain + } + return "" +} + +func (x *FederationRelationship) GetBundleEndpointUrl() string { + if x != nil { + return x.BundleEndpointUrl + } + return "" +} + +func (m *FederationRelationship) GetBundleEndpointProfile() isFederationRelationship_BundleEndpointProfile { + if m != nil { + return m.BundleEndpointProfile + } + return nil +} + +func (x *FederationRelationship) GetHttpsWeb() *HTTPSWebProfile { + if x, ok := x.GetBundleEndpointProfile().(*FederationRelationship_HttpsWeb); ok { + return x.HttpsWeb + } + return nil +} + +func (x *FederationRelationship) GetHttpsSpiffe() *HTTPSSPIFFEProfile { + if x, ok := x.GetBundleEndpointProfile().(*FederationRelationship_HttpsSpiffe); ok { + return x.HttpsSpiffe + } + return nil +} + +func (x *FederationRelationship) GetTrustDomainBundle() *Bundle { + if x != nil { + return x.TrustDomainBundle + } + return nil +} + +type isFederationRelationship_BundleEndpointProfile interface { + isFederationRelationship_BundleEndpointProfile() +} + +type FederationRelationship_HttpsWeb struct { + // Use Web PKI endpoint profile. + HttpsWeb *HTTPSWebProfile `protobuf:"bytes,3,opt,name=https_web,json=httpsWeb,proto3,oneof"` +} + +type FederationRelationship_HttpsSpiffe struct { + // Use SPIFFE Authentication endpoint profile. + HttpsSpiffe *HTTPSSPIFFEProfile `protobuf:"bytes,4,opt,name=https_spiffe,json=httpsSpiffe,proto3,oneof"` +} + +func (*FederationRelationship_HttpsWeb) isFederationRelationship_BundleEndpointProfile() {} + +func (*FederationRelationship_HttpsSpiffe) isFederationRelationship_BundleEndpointProfile() {} + +type HTTPSSPIFFEProfile struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
Specifies the expected SPIFFE ID of the SPIFFE bundle endpoint + // server. + EndpointSpiffeId string `protobuf:"bytes,1,opt,name=endpoint_spiffe_id,json=endpointSpiffeId,proto3" json:"endpoint_spiffe_id,omitempty"` +} + +func (x *HTTPSSPIFFEProfile) Reset() { + *x = HTTPSSPIFFEProfile{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPSSPIFFEProfile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPSSPIFFEProfile) ProtoMessage() {} + +func (x *HTTPSSPIFFEProfile) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPSSPIFFEProfile.ProtoReflect.Descriptor instead. 
+func (*HTTPSSPIFFEProfile) Descriptor() ([]byte, []int) { + return file_spire_api_types_federationrelationship_proto_rawDescGZIP(), []int{1} +} + +func (x *HTTPSSPIFFEProfile) GetEndpointSpiffeId() string { + if x != nil { + return x.EndpointSpiffeId + } + return "" +} + +type HTTPSWebProfile struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HTTPSWebProfile) Reset() { + *x = HTTPSWebProfile{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPSWebProfile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPSWebProfile) ProtoMessage() {} + +func (x *HTTPSWebProfile) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPSWebProfile.ProtoReflect.Descriptor instead. +func (*HTTPSWebProfile) Descriptor() ([]byte, []int) { + return file_spire_api_types_federationrelationship_proto_rawDescGZIP(), []int{2} +} + +type FederationRelationshipMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // bundle_endpoint_url field mask. + BundleEndpointUrl bool `protobuf:"varint,1,opt,name=bundle_endpoint_url,json=bundleEndpointUrl,proto3" json:"bundle_endpoint_url,omitempty"` + // bundle_endpoint_profile field mask. + BundleEndpointProfile bool `protobuf:"varint,2,opt,name=bundle_endpoint_profile,json=bundleEndpointProfile,proto3" json:"bundle_endpoint_profile,omitempty"` + // trust_domain_bundle field mask. 
+ TrustDomainBundle bool `protobuf:"varint,3,opt,name=trust_domain_bundle,json=trustDomainBundle,proto3" json:"trust_domain_bundle,omitempty"` +} + +func (x *FederationRelationshipMask) Reset() { + *x = FederationRelationshipMask{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FederationRelationshipMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FederationRelationshipMask) ProtoMessage() {} + +func (x *FederationRelationshipMask) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FederationRelationshipMask.ProtoReflect.Descriptor instead. 
+func (*FederationRelationshipMask) Descriptor() ([]byte, []int) { + return file_spire_api_types_federationrelationship_proto_rawDescGZIP(), []int{3} +} + +func (x *FederationRelationshipMask) GetBundleEndpointUrl() bool { + if x != nil { + return x.BundleEndpointUrl + } + return false +} + +func (x *FederationRelationshipMask) GetBundleEndpointProfile() bool { + if x != nil { + return x.BundleEndpointProfile + } + return false +} + +func (x *FederationRelationshipMask) GetTrustDomainBundle() bool { + if x != nil { + return x.TrustDomainBundle + } + return false +} + +var File_spire_api_types_federationrelationship_proto protoreflect.FileDescriptor + +var file_spire_api_types_federationrelationship_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72, 0x65, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x68, 0x69, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, + 0x1c, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x02, + 0x0a, 0x16, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x68, 0x69, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x72, 0x6c, 0x12, 0x3f, 0x0a, 0x09, 
0x68, + 0x74, 0x74, 0x70, 0x73, 0x5f, 0x77, 0x65, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x48, 0x54, 0x54, 0x50, 0x53, 0x57, 0x65, 0x62, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x48, 0x00, 0x52, 0x08, 0x68, 0x74, 0x74, 0x70, 0x73, 0x57, 0x65, 0x62, 0x12, 0x48, 0x0a, 0x0c, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x53, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x12, 0x47, 0x0a, 0x13, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x11, 0x74, 0x72, + 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x42, + 0x19, 0x0a, 0x17, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x42, 0x0a, 0x12, 0x48, 0x54, + 0x54, 0x50, 0x53, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x69, + 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x22, 0x11, + 0x0a, 0x0f, 0x48, 0x54, 0x54, 0x50, 0x53, 0x57, 0x65, 0x62, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x22, 0xb4, 0x01, 0x0a, 
0x1a, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x68, 0x69, 0x70, 0x4d, 0x61, 0x73, 0x6b, + 0x12, 0x2e, 0x0a, 0x13, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x72, 0x6c, + 0x12, 0x36, 0x0a, 0x17, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x15, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_federationrelationship_proto_rawDescOnce sync.Once + file_spire_api_types_federationrelationship_proto_rawDescData = file_spire_api_types_federationrelationship_proto_rawDesc +) + +func file_spire_api_types_federationrelationship_proto_rawDescGZIP() []byte { + file_spire_api_types_federationrelationship_proto_rawDescOnce.Do(func() { + file_spire_api_types_federationrelationship_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_spire_api_types_federationrelationship_proto_rawDescData) + }) + return file_spire_api_types_federationrelationship_proto_rawDescData +} + +var file_spire_api_types_federationrelationship_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_spire_api_types_federationrelationship_proto_goTypes = []interface{}{ + (*FederationRelationship)(nil), // 0: spire.api.types.FederationRelationship + (*HTTPSSPIFFEProfile)(nil), // 1: spire.api.types.HTTPSSPIFFEProfile + (*HTTPSWebProfile)(nil), // 2: spire.api.types.HTTPSWebProfile + (*FederationRelationshipMask)(nil), // 3: spire.api.types.FederationRelationshipMask + (*Bundle)(nil), // 4: spire.api.types.Bundle +} +var file_spire_api_types_federationrelationship_proto_depIdxs = []int32{ + 2, // 0: spire.api.types.FederationRelationship.https_web:type_name -> spire.api.types.HTTPSWebProfile + 1, // 1: spire.api.types.FederationRelationship.https_spiffe:type_name -> spire.api.types.HTTPSSPIFFEProfile + 4, // 2: spire.api.types.FederationRelationship.trust_domain_bundle:type_name -> spire.api.types.Bundle + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_spire_api_types_federationrelationship_proto_init() } +func file_spire_api_types_federationrelationship_proto_init() { + if File_spire_api_types_federationrelationship_proto != nil { + return + } + file_spire_api_types_bundle_proto_init() + if !protoimpl.UnsafeEnabled { + file_spire_api_types_federationrelationship_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FederationRelationship); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_spire_api_types_federationrelationship_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPSSPIFFEProfile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_federationrelationship_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPSWebProfile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_federationrelationship_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FederationRelationshipMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_spire_api_types_federationrelationship_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FederationRelationship_HttpsWeb)(nil), + (*FederationRelationship_HttpsSpiffe)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_federationrelationship_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_federationrelationship_proto_goTypes, + DependencyIndexes: file_spire_api_types_federationrelationship_proto_depIdxs, + MessageInfos: file_spire_api_types_federationrelationship_proto_msgTypes, + }.Build() + File_spire_api_types_federationrelationship_proto = out.File + file_spire_api_types_federationrelationship_proto_rawDesc = nil + file_spire_api_types_federationrelationship_proto_goTypes = nil + file_spire_api_types_federationrelationship_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.proto 
b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.proto new file mode 100644 index 00000000000..7801e22e864 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +import "spire/api/types/bundle.proto"; + +message FederationRelationship { + // Required. The trust domain name (e.g., "example.org") to federate with. + string trust_domain = 1; + + // Required. URL of the SPIFFE bundle endpoint that provides the trust + // bundle to federate with. Must use the HTTPS protocol. + string bundle_endpoint_url = 2; + + // Required. The endpoint profile type. + oneof bundle_endpoint_profile { + // Use Web PKI endpoint profile. + HTTPSWebProfile https_web = 3; + + // Use SPIFFE Authentication endpoint profile. + HTTPSSPIFFEProfile https_spiffe = 4; + } + + // Optional. The bundle for the trust domain. This field can be used to + // create or replace the referenced trust domains' bundle when the + // relationship is created or updated. When the relationship is retrieved, + // it will be set to the referenced trust domain's latest bundle (if + // available). Please note that the `https_spiffe` profile requires an + // existing trust domain bundle in order to function correctly. The + // required bundle must match the trust domain specified in the bundle + // endpoint SPIFFE ID. If the bundle endpoint SPIFFE ID resides in the same + // trust domain that you're trying to federate with, you may optionally + // specify that trust domain bundle here. If the bundle endpoint SPIFFE ID + // _does not_ reside in the same trust domain that you're federating with, + // please ensure that the trust domain bundle for that trust domain has + // been configured separately (e.g. 
configured via another federation + // relationship or manually set via the Bundle API). + spire.api.types.Bundle trust_domain_bundle = 5; +} + +message HTTPSSPIFFEProfile { + // Required. Specifies the expected SPIFFE ID of the SPIFFE bundle endpoint + // server. + string endpoint_spiffe_id = 1; +} + +message HTTPSWebProfile { +} + +message FederationRelationshipMask { + // bundle_endpoint_url field mask. + bool bundle_endpoint_url = 1; + + // bundle_endpoint_profile field mask. + bool bundle_endpoint_profile = 2; + + // trust_domain_bundle field mask. + bool trust_domain_bundle = 3; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.pb.go new file mode 100644 index 00000000000..48392aa4d2b --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.pb.go @@ -0,0 +1,158 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/jointoken.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type JoinToken struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value of the token. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // The token expiration (seconds since Unix epoch). 
+ ExpiresAt int64 `protobuf:"varint,2,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` +} + +func (x *JoinToken) Reset() { + *x = JoinToken{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_jointoken_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JoinToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JoinToken) ProtoMessage() {} + +func (x *JoinToken) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_jointoken_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JoinToken.ProtoReflect.Descriptor instead. +func (*JoinToken) Descriptor() ([]byte, []int) { + return file_spire_api_types_jointoken_proto_rawDescGZIP(), []int{0} +} + +func (x *JoinToken) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *JoinToken) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +var File_spire_api_types_jointoken_proto protoreflect.FileDescriptor + +var file_spire_api_types_jointoken_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x6a, 0x6f, 0x69, 0x6e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x22, 0x40, 0x0a, 0x09, 0x4a, 0x6f, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, + 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 
+ 0x65, 0x73, 0x41, 0x74, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, + 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_jointoken_proto_rawDescOnce sync.Once + file_spire_api_types_jointoken_proto_rawDescData = file_spire_api_types_jointoken_proto_rawDesc +) + +func file_spire_api_types_jointoken_proto_rawDescGZIP() []byte { + file_spire_api_types_jointoken_proto_rawDescOnce.Do(func() { + file_spire_api_types_jointoken_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_jointoken_proto_rawDescData) + }) + return file_spire_api_types_jointoken_proto_rawDescData +} + +var file_spire_api_types_jointoken_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_jointoken_proto_goTypes = []interface{}{ + (*JoinToken)(nil), // 0: spire.api.types.JoinToken +} +var file_spire_api_types_jointoken_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_spire_api_types_jointoken_proto_init() } +func file_spire_api_types_jointoken_proto_init() { + if File_spire_api_types_jointoken_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_jointoken_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JoinToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_jointoken_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_jointoken_proto_goTypes, + DependencyIndexes: file_spire_api_types_jointoken_proto_depIdxs, + MessageInfos: file_spire_api_types_jointoken_proto_msgTypes, + }.Build() + File_spire_api_types_jointoken_proto = out.File + file_spire_api_types_jointoken_proto_rawDesc = nil + file_spire_api_types_jointoken_proto_goTypes = nil + file_spire_api_types_jointoken_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.proto new file mode 100644 index 00000000000..f12a9a30ad3 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message JoinToken { + // The value of the token. + string value = 1; + + // The token expiration (seconds since Unix epoch). + int64 expires_at = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.pb.go new file mode 100644 index 00000000000..ec9f7cbfa80 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.pb.go @@ -0,0 +1,187 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/jwtsvid.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// JWT SPIFFE Verifiable Identity Document. It contains the raw JWT token +// as well as a few denormalized fields for convenience. +type JWTSVID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The serialized JWT token. + Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + // The SPIFFE ID of the JWT-SVID. + Id *SPIFFEID `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // Expiration timestamp (seconds since Unix epoch). + ExpiresAt int64 `protobuf:"varint,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + // Issuance timestamp (seconds since Unix epoch). + IssuedAt int64 `protobuf:"varint,4,opt,name=issued_at,json=issuedAt,proto3" json:"issued_at,omitempty"` +} + +func (x *JWTSVID) Reset() { + *x = JWTSVID{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_jwtsvid_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JWTSVID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JWTSVID) ProtoMessage() {} + +func (x *JWTSVID) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_jwtsvid_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JWTSVID.ProtoReflect.Descriptor instead. 
+func (*JWTSVID) Descriptor() ([]byte, []int) { + return file_spire_api_types_jwtsvid_proto_rawDescGZIP(), []int{0} +} + +func (x *JWTSVID) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *JWTSVID) GetId() *SPIFFEID { + if x != nil { + return x.Id + } + return nil +} + +func (x *JWTSVID) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +func (x *JWTSVID) GetIssuedAt() int64 { + if x != nil { + return x.IssuedAt + } + return 0 +} + +var File_spire_api_types_jwtsvid_proto protoreflect.FileDescriptor + +var file_spire_api_types_jwtsvid_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x6a, 0x77, 0x74, 0x73, 0x76, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x1a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x86, 0x01, 0x0a, 0x07, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x41, 0x74, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 
0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_jwtsvid_proto_rawDescOnce sync.Once + file_spire_api_types_jwtsvid_proto_rawDescData = file_spire_api_types_jwtsvid_proto_rawDesc +) + +func file_spire_api_types_jwtsvid_proto_rawDescGZIP() []byte { + file_spire_api_types_jwtsvid_proto_rawDescOnce.Do(func() { + file_spire_api_types_jwtsvid_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_jwtsvid_proto_rawDescData) + }) + return file_spire_api_types_jwtsvid_proto_rawDescData +} + +var file_spire_api_types_jwtsvid_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_jwtsvid_proto_goTypes = []interface{}{ + (*JWTSVID)(nil), // 0: spire.api.types.JWTSVID + (*SPIFFEID)(nil), // 1: spire.api.types.SPIFFEID +} +var file_spire_api_types_jwtsvid_proto_depIdxs = []int32{ + 1, // 0: spire.api.types.JWTSVID.id:type_name -> spire.api.types.SPIFFEID + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_spire_api_types_jwtsvid_proto_init() } +func file_spire_api_types_jwtsvid_proto_init() { + if File_spire_api_types_jwtsvid_proto != nil { + return + } + file_spire_api_types_spiffeid_proto_init() + if !protoimpl.UnsafeEnabled { + file_spire_api_types_jwtsvid_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JWTSVID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_jwtsvid_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_jwtsvid_proto_goTypes, + DependencyIndexes: file_spire_api_types_jwtsvid_proto_depIdxs, + MessageInfos: file_spire_api_types_jwtsvid_proto_msgTypes, + }.Build() + File_spire_api_types_jwtsvid_proto = out.File + file_spire_api_types_jwtsvid_proto_rawDesc = nil + file_spire_api_types_jwtsvid_proto_goTypes = nil + file_spire_api_types_jwtsvid_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.proto new file mode 100644 index 00000000000..51f840a4352 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +import "spire/api/types/spiffeid.proto"; + +// JWT SPIFFE Verifiable Identity Document. It contains the raw JWT token +// as well as a few denormalized fields for convenience. +message JWTSVID { + // The serialized JWT token. + string token = 1; + + // The SPIFFE ID of the JWT-SVID. + spire.api.types.SPIFFEID id = 2; + + // Expiration timestamp (seconds since Unix epoch). + int64 expires_at = 3; + + // Issuance timestamp (seconds since Unix epoch). + int64 issued_at = 4; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.pb.go new file mode 100644 index 00000000000..49ed9323d10 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.pb.go @@ -0,0 +1,346 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/selector.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SelectorMatch_MatchBehavior int32 + +const ( + // Indicates that the selectors in this match are equal to the + // candidate selectors, independent of ordering. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_EXACT ["a:1", "b:2"] + // Entries that match: + // - 'e2' + SelectorMatch_MATCH_EXACT SelectorMatch_MatchBehavior = 0 + // Indicates that all candidates which have a non-empty subset + // of the provided set of selectors will match. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_SUBSET ["a:1"] + // Entries that match: + // - 'e1' + SelectorMatch_MATCH_SUBSET SelectorMatch_MatchBehavior = 1 + // Indicates that all candidates which are a superset + // of the provided selectors will match. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_SUPERSET ["a:1", "b:2"] + // Entries that match: + // - 'e1' + // - 'e2' + SelectorMatch_MATCH_SUPERSET SelectorMatch_MatchBehavior = 2 + // Indicates that all candidates which have at least one + // of the provided set of selectors will match. 
+ // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_ANY ["a:1"] + // Entries that match: + // - 'e1' + // - 'e2' + // - 'e3' + SelectorMatch_MATCH_ANY SelectorMatch_MatchBehavior = 3 +) + +// Enum value maps for SelectorMatch_MatchBehavior. +var ( + SelectorMatch_MatchBehavior_name = map[int32]string{ + 0: "MATCH_EXACT", + 1: "MATCH_SUBSET", + 2: "MATCH_SUPERSET", + 3: "MATCH_ANY", + } + SelectorMatch_MatchBehavior_value = map[string]int32{ + "MATCH_EXACT": 0, + "MATCH_SUBSET": 1, + "MATCH_SUPERSET": 2, + "MATCH_ANY": 3, + } +) + +func (x SelectorMatch_MatchBehavior) Enum() *SelectorMatch_MatchBehavior { + p := new(SelectorMatch_MatchBehavior) + *p = x + return p +} + +func (x SelectorMatch_MatchBehavior) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SelectorMatch_MatchBehavior) Descriptor() protoreflect.EnumDescriptor { + return file_spire_api_types_selector_proto_enumTypes[0].Descriptor() +} + +func (SelectorMatch_MatchBehavior) Type() protoreflect.EnumType { + return &file_spire_api_types_selector_proto_enumTypes[0] +} + +func (x SelectorMatch_MatchBehavior) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SelectorMatch_MatchBehavior.Descriptor instead. +func (SelectorMatch_MatchBehavior) EnumDescriptor() ([]byte, []int) { + return file_spire_api_types_selector_proto_rawDescGZIP(), []int{1, 0} +} + +type Selector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the selector. This is typically the name of the plugin that + // produces the selector. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The value of the selector. 
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Selector) Reset() { + *x = Selector{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_selector_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Selector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Selector) ProtoMessage() {} + +func (x *Selector) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_selector_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Selector.ProtoReflect.Descriptor instead. +func (*Selector) Descriptor() ([]byte, []int) { + return file_spire_api_types_selector_proto_rawDescGZIP(), []int{0} +} + +func (x *Selector) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Selector) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type SelectorMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of selectors to match on. + Selectors []*Selector `protobuf:"bytes,1,rep,name=selectors,proto3" json:"selectors,omitempty"` + // How to match the selectors. 
+ Match SelectorMatch_MatchBehavior `protobuf:"varint,2,opt,name=match,proto3,enum=spire.api.types.SelectorMatch_MatchBehavior" json:"match,omitempty"` +} + +func (x *SelectorMatch) Reset() { + *x = SelectorMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_selector_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelectorMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelectorMatch) ProtoMessage() {} + +func (x *SelectorMatch) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_selector_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelectorMatch.ProtoReflect.Descriptor instead. +func (*SelectorMatch) Descriptor() ([]byte, []int) { + return file_spire_api_types_selector_proto_rawDescGZIP(), []int{1} +} + +func (x *SelectorMatch) GetSelectors() []*Selector { + if x != nil { + return x.Selectors + } + return nil +} + +func (x *SelectorMatch) GetMatch() SelectorMatch_MatchBehavior { + if x != nil { + return x.Match + } + return SelectorMatch_MATCH_EXACT +} + +var File_spire_api_types_selector_proto protoreflect.FileDescriptor + +var file_spire_api_types_selector_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x22, 0x34, 0x0a, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x01, 0x0a, 0x0d, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x42, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2c, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, + 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x55, 0x0a, 0x0d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x41, 0x54, 0x43, 0x48, + 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x41, 0x54, 0x43, + 0x48, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4d, 0x41, + 0x54, 0x43, 0x48, 0x5f, 0x53, 0x55, 0x50, 0x45, 0x52, 0x53, 0x45, 0x54, 0x10, 0x02, 0x12, 0x0d, + 0x0a, 0x09, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0x03, 0x42, 0x37, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_selector_proto_rawDescOnce sync.Once + 
file_spire_api_types_selector_proto_rawDescData = file_spire_api_types_selector_proto_rawDesc +) + +func file_spire_api_types_selector_proto_rawDescGZIP() []byte { + file_spire_api_types_selector_proto_rawDescOnce.Do(func() { + file_spire_api_types_selector_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_selector_proto_rawDescData) + }) + return file_spire_api_types_selector_proto_rawDescData +} + +var file_spire_api_types_selector_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_spire_api_types_selector_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_spire_api_types_selector_proto_goTypes = []interface{}{ + (SelectorMatch_MatchBehavior)(0), // 0: spire.api.types.SelectorMatch.MatchBehavior + (*Selector)(nil), // 1: spire.api.types.Selector + (*SelectorMatch)(nil), // 2: spire.api.types.SelectorMatch +} +var file_spire_api_types_selector_proto_depIdxs = []int32{ + 1, // 0: spire.api.types.SelectorMatch.selectors:type_name -> spire.api.types.Selector + 0, // 1: spire.api.types.SelectorMatch.match:type_name -> spire.api.types.SelectorMatch.MatchBehavior + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_spire_api_types_selector_proto_init() } +func file_spire_api_types_selector_proto_init() { + if File_spire_api_types_selector_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_selector_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Selector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_selector_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelectorMatch); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_selector_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_selector_proto_goTypes, + DependencyIndexes: file_spire_api_types_selector_proto_depIdxs, + EnumInfos: file_spire_api_types_selector_proto_enumTypes, + MessageInfos: file_spire_api_types_selector_proto_msgTypes, + }.Build() + File_spire_api_types_selector_proto = out.File + file_spire_api_types_selector_proto_rawDesc = nil + file_spire_api_types_selector_proto_goTypes = nil + file_spire_api_types_selector_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.proto new file mode 100644 index 00000000000..522017587f1 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message Selector { + // The type of the selector. This is typically the name of the plugin that + // produces the selector. + string type = 1; + + // The value of the selector. + string value = 2; +} + +message SelectorMatch { + enum MatchBehavior { + // Indicates that the selectors in this match are equal to the + // candidate selectors, independent of ordering. 
+ // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_EXACT ["a:1", "b:2"] + // Entries that match: + // - 'e2' + MATCH_EXACT = 0; + + // Indicates that all candidates which have a non-empty subset + // of the provided set of selectors will match. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_SUBSET ["a:1"] + // Entries that match: + // - 'e1' + MATCH_SUBSET = 1; + + // Indicates that all candidates which are a superset + // of the provided selectors will match. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_SUPERSET ["a:1", "b:2"] + // Entries that match: + // - 'e1' + // - 'e2' + MATCH_SUPERSET = 2; + + // Indicates that all candidates which have at least one + // of the provided set of selectors will match. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_ANY ["a:1"] + // Entries that match: + // - 'e1' + // - 'e2' + // - 'e3' + MATCH_ANY = 3; + + } + + // The set of selectors to match on. + repeated Selector selectors = 1; + + // How to match the selectors. + MatchBehavior match = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.pb.go new file mode 100644 index 00000000000..9fd4b6aa033 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.pb.go @@ -0,0 +1,162 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/spiffeid.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A SPIFFE ID, consisting of the trust domain name and a path portions of +// the SPIFFE ID URI. +type SPIFFEID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Trust domain portion the SPIFFE ID (e.g. "example.org") + TrustDomain string `protobuf:"bytes,1,opt,name=trust_domain,json=trustDomain,proto3" json:"trust_domain,omitempty"` + // The path component of the SPIFFE ID (e.g. "/foo/bar/baz"). The path + // SHOULD have a leading slash. Consumers MUST normalize the path before + // making any sort of comparison between IDs. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *SPIFFEID) Reset() { + *x = SPIFFEID{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_spiffeid_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SPIFFEID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SPIFFEID) ProtoMessage() {} + +func (x *SPIFFEID) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_spiffeid_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SPIFFEID.ProtoReflect.Descriptor instead. 
+func (*SPIFFEID) Descriptor() ([]byte, []int) { + return file_spire_api_types_spiffeid_proto_rawDescGZIP(), []int{0} +} + +func (x *SPIFFEID) GetTrustDomain() string { + if x != nil { + return x.TrustDomain + } + return "" +} + +func (x *SPIFFEID) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +var File_spire_api_types_spiffeid_proto protoreflect.FileDescriptor + +var file_spire_api_types_spiffeid_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x22, 0x41, 0x0a, 0x08, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x12, 0x21, 0x0a, + 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, + 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_spiffeid_proto_rawDescOnce sync.Once + file_spire_api_types_spiffeid_proto_rawDescData = file_spire_api_types_spiffeid_proto_rawDesc +) + +func file_spire_api_types_spiffeid_proto_rawDescGZIP() []byte { + file_spire_api_types_spiffeid_proto_rawDescOnce.Do(func() { + file_spire_api_types_spiffeid_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_spiffeid_proto_rawDescData) + }) + 
return file_spire_api_types_spiffeid_proto_rawDescData +} + +var file_spire_api_types_spiffeid_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_spiffeid_proto_goTypes = []interface{}{ + (*SPIFFEID)(nil), // 0: spire.api.types.SPIFFEID +} +var file_spire_api_types_spiffeid_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_spire_api_types_spiffeid_proto_init() } +func file_spire_api_types_spiffeid_proto_init() { + if File_spire_api_types_spiffeid_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_spiffeid_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SPIFFEID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_spiffeid_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_spiffeid_proto_goTypes, + DependencyIndexes: file_spire_api_types_spiffeid_proto_depIdxs, + MessageInfos: file_spire_api_types_spiffeid_proto_msgTypes, + }.Build() + File_spire_api_types_spiffeid_proto = out.File + file_spire_api_types_spiffeid_proto_rawDesc = nil + file_spire_api_types_spiffeid_proto_goTypes = nil + file_spire_api_types_spiffeid_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.proto new file mode 100644 index 00000000000..37ad35b273a --- /dev/null +++ 
b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +// A SPIFFE ID, consisting of the trust domain name and a path portions of +// the SPIFFE ID URI. +message SPIFFEID { + // Trust domain portion the SPIFFE ID (e.g. "example.org") + string trust_domain = 1; + + // The path component of the SPIFFE ID (e.g. "/foo/bar/baz"). The path + // SHOULD have a leading slash. Consumers MUST normalize the path before + // making any sort of comparison between IDs. + string path = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.pb.go new file mode 100644 index 00000000000..210d8056c22 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.pb.go @@ -0,0 +1,294 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/status.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PermissionDeniedDetails_Reason int32 + +const ( + // Reason unknown. + PermissionDeniedDetails_UNKNOWN PermissionDeniedDetails_Reason = 0 + // Agent identity has expired. + PermissionDeniedDetails_AGENT_EXPIRED PermissionDeniedDetails_Reason = 1 + // Identity is not an attested agent. + PermissionDeniedDetails_AGENT_NOT_ATTESTED PermissionDeniedDetails_Reason = 2 + // Identity is not the active agent identity. 
+ PermissionDeniedDetails_AGENT_NOT_ACTIVE PermissionDeniedDetails_Reason = 3 + // Agent has been banned. + PermissionDeniedDetails_AGENT_BANNED PermissionDeniedDetails_Reason = 4 +) + +// Enum value maps for PermissionDeniedDetails_Reason. +var ( + PermissionDeniedDetails_Reason_name = map[int32]string{ + 0: "UNKNOWN", + 1: "AGENT_EXPIRED", + 2: "AGENT_NOT_ATTESTED", + 3: "AGENT_NOT_ACTIVE", + 4: "AGENT_BANNED", + } + PermissionDeniedDetails_Reason_value = map[string]int32{ + "UNKNOWN": 0, + "AGENT_EXPIRED": 1, + "AGENT_NOT_ATTESTED": 2, + "AGENT_NOT_ACTIVE": 3, + "AGENT_BANNED": 4, + } +) + +func (x PermissionDeniedDetails_Reason) Enum() *PermissionDeniedDetails_Reason { + p := new(PermissionDeniedDetails_Reason) + *p = x + return p +} + +func (x PermissionDeniedDetails_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PermissionDeniedDetails_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_spire_api_types_status_proto_enumTypes[0].Descriptor() +} + +func (PermissionDeniedDetails_Reason) Type() protoreflect.EnumType { + return &file_spire_api_types_status_proto_enumTypes[0] +} + +func (x PermissionDeniedDetails_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PermissionDeniedDetails_Reason.Descriptor instead. +func (PermissionDeniedDetails_Reason) EnumDescriptor() ([]byte, []int) { + return file_spire_api_types_status_proto_rawDescGZIP(), []int{1, 0} +} + +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A status code, which should be an enum value of google.rpc.Code. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message. 
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_spire_api_types_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type PermissionDeniedDetails struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reason PermissionDeniedDetails_Reason `protobuf:"varint,1,opt,name=reason,proto3,enum=spire.api.types.PermissionDeniedDetails_Reason" json:"reason,omitempty"` +} + +func (x *PermissionDeniedDetails) Reset() { + *x = PermissionDeniedDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_status_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PermissionDeniedDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PermissionDeniedDetails) ProtoMessage() {} + +func (x *PermissionDeniedDetails) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_status_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PermissionDeniedDetails.ProtoReflect.Descriptor instead. +func (*PermissionDeniedDetails) Descriptor() ([]byte, []int) { + return file_spire_api_types_status_proto_rawDescGZIP(), []int{1} +} + +func (x *PermissionDeniedDetails) GetReason() PermissionDeniedDetails_Reason { + if x != nil { + return x.Reason + } + return PermissionDeniedDetails_UNKNOWN +} + +var File_spire_api_types_status_proto protoreflect.FileDescriptor + +var file_spire_api_types_status_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, + 0x36, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x12, 0x47, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x52, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x68, 0x0a, 0x06, + 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 
0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x58, 0x50, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, + 0x4e, 0x4f, 0x54, 0x5f, 0x41, 0x54, 0x54, 0x45, 0x53, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x14, + 0x0a, 0x10, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x41, 0x43, 0x54, 0x49, + 0x56, 0x45, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x42, 0x41, + 0x4e, 0x4e, 0x45, 0x44, 0x10, 0x04, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, + 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_status_proto_rawDescOnce sync.Once + file_spire_api_types_status_proto_rawDescData = file_spire_api_types_status_proto_rawDesc +) + +func file_spire_api_types_status_proto_rawDescGZIP() []byte { + file_spire_api_types_status_proto_rawDescOnce.Do(func() { + file_spire_api_types_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_status_proto_rawDescData) + }) + return file_spire_api_types_status_proto_rawDescData +} + +var file_spire_api_types_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_spire_api_types_status_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_spire_api_types_status_proto_goTypes = []interface{}{ + (PermissionDeniedDetails_Reason)(0), // 0: spire.api.types.PermissionDeniedDetails.Reason + (*Status)(nil), // 1: spire.api.types.Status + (*PermissionDeniedDetails)(nil), // 2: spire.api.types.PermissionDeniedDetails +} +var file_spire_api_types_status_proto_depIdxs = []int32{ + 0, // 0: 
spire.api.types.PermissionDeniedDetails.reason:type_name -> spire.api.types.PermissionDeniedDetails.Reason + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_spire_api_types_status_proto_init() } +func file_spire_api_types_status_proto_init() { + if File_spire_api_types_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_status_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PermissionDeniedDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_status_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_status_proto_goTypes, + DependencyIndexes: file_spire_api_types_status_proto_depIdxs, + EnumInfos: file_spire_api_types_status_proto_enumTypes, + MessageInfos: file_spire_api_types_status_proto_msgTypes, + }.Build() + File_spire_api_types_status_proto = out.File + file_spire_api_types_status_proto_rawDesc = nil + file_spire_api_types_status_proto_goTypes = nil + file_spire_api_types_status_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.proto new file 
mode 100644 index 00000000000..498bd10bb31 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message Status { + // A status code, which should be an enum value of google.rpc.Code. + int32 code = 1; + + // A developer-facing error message. + string message = 2; +} + +message PermissionDeniedDetails { + enum Reason { + // Reason unknown. + UNKNOWN = 0; + // Agent identity has expired. + AGENT_EXPIRED = 1; + // Identity is not an attested agent. + AGENT_NOT_ATTESTED = 2; + // Identity is not the active agent identity. + AGENT_NOT_ACTIVE = 3; + // Agent has been banned. + AGENT_BANNED = 4; + } + Reason reason = 1; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.pb.go new file mode 100644 index 00000000000..450465e7ae9 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.pb.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/x509svid.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// X.509 SPIFFE Verifiable Identity Document. It contains the raw X.509 +// certificate data as well as a few denormalized fields for convenience. 
+type X509SVID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Certificate and intermediates required to form a chain of trust back to + // the X.509 authorities of the trust domain (ASN.1 DER encoded). + CertChain [][]byte `protobuf:"bytes,1,rep,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` + // SPIFFE ID of the SVID. + Id *SPIFFEID `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // Expiration timestamp (seconds since Unix epoch). + ExpiresAt int64 `protobuf:"varint,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` +} + +func (x *X509SVID) Reset() { + *x = X509SVID{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_x509svid_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *X509SVID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509SVID) ProtoMessage() {} + +func (x *X509SVID) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_x509svid_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509SVID.ProtoReflect.Descriptor instead. 
+func (*X509SVID) Descriptor() ([]byte, []int) { + return file_spire_api_types_x509svid_proto_rawDescGZIP(), []int{0} +} + +func (x *X509SVID) GetCertChain() [][]byte { + if x != nil { + return x.CertChain + } + return nil +} + +func (x *X509SVID) GetId() *SPIFFEID { + if x != nil { + return x.Id + } + return nil +} + +func (x *X509SVID) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +var File_spire_api_types_x509svid_proto protoreflect.FileDescriptor + +var file_spire_api_types_x509svid_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x1a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x73, 0x0a, 0x08, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, 0x12, 0x1d, 0x0a, + 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, + 0x45, 0x49, 0x44, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, + 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x2f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_x509svid_proto_rawDescOnce sync.Once + file_spire_api_types_x509svid_proto_rawDescData = file_spire_api_types_x509svid_proto_rawDesc +) + +func file_spire_api_types_x509svid_proto_rawDescGZIP() []byte { + file_spire_api_types_x509svid_proto_rawDescOnce.Do(func() { + file_spire_api_types_x509svid_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_x509svid_proto_rawDescData) + }) + return file_spire_api_types_x509svid_proto_rawDescData +} + +var file_spire_api_types_x509svid_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_x509svid_proto_goTypes = []interface{}{ + (*X509SVID)(nil), // 0: spire.api.types.X509SVID + (*SPIFFEID)(nil), // 1: spire.api.types.SPIFFEID +} +var file_spire_api_types_x509svid_proto_depIdxs = []int32{ + 1, // 0: spire.api.types.X509SVID.id:type_name -> spire.api.types.SPIFFEID + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_spire_api_types_x509svid_proto_init() } +func file_spire_api_types_x509svid_proto_init() { + if File_spire_api_types_x509svid_proto != nil { + return + } + file_spire_api_types_spiffeid_proto_init() + if !protoimpl.UnsafeEnabled { + file_spire_api_types_x509svid_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509SVID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_spire_api_types_x509svid_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_x509svid_proto_goTypes, + DependencyIndexes: file_spire_api_types_x509svid_proto_depIdxs, + MessageInfos: file_spire_api_types_x509svid_proto_msgTypes, + }.Build() + File_spire_api_types_x509svid_proto = out.File + file_spire_api_types_x509svid_proto_rawDesc = nil + file_spire_api_types_x509svid_proto_goTypes = nil + file_spire_api_types_x509svid_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.proto new file mode 100644 index 00000000000..e4b1466ff20 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +import "spire/api/types/spiffeid.proto"; + +// X.509 SPIFFE Verifiable Identity Document. It contains the raw X.509 +// certificate data as well as a few denormalized fields for convenience. +message X509SVID { + // Certificate and intermediates required to form a chain of trust back to + // the X.509 authorities of the trust domain (ASN.1 DER encoded). + repeated bytes cert_chain = 1; + + // SPIFFE ID of the SVID. + spire.api.types.SPIFFEID id = 2; + + // Expiration timestamp (seconds since Unix epoch). 
+ int64 expires_at = 3; +} diff --git a/vendor/github.com/zeebo/errs/.gitignore b/vendor/github.com/zeebo/errs/.gitignore new file mode 100644 index 00000000000..722d5e71d93 --- /dev/null +++ b/vendor/github.com/zeebo/errs/.gitignore @@ -0,0 +1 @@ +.vscode diff --git a/vendor/github.com/zeebo/errs/AUTHORS b/vendor/github.com/zeebo/errs/AUTHORS new file mode 100644 index 00000000000..a970ee57732 --- /dev/null +++ b/vendor/github.com/zeebo/errs/AUTHORS @@ -0,0 +1,4 @@ +Egon Elbre +Jeff Wendling +JT Olio +Kaloyan Raev \ No newline at end of file diff --git a/vendor/github.com/zeebo/errs/LICENSE b/vendor/github.com/zeebo/errs/LICENSE new file mode 100644 index 00000000000..3ba91930ed2 --- /dev/null +++ b/vendor/github.com/zeebo/errs/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 The Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/zeebo/errs/README.md b/vendor/github.com/zeebo/errs/README.md new file mode 100644 index 00000000000..be6bc0b4065 --- /dev/null +++ b/vendor/github.com/zeebo/errs/README.md @@ -0,0 +1,235 @@ +# errs + +[![GoDoc](https://godoc.org/github.com/zeebo/errs?status.svg)](https://godoc.org/github.com/zeebo/errs) +[![Sourcegraph](https://sourcegraph.com/github.com/zeebo/errs/-/badge.svg)](https://sourcegraph.com/github.com/zeebo/errs?badge) +[![Go Report Card](https://goreportcard.com/badge/github.com/zeebo/errs)](https://goreportcard.com/report/github.com/zeebo/errs) + +errs is a package for making errors friendly and easy. + +### Creating Errors + +The easiest way to use it, is to use the package level [New][New] function. +It's much like `fmt.Errorf`, but better. For example: + +```go +func checkThing() error { + return errs.New("what's up with %q?", "zeebo") +} +``` + +Why is it better? Errors come with a stack trace that is only printed +when a `"+"` character is used in the format string. This should retain the +benefits of being able to diagnose where and why errors happen, without all of +the noise of printing a stack trace in every situation. For example: + +```go +func doSomeRealWork() { + err := checkThing() + if err != nil { + fmt.Printf("%+v\n", err) // contains stack trace if it's a errs error. + fmt.Printf("%v\n", err) // does not contain a stack trace + return + } +} +``` + +### Error Classes + +You can create a [Class][Class] of errors and check if any error was created by +that class. The class name is prefixed to all of the errors it creates. 
For example: + +```go +var Unauthorized = errs.Class("unauthorized") + +func checkUser(username, password string) error { + if username != "zeebo" { + return Unauthorized.New("who is %q?", username) + } + if password != "hunter2" { + return Unauthorized.New("that's not a good password, jerkmo!") + } + return nil +} + +func handleRequest() { + if err := checkUser("zeebo", "hunter3"); Unauthorized.Has(err) { + fmt.Println(err) + } + + // output: + // unauthorized: that's not a good password, jerkmo! +} +``` + +Classes can also [Wrap][ClassWrap] other errors, and errors may be wrapped +multiple times. For example: + +```go +var ( + Error = errs.Class("mypackage") + Unauthorized = errs.Class("unauthorized") +) + +func deep3() error { + return fmt.Errorf("ouch") +} + +func deep2() error { + return Unauthorized.Wrap(deep3()) +} + +func deep1() error { + return Error.Wrap(deep2()) +} + +func deep() { + fmt.Println(deep1()) + + // output: + // mypackage: unauthorized: ouch +} +``` + +In the above example, both `Error.Has(deep1())` and `Unauthorized.Has(deep1())` +would return `true`, and the stack trace would only be recorded once at the +`deep2` call. + +In addition, when an error has been wrapped, wrapping it again with the same class will +not do anything. For example: + +```go +func doubleWrap() { + fmt.Println(Error.Wrap(Error.New("foo"))) + + // output: + // mypackage: foo +} +``` + +This is to make it an easier decision if you should wrap or not (you should). + +### Utilities + +[Classes][Classes] is a helper function to get a slice of classes that an error +has. The latest wrap is first in the slice. For example: + +```go +func getClasses() { + classes := errs.Classes(deep1()) + fmt.Println(classes[0] == &Error) + fmt.Println(classes[1] == &Unauthorized) + + // output: + // true + // true +} +``` + +Finally, a helper function, [Unwrap][Unwrap] is provided to get the +wrapped error in cases where you might want to inspect details. 
For +example: + +```go +var Error = errs.Class("mypackage") + +func getHandle() (*os.File, error) { + fh, err := os.Open("neat_things") + if err != nil { + return nil, Error.Wrap(err) + } + return fh, nil +} + +func checkForNeatThings() { + fh, err := getHandle() + if os.IsNotExist(errs.Unwrap(err)) { + panic("no neat things?!") + } + if err != nil { + panic("phew, at least there are neat things, even if i can't see them") + } + fh.Close() +} +``` + +It knows about both the `Cause() error` and `Unwrap() error` methods that are +often used in the community, and will call them as many times as possible. + +### Defer + +The package also provides [WrapP][WrapP] versions of [Wrap][Wrap] that are useful +in defer contexts. For example: + +```go +func checkDefer() (err error) { + defer Error.WrapP(&err) + + fh, err := os.Open("secret_stash") + if err != nil { + return nil, err + } + return fh.Close() +} +``` + +### Groups + +[Groups][Group] allow one to collect a set of errors. For example: + +```go +func tonsOfErrors() error { + var group errs.Group + for _, work := range someWork { + group.Add(maybeErrors(work)) + } + return group.Err() +} +``` + +Some things to note: + +- The [Add][GroupAdd] method only adds to the group if the passed in error is non-nil. +- The [Err][GroupErr] method returns an error only if non-nil errors have been added, and + additionally returns just the error if only one error was added. Thus, we always + have that if you only call `group.Add(err)`, then `group.Err() == err`. + +The returned error will format itself similarly: + +```go +func groupFormat() { + var group errs.Group + group.Add(errs.New("first")) + group.Add(errs.New("second")) + err := group.Err() + + fmt.Printf("%v\n", err) + fmt.Println() + fmt.Printf("%+v\n", err) + + // output: + // first; second + // + // group: + // --- first + // ... stack trace + // --- second + // ... stack trace +} +``` + +### Contributing + +errs is released under an MIT License. 
If you want to contribute, be sure to +add yourself to the list in AUTHORS. + +[New]: https://godoc.org/github.com/zeebo/errs#New +[Wrap]: https://godoc.org/github.com/zeebo/errs#Wrap +[WrapP]: https://godoc.org/github.com/zeebo/errs#WrapP +[Class]: https://godoc.org/github.com/zeebo/errs#Class +[ClassNew]: https://godoc.org/github.com/zeebo/errs#Class.New +[ClassWrap]: https://godoc.org/github.com/zeebo/errs#Class.Wrap +[Unwrap]: https://godoc.org/github.com/zeebo/errs#Unwrap +[Classes]: https://godoc.org/github.com/zeebo/errs#Classes +[Group]: https://godoc.org/github.com/zeebo/errs#Group +[GroupAdd]: https://godoc.org/github.com/zeebo/errs#Group.Add +[GroupErr]: https://godoc.org/github.com/zeebo/errs#Group.Err diff --git a/vendor/github.com/zeebo/errs/errs.go b/vendor/github.com/zeebo/errs/errs.go new file mode 100644 index 00000000000..0705bac4ac0 --- /dev/null +++ b/vendor/github.com/zeebo/errs/errs.go @@ -0,0 +1,296 @@ +// Package errs provides a simple error package with stack traces. +package errs + +import ( + "fmt" + "io" + "runtime" +) + +// Namer is implemented by all errors returned in this package. It returns a +// name for the class of error it is, and a boolean indicating if the name is +// valid. +type Namer interface{ Name() (string, bool) } + +// Causer is implemented by all errors returned in this package. It returns +// the underlying cause of the error, or nil if there is no underlying cause. +type Causer interface{ Cause() error } + +// unwrapper is implemented by all errors returned in this package. It returns +// the underlying cause of the error, or nil if there is no underlying error. +type unwrapper interface{ Unwrap() error } + +// ungrouper is implemented by combinedError returned in this package. It +// returns all underlying errors, or nil if there is no underlying error. +type ungrouper interface{ Ungroup() []error } + +// New returns an error not contained in any class. This is the same as calling +// fmt.Errorf(...) 
except it captures a stack trace on creation. +func New(format string, args ...interface{}) error { + return (*Class).create(nil, 3, fmt.Errorf(format, args...)) +} + +// Wrap returns an error not contained in any class. It just associates a stack +// trace with the error. Wrap returns nil if err is nil. +func Wrap(err error) error { + return (*Class).create(nil, 3, err) +} + +// WrapP stores into the error pointer if it contains a non-nil error an error not +// contained in any class. It just associates a stack trace with the error. WrapP +// does nothing if the pointer or pointed at error is nil. +func WrapP(err *error) { + if err != nil && *err != nil { + *err = (*Class).create(nil, 3, *err) + } +} + +// Often, we call Cause as much as possible. Since comparing arbitrary +// interfaces with equality isn't panic safe, we only loop up to 100 +// times to ensure that a poor implementation that causes a cycle does +// not run forever. +const maxCause = 100 + +// Unwrap returns the underlying error, if any, or just the error. +func Unwrap(err error) error { + for i := 0; err != nil && i < maxCause; i++ { + var nerr error + + switch e := err.(type) { + case Causer: + nerr = e.Cause() + + case unwrapper: + nerr = e.Unwrap() + } + + if nerr == nil { + return err + } + err = nerr + } + + return err +} + +// Classes returns all the classes that have wrapped the error. 
+func Classes(err error) (classes []*Class) { + causes := 0 + for { + switch e := err.(type) { + case *errorT: + if e.class != nil { + classes = append(classes, e.class) + } + err = e.err + continue + + case Causer: + err = e.Cause() + + case unwrapper: + err = e.Unwrap() + + default: + return classes + } + + if causes >= maxCause { + return classes + } + causes++ + } +} + +// Is checks if any of the underlying errors matches target +func Is(err, target error) bool { + return IsFunc(err, func(err error) bool { + return err == target + }) +} + +// IsFunc checks if any of the underlying errors matches the func +func IsFunc(err error, is func(err error) bool) bool { + causes := 0 + errs := []error{err} + + for len(errs) > 0 { + var next []error + for _, err := range errs { + if is(err) { + return true + } + + switch e := err.(type) { + case ungrouper: + ungrouped := e.Ungroup() + for _, unerr := range ungrouped { + if unerr != nil { + next = append(next, unerr) + } + } + case Causer: + cause := e.Cause() + if cause != nil { + next = append(next, cause) + } + case unwrapper: + unwrapped := e.Unwrap() + if unwrapped != nil { + next = append(next, unwrapped) + } + } + + if causes >= maxCause { + return false + } + causes++ + } + errs = next + } + + return false +} + +// +// error classes +// + +// Class represents a class of errors. You can construct errors, and check if +// errors are part of the class. +type Class string + +// Has returns true if the passed in error was wrapped by this class. +func (c *Class) Has(err error) bool { + for { + errt, ok := err.(*errorT) + if !ok { + return false + } + if errt.class == c { + return true + } + err = errt.err + } +} + +// New constructs an error with the format string that will be contained by +// this class. This is the same as calling Wrap(fmt.Errorf(...)). 
+func (c *Class) New(format string, args ...interface{}) error { + return c.create(3, fmt.Errorf(format, args...)) +} + +// Wrap returns a new error based on the passed in error that is contained in +// this class. Wrap returns nil if err is nil. +func (c *Class) Wrap(err error) error { + return c.create(3, err) +} + +// WrapP stores into the error pointer if it contains a non-nil error an error contained +// in this class. WrapP does nothing if the pointer or pointed at error is nil. +func (c *Class) WrapP(err *error) { + if err != nil && *err != nil { + *err = c.create(3, *err) + } +} + +// create constructs the error, or just adds the class to the error, keeping +// track of the stack if it needs to construct it. +func (c *Class) create(depth int, err error) error { + if err == nil { + return nil + } + + var pcs []uintptr + if err, ok := err.(*errorT); ok { + if c == nil || err.class == c { + return err + } + pcs = err.pcs + } + + errt := &errorT{ + class: c, + err: err, + pcs: pcs, + } + + if errt.pcs == nil { + errt.pcs = make([]uintptr, 64) + n := runtime.Callers(depth, errt.pcs) + errt.pcs = errt.pcs[:n:n] + } + + return errt +} + +// +// errors +// + +// errorT is the type of errors returned from this package. +type errorT struct { + class *Class + err error + pcs []uintptr +} + +var ( // ensure *errorT implements the helper interfaces. + _ Namer = (*errorT)(nil) + _ Causer = (*errorT)(nil) + _ error = (*errorT)(nil) +) + +// errorT implements the error interface. +func (e *errorT) Error() string { + return fmt.Sprintf("%v", e) +} + +// Format handles the formatting of the error. Using a "+" on the format string +// specifier will also write the stack trace. 
+func (e *errorT) Format(f fmt.State, c rune) { + sep := "" + if e.class != nil && *e.class != "" { + fmt.Fprintf(f, "%s", string(*e.class)) + sep = ": " + } + if text := e.err.Error(); len(text) > 0 { + fmt.Fprintf(f, "%s%v", sep, text) + } + if f.Flag(int('+')) { + summarizeStack(f, e.pcs) + } +} + +// Cause implements the interface wrapping errors are expected to implement +// to allow getting at underlying causes. +func (e *errorT) Cause() error { + return e.err +} + +// Unwrap implements the draft design for error inspection. Since this is +// on an unexported type, it should not be hard to maintain going forward +// given that it also is the exact same semantics as Cause. +func (e *errorT) Unwrap() error { + return e.err +} + +// Name returns the name for the error, which is the first wrapping class. +func (e *errorT) Name() (string, bool) { + if e.class == nil { + return "", false + } + return string(*e.class), true +} + +// summarizeStack writes stack line entries to the writer. +func summarizeStack(w io.Writer, pcs []uintptr) { + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + if !more { + return + } + fmt.Fprintf(w, "\n\t%s:%d", frame.Function, frame.Line) + } +} diff --git a/vendor/github.com/zeebo/errs/group.go b/vendor/github.com/zeebo/errs/group.go new file mode 100644 index 00000000000..e5997ec55d7 --- /dev/null +++ b/vendor/github.com/zeebo/errs/group.go @@ -0,0 +1,100 @@ +package errs + +import ( + "fmt" + "io" +) + +// Group is a list of errors. +type Group []error + +// Combine combines multiple non-empty errors into a single error. +func Combine(errs ...error) error { + var group Group + group.Add(errs...) + return group.Err() +} + +// Add adds non-empty errors to the Group. +func (group *Group) Add(errs ...error) { + for _, err := range errs { + if err != nil { + *group = append(*group, err) + } + } +} + +// Err returns an error containing all of the non-nil errors. 
+// If there was only one error, it will return it. +// If there were none, it returns nil. +func (group Group) Err() error { + sanitized := group.sanitize() + if len(sanitized) == 0 { + return nil + } + if len(sanitized) == 1 { + return sanitized[0] + } + return combinedError(sanitized) +} + +// sanitize returns group that doesn't contain nil-s +func (group Group) sanitize() Group { + // sanity check for non-nil errors + for i, err := range group { + if err == nil { + sanitized := make(Group, 0, len(group)-1) + sanitized = append(sanitized, group[:i]...) + sanitized.Add(group[i+1:]...) + return sanitized + } + } + + return group +} + +// combinedError is a list of non-empty errors +type combinedError []error + +// Cause returns the first error. +func (group combinedError) Cause() error { + if len(group) > 0 { + return group[0] + } + return nil +} + +// Unwrap returns the first error. +func (group combinedError) Unwrap() error { + return group.Cause() +} + +// Ungroup returns all errors. +func (group combinedError) Ungroup() []error { + return group +} + +// Error returns error string delimited by semicolons. +func (group combinedError) Error() string { return fmt.Sprintf("%v", group) } + +// Format handles the formatting of the error. Using a "+" on the format +// string specifier will cause the errors to be formatted with "+" and +// delimited by newlines. They are delimited by semicolons otherwise. 
+func (group combinedError) Format(f fmt.State, c rune) { + delim := "; " + if f.Flag(int('+')) { + io.WriteString(f, "group:\n--- ") + delim = "\n--- " + } + + for i, err := range group { + if i != 0 { + io.WriteString(f, delim) + } + if formatter, ok := err.(fmt.Formatter); ok { + formatter.Format(f, c) + } else { + fmt.Fprintf(f, "%v", err) + } + } +} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 00000000000..a7828345fcc --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the β€œEd25519” function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the β€œseed”. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. This package only acts as a compatibility +// wrapper. +package ed25519 + +import ( + "crypto/ed25519" + "io" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. 
+// +// This type is an alias for crypto/ed25519's PublicKey type. +// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + return ed25519.GenerateKey(rand) +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + return ed25519.NewKeyFromSeed(seed) +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + return ed25519.Sign(privateKey, message) +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + return ed25519.Verify(publicKey, message, sig) +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 00000000000..593f6530084 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. 
+ +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. + +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md index cd03f8c7688..52338d004ce 100644 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly. - **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. - - `make all` to test everything, OR - - `make vet` to catch vet errors - - `make test` to run the tests - - `make testrace` to run tests in race mode + - `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors + - `go test -cpu 1,4 -timeout 7m ./...` to run the tests + - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode - Exceptions to the rules can be made if there's a compelling reason for doing so. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index bcc6f5451c9..f7a7697cad0 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -27,6 +27,7 @@ import ( "net" "strings" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal" @@ -192,7 +193,7 @@ type BuildOptions struct { // server can ignore this field. Authority string // ChannelzParentID is the parent ClientConn's channelz ID. 
- ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // CustomUserAgent is the custom user agent set on the parent ClientConn. // The balancer should set the same custom user agent if it creates a // ClientConn. diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index f4ea6174682..b1c23eaae0d 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -20,130 +20,178 @@ package grpc import ( "fmt" + "strings" "sync" "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" ) -// scStateUpdate contains the subConn and the new state it changed to. -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} +// ccBalancerWrapper sits between the ClientConn and the Balancer. +// +// ccBalancerWrapper implements methods corresponding to the ones on the +// balancer.Balancer interface. The ClientConn is free to call these methods +// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn +// to the Balancer happen synchronously and in order. +// +// ccBalancerWrapper also implements the balancer.ClientConn interface and is +// passed to the Balancer implementations. It invokes unexported methods on the +// ClientConn to handle these calls from the Balancer. +// +// It uses the gracefulswitch.Balancer internally to ensure that balancer +// switches happen in a graceful manner. +type ccBalancerWrapper struct { + cc *ClientConn -// exitIdle contains no data and is just a signal sent on the updateCh in -// ccBalancerWrapper to instruct the balancer to exit idle. 
-type exitIdle struct{} + // Since these fields are accessed only from handleXxx() methods which are + // synchronized by the watcher goroutine, we do not need a mutex to protect + // these fields. + balancer *gracefulswitch.Balancer + curBalancerName string -// ccBalancerWrapper is a wrapper on top of cc for balancers. -// It implements balancer.ClientConn interface. -type ccBalancerWrapper struct { - cc *ClientConn - balancerMu sync.Mutex // synchronizes calls to the balancer - balancer balancer.Balancer - hasExitIdle bool - updateCh *buffer.Unbounded - closed *grpcsync.Event - done *grpcsync.Event - - mu sync.Mutex - subConns map[*acBalancerWrapper]struct{} + updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). + resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. + closed *grpcsync.Event // Indicates if close has been called. + done *grpcsync.Event // Indicates if close has completed its work. } -func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { +// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer +// is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { ccb := &ccBalancerWrapper{ cc: cc, updateCh: buffer.NewUnbounded(), + resultCh: buffer.NewUnbounded(), closed: grpcsync.NewEvent(), done: grpcsync.NewEvent(), - subConns: make(map[*acBalancerWrapper]struct{}), } go ccb.watcher() - ccb.balancer = b.Build(ccb, bopts) - _, ccb.hasExitIdle = ccb.balancer.(balancer.ExitIdler) + ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// watcher balancer functions sequentially, so the balancer can be implemented -// lock-free. +// The following xxxUpdate structs wrap the arguments received as part of the +// corresponding update. 
The watcher goroutine uses the 'type' of the update to +// invoke the appropriate handler routine to handle the update. + +type ccStateUpdate struct { + ccs *balancer.ClientConnState +} + +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State + err error +} + +type exitIdleUpdate struct{} + +type resolverErrorUpdate struct { + err error +} + +type switchToUpdate struct { + name string +} + +type subConnUpdate struct { + acbw *acBalancerWrapper +} + +// watcher is a long-running goroutine which reads updates from a channel and +// invokes corresponding methods on the underlying balancer. It ensures that +// these methods are invoked in a synchronous fashion. It also ensures that +// these methods are invoked in the order in which the updates were received. func (ccb *ccBalancerWrapper) watcher() { for { select { - case t := <-ccb.updateCh.Get(): + case u := <-ccb.updateCh.Get(): ccb.updateCh.Load() if ccb.closed.HasFired() { break } - switch u := t.(type) { + switch update := u.(type) { + case *ccStateUpdate: + ccb.handleClientConnStateChange(update.ccs) case *scStateUpdate: - ccb.balancerMu.Lock() - ccb.balancer.UpdateSubConnState(u.sc, balancer.SubConnState{ConnectivityState: u.state, ConnectionError: u.err}) - ccb.balancerMu.Unlock() - case *acBalancerWrapper: - ccb.mu.Lock() - if ccb.subConns != nil { - delete(ccb.subConns, u) - ccb.cc.removeAddrConn(u.getAddrConn(), errConnDrain) - } - ccb.mu.Unlock() - case exitIdle: - if ccb.cc.GetState() == connectivity.Idle { - if ei, ok := ccb.balancer.(balancer.ExitIdler); ok { - // We already checked that the balancer implements - // ExitIdle before pushing the event to updateCh, but - // check conditionally again as defensive programming. 
- ccb.balancerMu.Lock() - ei.ExitIdle() - ccb.balancerMu.Unlock() - } - } + ccb.handleSubConnStateChange(update) + case *exitIdleUpdate: + ccb.handleExitIdle() + case *resolverErrorUpdate: + ccb.handleResolverError(update.err) + case *switchToUpdate: + ccb.handleSwitchTo(update.name) + case *subConnUpdate: + ccb.handleRemoveSubConn(update.acbw) default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", t, t) + logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) } case <-ccb.closed.Done(): } if ccb.closed.HasFired() { - ccb.balancerMu.Lock() - ccb.balancer.Close() - ccb.balancerMu.Unlock() - ccb.mu.Lock() - scs := ccb.subConns - ccb.subConns = nil - ccb.mu.Unlock() - ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil}) - ccb.done.Fire() - // Fire done before removing the addr conns. We can safely unblock - // ccb.close and allow the removeAddrConns to happen - // asynchronously. - for acbw := range scs { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) - } + ccb.handleClose() return } } } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// updateClientConnState is invoked by grpc to push a ClientConnState update to +// the underlying balancer. +// +// Unlike other methods invoked by grpc to push updates to the underlying +// balancer, this method cannot simply push the update onto the update channel +// and return. It needs to return the error returned by the underlying balancer +// back to grpc which propagates that to the resolver. +func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { + ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) + + var res interface{} + select { + case res = <-ccb.resultCh.Get(): + ccb.resultCh.Load() + case <-ccb.closed.Done(): + // Return early if the balancer wrapper is closed while we are waiting for + // the underlying balancer to process a ClientConnState update. 
+ return nil + } + // If the returned error is nil, attempting to type assert to error leads to + // panic. So, this needs to handled separately. + if res == nil { + return nil + } + return res.(error) } -func (ccb *ccBalancerWrapper) exitIdle() bool { - if !ccb.hasExitIdle { - return false +// handleClientConnStateChange handles a ClientConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +// +// If the addresses specified in the update contain addresses of type "grpclb" +// and the selected LB policy is not "grpclb", these addresses will be filtered +// out and ccs will be modified with the updated address list. +func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { + if ccb.curBalancerName != grpclbName { + // Filter any grpclb addresses since we don't have the grpclb balancer. + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) + } + ccs.ResolverState.Addresses = addrs } - ccb.updateCh.Put(exitIdle{}) - return true + ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) } -func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { +// updateSubConnState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { // When updating addresses for a SubConn, if the address in use is not in // the new addresses, the old ac will be tearDown() and a new ac will be // created. 
tearDown() generates a state change with Shutdown state, we @@ -161,44 +209,125 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co }) } -func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() - return ccb.balancer.UpdateClientConnState(*ccs) +// handleSubConnStateChange handles a SubConnState update from the update +// channel and invokes the appropriate method on the underlying balancer. +func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { + ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) +} + +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.updateCh.Put(&exitIdleUpdate{}) +} + +func (ccb *ccBalancerWrapper) handleExitIdle() { + if ccb.cc.GetState() != connectivity.Idle { + return + } + ccb.balancer.ExitIdle() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.balancerMu.Lock() - defer ccb.balancerMu.Unlock() + ccb.updateCh.Put(&resolverErrorUpdate{err: err}) +} + +func (ccb *ccBalancerWrapper) handleResolverError(err error) { ccb.balancer.ResolverError(err) } +// switchTo is invoked by grpc to instruct the balancer wrapper to switch to the +// LB policy identified by name. +// +// ClientConn calls newCCBalancerWrapper() at creation time. Upon receipt of the +// first good update from the name resolver, it determines the LB policy to use +// and invokes the switchTo() method. Upon receipt of every subsequent update +// from the name resolver, it invokes this method. +// +// the ccBalancerWrapper keeps track of the current LB policy name, and skips +// the graceful balancer switching process if the name does not change. +func (ccb *ccBalancerWrapper) switchTo(name string) { + ccb.updateCh.Put(&switchToUpdate{name: name}) +} + +// handleSwitchTo handles a balancer switch update from the update channel. 
It +// calls the SwitchTo() method on the gracefulswitch.Balancer with a +// balancer.Builder corresponding to name. If no balancer.Builder is registered +// for the given name, it uses the default LB policy which is "pick_first". +func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { + // TODO: Other languages use case-insensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + + // TODO: Ensure that name is a registered LB policy when we get here. + // We currently only validate the `loadBalancingConfig` field. We need to do + // the same for the `loadBalancingPolicy` field and reject the service config + // if the specified policy is not registered. + builder := balancer.Get(name) + if builder == nil { + channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) + builder = newPickfirstBuilder() + } else { + channelz.Infof(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q", name) + } + + if err := ccb.balancer.SwitchTo(builder); err != nil { + channelz.Errorf(logger, ccb.cc.channelzID, "Channel failed to build new LB policy %q: %v", name, err) + return + } + ccb.curBalancerName = builder.Name() +} + +// handleRemoveSucConn handles a request from the underlying balancer to remove +// a subConn. +// +// See comments in RemoveSubConn() for more details. 
+func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) close() { + ccb.closed.Fire() + <-ccb.done.Done() +} + +func (ccb *ccBalancerWrapper) handleClose() { + ccb.balancer.Close() + ccb.done.Fire() +} + func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { if len(addrs) <= 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") - } ac, err := ccb.cc.newAddrConn(addrs, opts) if err != nil { + channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } acbw := &acBalancerWrapper{ac: ac} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() - ccb.subConns[acbw] = struct{}{} return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // The RemoveSubConn() is handled in the run() goroutine, to avoid deadlock - // during switchBalancer() if the old balancer calls RemoveSubConn() in its - // Close(). - ccb.updateCh.Put(sc) + // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it + // was required to handle the RemoveSubConn() method asynchronously by pushing + // the update onto the update channel. This was done to avoid a deadlock as + // switchBalancer() was holding cc.mu when calling Close() on the old + // balancer, which would in turn call RemoveSubConn(). + // + // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this + // asynchronously is probably not required anymore since the switchTo() method + // handles the balancer switch by pushing the update onto the channel. + // TODO(easwars): Handle this inline. 
+ acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -210,11 +339,6 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - ccb.mu.Lock() - defer ccb.mu.Unlock() - if ccb.subConns == nil { - return - } // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go new file mode 100644 index 00000000000..a220c47c59a --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2020 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz exports internals of the channelz implementation as required +// by other gRPC packages. +// +// The implementation of the channelz spec as defined in +// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by +// the `internal/channelz` package. +// +// Experimental +// +// Notice: All APIs in this package are experimental and may be removed in a +// later release. 
+package channelz + +import "google.golang.org/grpc/internal/channelz" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier = channelz.Identifier diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 28f09dc8707..3ed6eb8e75e 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -79,7 +79,7 @@ var ( // errNoTransportSecurity indicates that there is no transport security // being set for ClientConn. Users should either set one or explicitly // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)") // errTransportCredsAndBundle indicates that creds bundle is used together // with other individual Transport Credentials. 
errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials") @@ -159,23 +159,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if channelz.IsOn() { - if cc.dopts.channelzParentID != 0 { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) - channelz.AddTraceEvent(logger, cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Channel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID), - Severity: channelz.CtInfo, - }, - }) - } else { - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target) - channelz.Info(logger, cc.channelzID, "Channel Created") + pid := cc.dopts.channelzParentID + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) + ted := &channelz.TraceEventDesc{ + Desc: "Channel created", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), + Severity: channelz.CtInfo, } - cc.csMgr.channelzID = cc.channelzID } + channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) + cc.csMgr.channelzID = cc.channelzID if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { return nil, errNoTransportSecurity @@ -281,7 +278,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerBuildOpts = balancer.BuildOptions{ + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ DialCreds: credsClone, CredsBundle: cc.dopts.copts.CredsBundle, Dialer: cc.dopts.copts.Dialer, @@ -289,7 +286,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * CustomUserAgent: 
cc.dopts.copts.UserAgent, ChannelzParentID: cc.channelzID, Target: cc.parsedTarget, - } + }) // Build the resolver. rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) @@ -398,7 +395,7 @@ type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} - channelzID int64 + channelzID *channelz.Identifier } // updateState updates the connectivity.State of ClientConn. @@ -464,34 +461,36 @@ var _ ClientConnInterface = (*ClientConn)(nil) // handshakes. It also handles errors on established connections by // re-resolving the name and reconnecting. type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - parsedTarget resolver.Target - authority string - dopts dialOptions - csMgr *connectivityStateManager - - balancerBuildOpts balancer.BuildOptions - blockingpicker *pickerWrapper - + ctx context.Context // Initialized using the background context at dial time. + cancel context.CancelFunc // Cancelled on close. + + // The following are initialized at dial time, and are read-only after that. + target string // User's dial target. + parsedTarget resolver.Target // See parseTargetAndFindResolver(). + authority string // See determineAuthority(). + dopts dialOptions // Default and user specified dial options. + channelzID *channelz.Identifier // Channelz identifier for the channel. + balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + + // The following provide their own synchronization, and therefore don't + // require cc.mu to be held to access them. + csMgr *connectivityStateManager + blockingpicker *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector + czData *channelzData + retryThrottler atomic.Value // Updated from service config. - mu sync.RWMutex - resolverWrapper *ccResolverWrapper - sc *ServiceConfig - conns map[*addrConn]struct{} - // Keepalive parameter can be updated if a GoAway is received. 
- mkp keepalive.ClientParameters - curBalancerName string - balancerWrapper *ccBalancerWrapper - retryThrottler atomic.Value - + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. firstResolveEvent *grpcsync.Event - channelzID int64 // channelz unique identification number - czData *channelzData + // mu protects the following fields. + // TODO: split mu so the same mutex isn't used for everything. + mu sync.RWMutex + resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + sc *ServiceConfig // Latest service config received from the resolver. + conns map[*addrConn]struct{} // Set to nil on close. + mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error @@ -536,14 +535,7 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.balancerWrapper != nil && cc.balancerWrapper.exitIdle() { - return - } - for ac := range cc.conns { - go ac.connect() - } + cc.balancerWrapper.exitIdle() } func (cc *ClientConn) scWatcher() { @@ -623,9 +615,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { // with the new addresses. cc.maybeApplyDefaultServiceConfig(nil) - if cc.balancerWrapper != nil { - cc.balancerWrapper.resolverError(err) - } + cc.balancerWrapper.resolverError(err) // No addresses are valid with err set; return early. 
cc.mu.Unlock() @@ -653,16 +643,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { cc.applyServiceConfigAndBalancer(sc, configSelector, s.Addresses) } else { ret = balancer.ErrBadResolverState - if cc.balancerWrapper == nil { - var err error - if s.ServiceConfig.Err != nil { - err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err) - } else { - err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config) - } - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{cc.sc}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) - cc.csMgr.updateState(connectivity.TransientFailure) + if cc.sc == nil { + // Apply the failing LB only if we haven't received valid service config + // from the name resolver in the past. + cc.applyFailingLB(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -670,24 +654,12 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { } var balCfg serviceconfig.LoadBalancingConfig - if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc != nil && cc.sc.lbConfig != nil { balCfg = cc.sc.lbConfig.cfg } - - cbn := cc.curBalancerName bw := cc.balancerWrapper cc.mu.Unlock() - if cbn != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - for i := 0; i < len(s.Addresses); { - if s.Addresses[i].Type == resolver.GRPCLB { - copy(s.Addresses[i:], s.Addresses[i+1:]) - s.Addresses = s.Addresses[:len(s.Addresses)-1] - continue - } - i++ - } - } + uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg}) if ret == nil { ret = uccsErr // prefer ErrBadResolver state since any other error is @@ -696,56 +668,28 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// switchBalancer starts the switching from current balancer to the balancer -// with the given name. 
-// -// It will NOT send the current address list to the new balancer. If needed, -// caller of this function should send address list to the new balancer after -// this function returns. +// applyFailingLB is akin to configuring an LB policy on the channel which +// always fails RPCs. Here, an actual LB policy is not configured, but an always +// erroring picker is configured, which returns errors with information about +// what was invalid in the received service config. A config selector with no +// service config is configured, and the connectivity state of the channel is +// set to TransientFailure. // // Caller must hold cc.mu. -func (cc *ClientConn) switchBalancer(name string) { - if strings.EqualFold(cc.curBalancerName, name) { - return - } - - channelz.Infof(logger, cc.channelzID, "ClientConn switching balancer to %q", name) - if cc.dopts.balancerBuilder != nil { - channelz.Info(logger, cc.channelzID, "ignoring balancer switching: Balancer DialOption used instead") - return - } - if cc.balancerWrapper != nil { - // Don't hold cc.mu while closing the balancers. The balancers may call - // methods that require cc.mu (e.g. cc.NewSubConn()). Holding the mutex - // would cause a deadlock in that case. 
- cc.mu.Unlock() - cc.balancerWrapper.close() - cc.mu.Lock() - } - - builder := balancer.Get(name) - if builder == nil { - channelz.Warningf(logger, cc.channelzID, "Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName) - channelz.Infof(logger, cc.channelzID, "failed to get balancer builder for: %v, using pick_first instead", name) - builder = newPickfirstBuilder() +func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { + var err error + if sc.Err != nil { + err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) } else { - channelz.Infof(logger, cc.channelzID, "Channel switches to new LB policy %q", name) + err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } - - cc.curBalancerName = builder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) + cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) + cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.csMgr.updateState(connectivity.TransientFailure) } func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return - } - // TODO(bar switching) send updates to all balancer wrappers when balancer - // gracefully switching is supported. - cc.balancerWrapper.handleSubConnStateChange(sc, s, err) - cc.mu.Unlock() + cc.balancerWrapper.updateSubConnState(sc, s, err) } // newAddrConn creates an addrConn for addrs and adds it to cc.conns. 
@@ -768,17 +712,21 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub cc.mu.Unlock() return nil, ErrClientConnClosing } - if channelz.IsOn() { - ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Created", - Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) + + var err error + ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") + if err != nil { + return nil, err } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel created", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID.Int()), + Severity: channelz.CtInfo, + }, + }) + cc.conns[ac] = struct{}{} cc.mu.Unlock() return ac, nil @@ -991,35 +939,26 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel cc.retryThrottler.Store((*retryThrottler)(nil)) } - if cc.dopts.balancerBuilder == nil { - // Only look at balancer types and switch balancer if balancer dial - // option is not set. 
- var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { - newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName + var newBalancerName string + if cc.sc != nil && cc.sc.lbConfig != nil { + newBalancerName = cc.sc.lbConfig.name + } else { + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break } } - cc.switchBalancer(newBalancerName) - } else if cc.balancerWrapper == nil { - // Balancer dial option was set, and this is the first time handling - // resolved addresses. Build a balancer with dopts.balancerBuilder. - cc.curBalancerName = cc.dopts.balancerBuilder.Name() - cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + if isGRPCLB { + newBalancerName = grpclbName + } else if cc.sc != nil && cc.sc.LB != nil { + newBalancerName = *cc.sc.LB + } else { + newBalancerName = PickFirstBalancerName + } } + cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { @@ -1070,11 +1009,11 @@ func (cc *ClientConn) Close() error { rWrapper := cc.resolverWrapper cc.resolverWrapper = nil bWrapper := cc.balancerWrapper - cc.balancerWrapper = nil cc.mu.Unlock() + // The order of closing matters here since the balancer wrapper assumes the + // picker is closed before it is closed. 
cc.blockingpicker.close() - if bWrapper != nil { bWrapper.close() } @@ -1085,22 +1024,22 @@ func (cc *ClientConn) Close() error { for ac := range conns { ac.tearDown(ErrClientConnClosing) } - if channelz.IsOn() { - ted := &channelz.TraceEventDesc{ - Desc: "Channel Deleted", + ted := &channelz.TraceEventDesc{ + Desc: "Channel deleted", + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), Severity: channelz.CtInfo, } - if cc.dopts.channelzParentID != 0 { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) - // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(cc.channelzID) } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from being + // deleted right away. + channelz.RemoveEntry(cc.channelzID) + return nil } @@ -1130,7 +1069,7 @@ type addrConn struct { backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} - channelzID int64 // channelz unique identification number. 
+ channelzID *channelz.Identifier czData *channelzData } @@ -1312,14 +1251,12 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() - if channelz.IsOn() { - copts.ChannelzParentID = ac.channelzID - } + copts.ChannelzParentID = ac.channelzID newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) if err != nil { // newTr is either nil, or closed. - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } @@ -1332,7 +1269,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne newTr.Close(transport.ErrConnClosing) if connectCtx.Err() == context.DeadlineExceeded { err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v: %v", addr, err) + channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) return err } return nil @@ -1497,19 +1434,18 @@ func (ac *addrConn) tearDown(err error) { curTr.GracefulClose() ac.mu.Lock() } - if channelz.IsOn() { - channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ - Desc: "Subchannel Deleted", + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ + Desc: "Subchannel deleted", + Severity: channelz.CtInfo, + Parent: &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelzID.Int()), Severity: channelz.CtInfo, - Parent: &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID), - Severity: channelz.CtInfo, - }, - }) - // TraceEvent needs to be called before 
RemoveEntry, as TraceEvent may add trace reference to - // the entity being deleted, and thus prevent it from being deleted right away. - channelz.RemoveEntry(ac.channelzID) - } + }, + }) + // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add + // trace reference to the entity being deleted, and thus prevent it from + // being deleted right away. + channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() } diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 4fbed12565f..82bee1443bf 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -70,3 +70,29 @@ type info struct { func (info) AuthType() string { return "insecure" } + +// insecureBundle implements an insecure bundle. +// An insecure bundle provides a thin wrapper around insecureTC to support +// the credentials.Bundle interface. +type insecureBundle struct{} + +// NewBundle returns a bundle with disabled transport security and no per rpc credential. +func NewBundle() credentials.Bundle { + return insecureBundle{} +} + +// NewWithMode returns a new insecure Bundle. The mode is ignored. +func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) { + return insecureBundle{}, nil +} + +// PerRPCCredentials returns an nil implementation as insecure +// bundle does not support a per rpc credential. +func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials { + return nil +} + +// TransportCredentials returns the underlying insecure transport credential. 
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials { + return NewCredentials() +} diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index c4bf09f9e94..f2f605a17c4 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -20,12 +20,11 @@ package grpc import ( "context" - "fmt" "net" "time" "google.golang.org/grpc/backoff" - "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal" @@ -45,19 +44,17 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor - dc Decompressor - bs internalbackoff.Strategy - block bool - returnLastError bool - timeout time.Duration - scChan <-chan ServiceConfig - authority string - copts transport.ConnectOptions - callOptions []CallOption - // This is used by WithBalancerName dial option. - balancerBuilder balancer.Builder - channelzParentID int64 + cp Compressor + dc Decompressor + bs internalbackoff.Strategy + block bool + returnLastError bool + timeout time.Duration + scChan <-chan ServiceConfig + authority string + copts transport.ConnectOptions + callOptions []CallOption + channelzParentID *channelz.Identifier disableServiceConfig bool disableRetry bool disableHealthCheck bool @@ -195,25 +192,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithBalancerName sets the balancer that the ClientConn will be initialized -// with. Balancer registered with balancerName will be used. This function -// panics if no balancer was registered by balancerName. -// -// The balancer cannot be overridden by balancer option specified by service -// config. -// -// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig -// instead. Will be removed in a future 1.x release. 
-func WithBalancerName(balancerName string) DialOption { - builder := balancer.Get(balancerName) - if builder == nil { - panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) - } - return newFuncDialOption(func(o *dialOptions) { - o.balancerBuilder = builder - }) -} - // WithServiceConfig returns a DialOption which has a channel to read the // service configuration. // @@ -304,8 +282,8 @@ func WithReturnConnectionError() DialOption { // WithCredentialsBundle or WithPerRPCCredentials) which require transport // security is incompatible and will cause grpc.Dial() to fail. // -// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead. -// Will be supported throughout 1.x. +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() +// instead. Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.TransportCredentials = insecure.NewCredentials() @@ -498,7 +476,7 @@ func WithAuthority(a string) DialOption { // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. -func WithChannelzParentID(id int64) DialOption { +func WithChannelzParentID(id *channelz.Identifier) DialOption { return newFuncDialOption(func(o *dialOptions) { o.channelzParentID = id }) diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index 668e0adcf0a..bb96ef57be8 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -72,9 +72,12 @@ type UnaryServerInfo struct { } // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the -// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as -// the status message of the RPC. +// execution of a unary RPC. 
+// +// If a UnaryHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go new file mode 100644 index 00000000000..7ba8f4d1831 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -0,0 +1,382 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package gracefulswitch implements a graceful switch load balancer. +package gracefulswitch + +import ( + "errors" + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed") +var _ balancer.Balancer = (*Balancer)(nil) + +// NewBalancer returns a graceful switch Balancer. 
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer { + return &Balancer{ + cc: cc, + bOpts: opts, + } +} + +// Balancer is a utility to gracefully switch from one balancer to +// a new balancer. It implements the balancer.Balancer interface. +type Balancer struct { + bOpts balancer.BuildOptions + cc balancer.ClientConn + + // mu protects the following fields and all fields within balancerCurrent + // and balancerPending. mu does not need to be held when calling into the + // child balancers, as all calls into these children happen only as a direct + // result of a call into the gracefulSwitchBalancer, which are also + // guaranteed to be synchronous. There is one exception: an UpdateState call + // from a child balancer when current and pending are populated can lead to + // calling Close() on the current. To prevent that racing with an + // UpdateSubConnState from the channel, we hold currentMu during Close and + // UpdateSubConnState calls. + mu sync.Mutex + balancerCurrent *balancerWrapper + balancerPending *balancerWrapper + closed bool // set to true when this balancer is closed + + // currentMu must be locked before mu. This mutex guards against this + // sequence of events: UpdateSubConnState() called, finds the + // balancerCurrent, gives up lock, updateState comes in, causes Close() on + // balancerCurrent before the UpdateSubConnState is called on the + // balancerCurrent. + currentMu sync.Mutex +} + +// swap swaps out the current lb with the pending lb and updates the ClientConn. +// The caller must hold gsb.mu. +func (gsb *Balancer) swap() { + gsb.cc.UpdateState(gsb.balancerPending.lastState) + cur := gsb.balancerCurrent + gsb.balancerCurrent = gsb.balancerPending + gsb.balancerPending = nil + go func() { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + cur.Close() + }() +} + +// Helper function that checks if the balancer passed in is current or pending. +// The caller must hold gsb.mu. 
+func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool { + return bw == gsb.balancerCurrent || bw == gsb.balancerPending +} + +// SwitchTo initializes the graceful switch process, which completes based on +// connectivity state changes on the current/pending balancer. Thus, the switch +// process is not complete when this method returns. This method must be called +// synchronously alongside the rest of the balancer.Balancer methods this +// Graceful Switch Balancer implements. +func (gsb *Balancer) SwitchTo(builder balancer.Builder) error { + gsb.mu.Lock() + if gsb.closed { + gsb.mu.Unlock() + return errBalancerClosed + } + bw := &balancerWrapper{ + gsb: gsb, + lastState: balancer.State{ + ConnectivityState: connectivity.Connecting, + Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), + }, + subconns: make(map[balancer.SubConn]bool), + } + balToClose := gsb.balancerPending // nil if there is no pending balancer + if gsb.balancerCurrent == nil { + gsb.balancerCurrent = bw + } else { + gsb.balancerPending = bw + } + gsb.mu.Unlock() + balToClose.Close() + // This function takes a builder instead of a balancer because builder.Build + // can call back inline, and this utility needs to handle the callbacks. + newBalancer := builder.Build(bw, gsb.bOpts) + if newBalancer == nil { + // This is illegal and should never happen; we clear the balancerWrapper + // we were constructing if it happens to avoid a potential panic. + gsb.mu.Lock() + if gsb.balancerPending != nil { + gsb.balancerPending = nil + } else { + gsb.balancerCurrent = nil + } + gsb.mu.Unlock() + return balancer.ErrBadResolverState + } + + // This write doesn't need to take gsb.mu because this field never gets read + // or written to on any calls from the current or pending. Calls from grpc + // to this balancer are guaranteed to be called synchronously, so this + // bw.Balancer field will never be forwarded to until this SwitchTo() + // function returns. 
+ bw.Balancer = newBalancer + return nil +} + +// Returns nil if the graceful switch balancer is closed. +func (gsb *Balancer) latestBalancer() *balancerWrapper { + gsb.mu.Lock() + defer gsb.mu.Unlock() + if gsb.balancerPending != nil { + return gsb.balancerPending + } + return gsb.balancerCurrent +} + +// UpdateClientConnState forwards the update to the latest balancer created. +func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return errBalancerClosed + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + return balToUpdate.UpdateClientConnState(state) +} + +// ResolverError forwards the error to the latest balancer created. +func (gsb *Balancer) ResolverError(err error) { + // The resolver data is only relevant to the most recent LB Policy. + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // Perform this call without gsb.mu to prevent deadlocks if the child calls + // back into the channel. The latest balancer can never be closed during a + // call from the channel, even without gsb.mu held. + balToUpdate.ResolverError(err) +} + +// ExitIdle forwards the call to the latest balancer created. +// +// If the latest balancer does not support ExitIdle, the subConns are +// re-connected to manually. +func (gsb *Balancer) ExitIdle() { + balToUpdate := gsb.latestBalancer() + if balToUpdate == nil { + return + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. 
+ if ei, ok := balToUpdate.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() + return + } + for sc := range balToUpdate.subconns { + sc.Connect() + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.currentMu.Lock() + defer gsb.currentMu.Unlock() + gsb.mu.Lock() + // Forward update to the appropriate child. Even if there is a pending + // balancer, the current balancer should continue to get SubConn updates to + // maintain the proper state while the pending is still connecting. + var balToUpdate *balancerWrapper + if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] { + balToUpdate = gsb.balancerCurrent + } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { + balToUpdate = gsb.balancerPending + } + gsb.mu.Unlock() + if balToUpdate == nil { + // SubConn belonged to a stale lb policy that has not yet fully closed, + // or the balancer was already closed. + return + } + balToUpdate.UpdateSubConnState(sc, state) +} + +// Close closes any active child balancers. +func (gsb *Balancer) Close() { + gsb.mu.Lock() + gsb.closed = true + currentBalancerToClose := gsb.balancerCurrent + gsb.balancerCurrent = nil + pendingBalancerToClose := gsb.balancerPending + gsb.balancerPending = nil + gsb.mu.Unlock() + + currentBalancerToClose.Close() + pendingBalancerToClose.Close() +} + +// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer +// methods to help cleanup SubConns created by the wrapped balancer. +// +// It implements the balancer.ClientConn interface and is passed down in that +// capacity to the wrapped balancer. It maintains a set of subConns created by +// the wrapped balancer and calls from the latter to create/update/remove +// SubConns update this set before being forwarded to the parent ClientConn. 
+// State updates from the wrapped balancer can result in invocation of the +// graceful switch logic. +type balancerWrapper struct { + balancer.Balancer + gsb *Balancer + + lastState balancer.State + subconns map[balancer.SubConn]bool // subconns created by this balancer +} + +func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + if state.ConnectivityState == connectivity.Shutdown { + bw.gsb.mu.Lock() + delete(bw.subconns, sc) + bw.gsb.mu.Unlock() + } + // There is no need to protect this read with a mutex, as the write to the + // Balancer field happens in SwitchTo, which completes before this can be + // called. + bw.Balancer.UpdateSubConnState(sc, state) +} + +// Close closes the underlying LB policy and removes the subconns it created. bw +// must not be referenced via balancerCurrent or balancerPending in gsb when +// called. gsb.mu must not be held. Does not panic with a nil receiver. +func (bw *balancerWrapper) Close() { + // before Close is called. + if bw == nil { + return + } + // There is no need to protect this read with a mutex, as Close() is + // impossible to be called concurrently with the write in SwitchTo(). The + // callsites of Close() for this balancer in Graceful Switch Balancer will + // never be called until SwitchTo() returns. + bw.Balancer.Close() + bw.gsb.mu.Lock() + for sc := range bw.subconns { + bw.gsb.cc.RemoveSubConn(sc) + } + bw.gsb.mu.Unlock() +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + // Hold the mutex for this entire call to ensure it cannot occur + // concurrently with other updateState() calls. This causes updates to + // lastState and calls to cc.UpdateState to happen atomically. 
+ bw.gsb.mu.Lock() + defer bw.gsb.mu.Unlock() + bw.lastState = state + + if !bw.gsb.balancerCurrentOrPending(bw) { + return + } + + if bw == bw.gsb.balancerCurrent { + // In the case that the current balancer exits READY, and there is a pending + // balancer, you can forward the pending balancer's cached State up to + // ClientConn and swap the pending into the current. This is because there + // is no reason to gracefully switch from and keep using the old policy as + // the ClientConn is not connected to any backends. + if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil { + bw.gsb.swap() + return + } + // Even if there is a pending balancer waiting to be gracefully switched to, + // continue to forward current balancer updates to the Client Conn. Ignoring + // state + picker from the current would cause undefined behavior/cause the + // system to behave incorrectly from the current LB policies perspective. + // Also, the current LB is still being used by grpc to choose SubConns per + // RPC, and thus should use the most updated form of the current balancer. + bw.gsb.cc.UpdateState(state) + return + } + // This method is now dealing with a state update from the pending balancer. + // If the current balancer is currently in a state other than READY, the new + // policy can be swapped into place immediately. This is because there is no + // reason to gracefully switch from and keep using the old policy as the + // ClientConn is not connected to any backends. 
+ if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready { + bw.gsb.swap() + } +} + +func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.gsb.mu.Unlock() + + sc, err := bw.gsb.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call + bw.gsb.cc.RemoveSubConn(sc) + bw.gsb.mu.Unlock() + return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) + } + bw.subconns[sc] = true + bw.gsb.mu.Unlock() + return sc, nil +} + +func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { + // Ignore ResolveNow requests from anything other than the most recent + // balancer, because older balancers were already removed from the config. 
+ if bw != bw.gsb.latestBalancer() { + return + } + bw.gsb.cc.ResolveNow(opts) +} + +func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.RemoveSubConn(sc) +} + +func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + bw.gsb.mu.Lock() + if !bw.gsb.balancerCurrentOrPending(bw) { + bw.gsb.mu.Unlock() + return + } + bw.gsb.mu.Unlock() + bw.gsb.cc.UpdateAddresses(sc, addrs) +} + +func (bw *balancerWrapper) Target() string { + return bw.gsb.cc.Target() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index 5cc3aeddb21..0a25ce43f3f 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -31,7 +31,7 @@ import ( // Logger is the global binary logger. It can be used to get binary logger for // each method. type Logger interface { - getMethodLogger(methodName string) *MethodLogger + GetMethodLogger(methodName string) MethodLogger } // binLogger is the global binary logger for the binary. One of this should be @@ -49,17 +49,24 @@ func SetLogger(l Logger) { binLogger = l } +// GetLogger gets the binarg logger. +// +// Only call this at init time. +func GetLogger() Logger { + return binLogger +} + // GetMethodLogger returns the methodLogger for the given methodName. // // methodName should be in the format of "/service/method". // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. 
-func GetMethodLogger(methodName string) *MethodLogger { +func GetMethodLogger(methodName string) MethodLogger { if binLogger == nil { return nil } - return binLogger.getMethodLogger(methodName) + return binLogger.GetMethodLogger(methodName) } func init() { @@ -68,17 +75,29 @@ func init() { binLogger = NewLoggerFromConfigString(configStr) } -type methodLoggerConfig struct { +// MethodLoggerConfig contains the setting for logging behavior of a method +// logger. Currently, it contains the max length of header and message. +type MethodLoggerConfig struct { // Max length of header and message. - hdr, msg uint64 + Header, Message uint64 +} + +// LoggerConfig contains the config for loggers to create method loggers. +type LoggerConfig struct { + All *MethodLoggerConfig + Services map[string]*MethodLoggerConfig + Methods map[string]*MethodLoggerConfig + + Blacklist map[string]struct{} } type logger struct { - all *methodLoggerConfig - services map[string]*methodLoggerConfig - methods map[string]*methodLoggerConfig + config LoggerConfig +} - blacklist map[string]struct{} +// NewLoggerFromConfig builds a logger with the given LoggerConfig. +func NewLoggerFromConfig(config LoggerConfig) Logger { + return &logger{config: config} } // newEmptyLogger creates an empty logger. The map fields need to be filled in @@ -88,57 +107,57 @@ func newEmptyLogger() *logger { } // Set method logger for "*". -func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error { - if l.all != nil { +func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error { + if l.config.All != nil { return fmt.Errorf("conflicting global rules found") } - l.all = ml + l.config.All = ml return nil } // Set method logger for "service/*". // // New methodLogger with same service overrides the old one. 
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error { - if _, ok := l.services[service]; ok { +func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Services[service]; ok { return fmt.Errorf("conflicting service rules for service %v found", service) } - if l.services == nil { - l.services = make(map[string]*methodLoggerConfig) + if l.config.Services == nil { + l.config.Services = make(map[string]*MethodLoggerConfig) } - l.services[service] = ml + l.config.Services[service] = ml return nil } // Set method logger for "service/method". // // New methodLogger with same method overrides the old one. -func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error { - if _, ok := l.blacklist[method]; ok { +func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.methods == nil { - l.methods = make(map[string]*methodLoggerConfig) + if l.config.Methods == nil { + l.config.Methods = make(map[string]*MethodLoggerConfig) } - l.methods[method] = ml + l.config.Methods[method] = ml return nil } // Set blacklist method for "-service/method". 
func (l *logger) setBlacklist(method string) error { - if _, ok := l.blacklist[method]; ok { + if _, ok := l.config.Blacklist[method]; ok { return fmt.Errorf("conflicting blacklist rules for method %v found", method) } - if _, ok := l.methods[method]; ok { + if _, ok := l.config.Methods[method]; ok { return fmt.Errorf("conflicting method rules for method %v found", method) } - if l.blacklist == nil { - l.blacklist = make(map[string]struct{}) + if l.config.Blacklist == nil { + l.config.Blacklist = make(map[string]struct{}) } - l.blacklist[method] = struct{}{} + l.config.Blacklist[method] = struct{}{} return nil } @@ -148,23 +167,23 @@ func (l *logger) setBlacklist(method string) error { // // Each methodLogger returned by this method is a new instance. This is to // generate sequence id within the call. -func (l *logger) getMethodLogger(methodName string) *MethodLogger { +func (l *logger) GetMethodLogger(methodName string) MethodLogger { s, m, err := grpcutil.ParseMethod(methodName) if err != nil { grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err) return nil } - if ml, ok := l.methods[s+"/"+m]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Methods[s+"/"+m]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if _, ok := l.blacklist[s+"/"+m]; ok { + if _, ok := l.config.Blacklist[s+"/"+m]; ok { return nil } - if ml, ok := l.services[s]; ok { - return newMethodLogger(ml.hdr, ml.msg) + if ml, ok := l.config.Services[s]; ok { + return newMethodLogger(ml.Header, ml.Message) } - if l.all == nil { + if l.config.All == nil { return nil } - return newMethodLogger(l.all.hdr, l.all.msg) + return newMethodLogger(l.config.All.Header, l.config.All.Message) } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index d8f4e7602fd..ab589a76bf9 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ 
b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -89,7 +89,7 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { if err != nil { return fmt.Errorf("invalid config: %q, %v", config, err) } - if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } return nil @@ -104,11 +104,11 @@ func (l *logger) fillMethodLoggerWithConfigString(config string) error { return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err) } if m == "*" { - if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } else { - if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil { + if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil { return fmt.Errorf("invalid config: %v", err) } } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 0cdb4183150..24df0a1a0c4 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -48,7 +48,11 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. -type MethodLogger struct { +type MethodLogger interface { + Log(LogEntryConfig) +} + +type methodLogger struct { headerMaxLen, messageMaxLen uint64 callID uint64 @@ -57,8 +61,8 @@ type MethodLogger struct { sink Sink // TODO(blog): make this plugable. 
} -func newMethodLogger(h, m uint64) *MethodLogger { - return &MethodLogger{ +func newMethodLogger(h, m uint64) *methodLogger { + return &methodLogger{ headerMaxLen: h, messageMaxLen: m, @@ -69,8 +73,10 @@ func newMethodLogger(h, m uint64) *MethodLogger { } } -// Log creates a proto binary log entry, and logs it to the sink. -func (ml *MethodLogger) Log(c LogEntryConfig) { +// Build is an internal only method for building the proto message out of the +// input event. It's made public to enable other library to reuse as much logic +// in methodLogger as possible. +func (ml *methodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -85,11 +91,15 @@ func (ml *MethodLogger) Log(c LogEntryConfig) { case *pb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } + return m +} - ml.sink.Write(m) +// Log creates a proto binary log entry, and logs it to the sink. +func (ml *methodLogger) Log(c LogEntryConfig) { + ml.sink.Write(ml.Build(c)) } -func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *methodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -119,7 +129,7 @@ func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { return truncated } -func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *methodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index cd1807543ee..777cbcd7921 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,6 +24,8 @@ package channelz import ( + "context" + "errors" "fmt" "sort" "sync" @@ -49,7 +51,8 @@ var ( // 
TurnOn turns on channelz data collection. func TurnOn() { if !IsOn() { - NewChannelzStorage() + db.set(newChannelMap()) + idGen.reset() atomic.StoreInt32(&curState, 1) } } @@ -94,46 +97,40 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorage initializes channelz data storage and id generator. +// NewChannelzStorageForTesting initializes channelz data storage and id +// generator for testing purposes. // -// This function returns a cleanup function to wait for all channelz state to be reset by the -// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests -// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen -// to remove some entity just register by the new test, since the id space is the same. -// -// Note: This function is exported for testing purpose only. User should not call -// it in most cases. -func NewChannelzStorage() (cleanup func() error) { - db.set(&channelMap{ - topLevelChannels: make(map[int64]struct{}), - channels: make(map[int64]*channel), - listenSockets: make(map[int64]*listenSocket), - normalSockets: make(map[int64]*normalSocket), - servers: make(map[int64]*server), - subChannels: make(map[int64]*subChannel), - }) +// Returns a cleanup function to be invoked by the test, which waits for up to +// 10s for all channelz state to be reset by the grpc goroutines when those +// entities get closed. This cleanup function helps with ensuring that tests +// don't mess up each other. 
+func NewChannelzStorageForTesting() (cleanup func() error) { + db.set(newChannelMap()) idGen.reset() + return func() error { - var err error cm := db.get() if cm == nil { return nil } - for i := 0; i < 1000; i++ { - cm.mu.Lock() - if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 { - cm.mu.Unlock() - // all things stored in the channelz map have been cleared. + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + ticker := time.NewTicker(10 * time.Millisecond) + defer ticker.Stop() + for { + cm.mu.RLock() + topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) + cm.mu.RUnlock() + + if err := ctx.Err(); err != nil { + return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) + } + if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { return nil } - cm.mu.Unlock() - time.Sleep(10 * time.Millisecond) + <-ticker.C } - - cm.mu.Lock() - err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)) - cm.mu.Unlock() - return err } } @@ -188,54 +185,77 @@ func GetServer(id int64) *ServerMetric { return db.get().GetServer(id) } -// RegisterChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list 
of its parent (identified -// by pid). pid = 0 means no parent. It returns the unique channelz tracking id -// assigned to this channel. -func RegisterChannel(c Channel, pid int64, ref string) int64 { +// RegisterChannel registers the given channel c in the channelz database with +// ref as its reference name, and adds it to the child list of its parent +// (identified by pid). pid == nil means no parent. +// +// Returns a unique channelz identifier assigned to this channel. +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { id := idGen.genID() + var parent int64 + isTopChannel := true + if pid != nil { + isTopChannel = false + parent = pid.Int() + } + + if !IsOn() { + return newIdentifer(RefChannel, id, pid) + } + cn := &channel{ refName: ref, c: c, subChans: make(map[int64]string), nestedChans: make(map[int64]string), id: id, - pid: pid, + pid: parent, trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - if pid == 0 { - db.get().addChannel(id, cn, true, pid) - } else { - db.get().addChannel(id, cn, false, pid) - } - return id + db.get().addChannel(id, cn, isTopChannel, parent) + return newIdentifer(RefChannel, id, pid) } -// RegisterSubChannel registers the given channel c in channelz database with ref -// as its reference name, and add it to the child list of its parent (identified -// by pid). It returns the unique channelz tracking id assigned to this subchannel. -func RegisterSubChannel(c Channel, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a SubChannel's parent id cannot be 0") - return 0 +// RegisterSubChannel registers the given subChannel c in the channelz database +// with ref as its reference name, and adds it to the child list of its parent +// (identified by pid). +// +// Returns a unique channelz identifier assigned to this subChannel. 
+// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a SubChannel's parent id cannot be nil") } id := idGen.genID() + if !IsOn() { + return newIdentifer(RefSubChannel, id, pid), nil + } + sc := &subChannel{ refName: ref, c: c, sockets: make(map[int64]string), id: id, - pid: pid, + pid: pid.Int(), trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())}, } - db.get().addSubChannel(id, sc, pid) - return id + db.get().addSubChannel(id, sc, pid.Int()) + return newIdentifer(RefSubChannel, id, pid), nil } // RegisterServer registers the given server s in channelz database. It returns // the unique channelz tracking id assigned to this server. -func RegisterServer(s Server, ref string) int64 { +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterServer(s Server, ref string) *Identifier { id := idGen.genID() + if !IsOn() { + return newIdentifer(RefServer, id, nil) + } + svr := &server{ refName: ref, s: s, @@ -244,71 +264,92 @@ func RegisterServer(s Server, ref string) int64 { id: id, } db.get().addServer(id, svr) - return id + return newIdentifer(RefServer, id, nil) } // RegisterListenSocket registers the given listen socket s in channelz database // with ref as its reference name, and add it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this listen socket. -func RegisterListenSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a ListenSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. 
+func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a ListenSocket's parent id cannot be 0") } id := idGen.genID() - ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addListenSocket(id, ls, pid) - return id + if !IsOn() { + return newIdentifer(RefListenSocket, id, pid), nil + } + + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addListenSocket(id, ls, pid.Int()) + return newIdentifer(RefListenSocket, id, pid), nil } // RegisterNormalSocket registers the given normal socket s in channelz database -// with ref as its reference name, and add it to the child list of its parent +// with ref as its reference name, and adds it to the child list of its parent // (identified by pid). It returns the unique channelz tracking id assigned to // this normal socket. -func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { - if pid == 0 { - logger.Error("a NormalSocket's parent id cannot be 0") - return 0 +// +// If channelz is not turned ON, the channelz database is not mutated. +func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, error) { + if pid == nil { + return nil, errors.New("a NormalSocket's parent id cannot be 0") } id := idGen.genID() - ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} - db.get().addNormalSocket(id, ns, pid) - return id + if !IsOn() { + return newIdentifer(RefNormalSocket, id, pid), nil + } + + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid.Int()} + db.get().addNormalSocket(id, ns, pid.Int()) + return newIdentifer(RefNormalSocket, id, pid), nil } // RemoveEntry removes an entry with unique channelz tracking id to be id from // channelz database. -func RemoveEntry(id int64) { - db.get().removeEntry(id) +// +// If channelz is not turned ON, this function is a no-op. 
+func RemoveEntry(id *Identifier) { + if !IsOn() { + return + } + db.get().removeEntry(id.Int()) } -// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added -// to the channel trace. -// The Parent field is optional. It is used for event that will be recorded in the entity's parent -// trace also. +// TraceEventDesc is what the caller of AddTraceEvent should provide to describe +// the event to be added to the channel trace. +// +// The Parent field is optional. It is used for an event that will be recorded +// in the entity's parent trace. type TraceEventDesc struct { Desc string Severity Severity Parent *TraceEventDesc } -// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc. -func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) { - for d := desc; d != nil; d = d.Parent { - switch d.Severity { - case CtUnknown, CtInfo: - l.InfoDepth(depth+1, d.Desc) - case CtWarning: - l.WarningDepth(depth+1, d.Desc) - case CtError: - l.ErrorDepth(depth+1, d.Desc) - } +// AddTraceEvent adds trace related to the entity with specified id, using the +// provided TraceEventDesc. +// +// If channelz is not turned ON, this will simply log the event descriptions. +func AddTraceEvent(l grpclog.DepthLoggerV2, id *Identifier, depth int, desc *TraceEventDesc) { + // Log only the trace description associated with the bottom most entity. + switch desc.Severity { + case CtUnknown, CtInfo: + l.InfoDepth(depth+1, withParens(id)+desc.Desc) + case CtWarning: + l.WarningDepth(depth+1, withParens(id)+desc.Desc) + case CtError: + l.ErrorDepth(depth+1, withParens(id)+desc.Desc) } + if getMaxTraceEntry() == 0 { return } - db.get().traceEvent(id, desc) + if IsOn() { + db.get().traceEvent(id.Int(), desc) + } } // channelMap is the storage data structure for channelz. 
@@ -326,6 +367,17 @@ type channelMap struct { normalSockets map[int64]*normalSocket } +func newChannelMap() *channelMap { + return &channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + } +} + func (c *channelMap) addServer(id int64, s *server) { c.mu.Lock() s.cm = c diff --git a/vendor/google.golang.org/grpc/internal/channelz/id.go b/vendor/google.golang.org/grpc/internal/channelz/id.go new file mode 100644 index 00000000000..c9a27acd371 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/channelz/id.go @@ -0,0 +1,75 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package channelz + +import "fmt" + +// Identifier is an opaque identifier which uniquely identifies an entity in the +// channelz database. +type Identifier struct { + typ RefChannelType + id int64 + str string + pid *Identifier +} + +// Type returns the entity type corresponding to id. +func (id *Identifier) Type() RefChannelType { + return id.typ +} + +// Int returns the integer identifier corresponding to id. +func (id *Identifier) Int() int64 { + return id.id +} + +// String returns a string representation of the entity corresponding to id. +// +// This includes some information about the parent as well. 
Examples: +// Top-level channel: [Channel #channel-number] +// Nested channel: [Channel #parent-channel-number Channel #channel-number] +// Sub channel: [Channel #parent-channel SubChannel #subchannel-number] +func (id *Identifier) String() string { + return id.str +} + +// Equal returns true if other is the same as id. +func (id *Identifier) Equal(other *Identifier) bool { + if (id != nil) != (other != nil) { + return false + } + if id == nil && other == nil { + return true + } + return id.typ == other.typ && id.id == other.id && id.pid == other.pid +} + +// NewIdentifierForTesting returns a new opaque identifier to be used only for +// testing purposes. +func NewIdentifierForTesting(typ RefChannelType, id int64, pid *Identifier) *Identifier { + return newIdentifer(typ, id, pid) +} + +func newIdentifer(typ RefChannelType, id int64, pid *Identifier) *Identifier { + str := fmt.Sprintf("%s #%d", typ, id) + if pid != nil { + str = fmt.Sprintf("%s %s", pid, str) + } + return &Identifier{typ: typ, id: id, str: str, pid: pid} +} diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index b0013f9c886..8e13a3d2ce7 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -26,77 +26,54 @@ import ( var logger = grpclog.Component("channelz") +func withParens(id *Identifier) string { + return "[" + id.String() + "] " +} + // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, args...) - } +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtInfo, + }) } // Infof logs and adds a trace event if channelz is on. 
-func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtInfo, - }) - } else { - l.InfoDepth(1, msg) - } +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtInfo, + }) } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, args...) - } +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtWarning, + }) } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtWarning, - }) - } else { - l.WarningDepth(1, msg) - } +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtWarning, + }) } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) { - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: fmt.Sprint(args...), - Severity: CtError, - }) - } else { - l.ErrorDepth(1, args...) - } +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprint(args...), + Severity: CtError, + }) } // Errorf logs and adds a trace event if channelz is on. 
-func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - if IsOn() { - AddTraceEvent(l, id, 1, &TraceEventDesc{ - Desc: msg, - Severity: CtError, - }) - } else { - l.ErrorDepth(1, msg) - } +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { + AddTraceEvent(l, id, 1, &TraceEventDesc{ + Desc: fmt.Sprintf(format, args...), + Severity: CtError, + }) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 3c595d154bd..ad0ce4dabf0 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -686,12 +686,33 @@ const ( type RefChannelType int const ( + // RefUnknown indicates an unknown entity type, the zero value for this type. + RefUnknown RefChannelType = iota // RefChannel indicates the referenced entity is a Channel. - RefChannel RefChannelType = iota + RefChannel // RefSubChannel indicates the referenced entity is a SubChannel. RefSubChannel + // RefServer indicates the referenced entity is a Server. + RefServer + // RefListenSocket indicates the referenced entity is a ListenSocket. + RefListenSocket + // RefNormalSocket indicates the referenced entity is a NormalSocket. 
+ RefNormalSocket ) +var refChannelTypeToString = map[RefChannelType]string{ + RefUnknown: "Unknown", + RefChannel: "Channel", + RefSubChannel: "SubChannel", + RefServer: "Server", + RefListenSocket: "ListenSocket", + RefNormalSocket: "NormalSocket", +} + +func (r RefChannelType) String() string { + return refChannelTypeToString[r] +} + func (c *channelTrace) dumpData() *ChannelTrace { c.mu.Lock() ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 9bad03cec64..7d996e51b5c 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -26,13 +26,13 @@ import ( const ( // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name. // Do not use this and read from env directly. Its value is read and kept in - // variable BootstrapFileName. + // variable XDSBootstrapFileName. // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP" - // XDSBootstrapFileContentEnv is the env variable to set bootstrapp file + // XDSBootstrapFileContentEnv is the env variable to set bootstrap file // content. Do not use this and read from env directly. Its value is read - // and kept in variable BootstrapFileName. + // and kept in variable XDSBootstrapFileContent. // // When both bootstrap FileName and FileContent are set, FileName is used. 
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" @@ -41,6 +41,7 @@ const ( clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" + outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" @@ -82,7 +83,10 @@ var ( // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") - + // XDSOutlierDetection indicates whether outlier detection support is + // enabled, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true". + XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true") // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 1b596bf3579..6d355b0b013 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -38,11 +38,10 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second - // ParseServiceConfigForTesting is for creating a fake - // ClientConn for resolver testing only - ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult + // ParseServiceConfig parses a JSON representation of the service config. + ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and - // parsing. 
Both a and b should be returned by ParseServiceConfigForTesting. + // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the // there's difference in white space. EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool @@ -86,3 +85,9 @@ const ( // that supports backend returned by grpclb balancer. CredsBundleModeBackendFromBalancer = "backend-from-balancer" ) + +// RLSLoadBalancingPolicyName is the name of the RLS LB policy. +// +// It currently has an experimental suffix which would be removed once +// end-to-end testing of the policy is completed. +const RLSLoadBalancingPolicyName = "rls_experimental" diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index b8733dbf340..b2980f8ac44 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -22,6 +22,9 @@ package metadata import ( + "fmt" + "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" ) @@ -72,3 +75,46 @@ func Set(addr resolver.Address, md metadata.MD) resolver.Address { addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md)) return addr } + +// Validate returns an error if the input md contains invalid keys or values. +// +// If the header is not a pseudo-header, the following items are checked: +// - header names must contain one or more characters from this set [0-9 a-z _ - .]. +// - if the header-name ends with a "-bin" suffix, no validation of the header value is performed. +// - otherwise, the header value must contain one or more characters from the set [%x20-%x7E]. 
+func Validate(md metadata.MD) error { + for k, vals := range md { + // pseudo-header will be ignored + if k[0] == ':' { + continue + } + // check key, for i that saving a conversion if not using for range + for i := 0; i < len(k); i++ { + r := k[i] + if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' { + return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", k) + } + } + if strings.HasSuffix(k, "-bin") { + continue + } + // check value + for _, val := range vals { + if hasNotPrintable(val) { + return fmt.Errorf("header key %q contains value with non-printable ASCII characters", k) + } + } + } + return nil +} + +// hasNotPrintable return true if msg contains any characters which are not in %x20-%x7E +func hasNotPrintable(msg string) bool { + // for i that saving a conversion if not using for range + for i := 0; i < len(msg); i++ { + if msg[i] < 0x20 || msg[i] > 0x7E { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go new file mode 100644 index 00000000000..0177af4b511 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -0,0 +1,82 @@ +/* + * + * Copyright 2021 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package pretty defines helper functions to pretty-print structs for logging. 
+package pretty + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/golang/protobuf/jsonpb" + protov1 "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + protov2 "google.golang.org/protobuf/proto" +) + +const jsonIndent = " " + +// ToJSON marshals the input into a json string. +// +// If marshal fails, it falls back to fmt.Sprintf("%+v"). +func ToJSON(e interface{}) string { + switch ee := e.(type) { + case protov1.Message: + mm := jsonpb.Marshaler{Indent: jsonIndent} + ret, err := mm.MarshalToString(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return ret + case protov2.Message: + mm := protojson.MarshalOptions{ + Multiline: true, + Indent: jsonIndent, + } + ret, err := mm.Marshal(ee) + if err != nil { + // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2 + // messages are not imported, and this will fail because the message + // is not found. + return fmt.Sprintf("%+v", ee) + } + return string(ret) + default: + ret, err := json.MarshalIndent(ee, "", jsonIndent) + if err != nil { + return fmt.Sprintf("%+v", ee) + } + return string(ret) + } +} + +// FormatJSON formats the input json bytes with indentation. +// +// If Indent fails, it returns the unchanged input as string. 
+func FormatJSON(b []byte) string { + var out bytes.Buffer + err := json.Indent(&out, b, "", jsonIndent) + if err != nil { + return string(b) + } + return out.String() +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f0c72d33710..38ed3d566ff 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -132,7 +132,7 @@ type http2Client struct { kpDormant bool // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData onGoAway func(GoAwayReason) @@ -351,8 +351,9 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } t.statsHandler.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr)) + if err != nil { + return nil, err } if t.keepaliveEnabled { t.kpDormancyCond = sync.NewCond(&t.mu) @@ -898,9 +899,7 @@ func (t *http2Client) Close(err error) { t.controlBuf.finish() t.cancel() t.conn.Close() - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Append info about previous goaways if there were any, since this may be important // for understanding the root cause for this connection to be closed. 
_, goAwayDebugMessage := t.GetGoAwayReason() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 2c6eaf0e59c..0956b500c18 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "google.golang.org/grpc/internal/grpcutil" + "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -117,7 +118,7 @@ type http2Server struct { idle time.Time // Fields below are for channelz metric collection. - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData bufferPool *bufferPool @@ -231,6 +232,11 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if kp.Timeout == 0 { kp.Timeout = defaultServerKeepaliveTimeout } + if kp.Time != infinity { + if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) + } + } kep := config.KeepalivePolicy if kep.MinTime == 0 { kep.MinTime = defaultKeepalivePolicyMinTime @@ -275,12 +281,12 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, connBegin := &stats.ConnBegin{} t.stats.HandleConn(t.ctx, connBegin) } - if channelz.IsOn() { - t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + if err != nil { + return nil, err } t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1) - t.framer.writer.Flush() defer func() { @@ -1210,9 +1216,7 @@ func (t *http2Server) Close() { if err := 
t.conn.Close(); err != nil && logger.V(logLevel) { logger.Infof("transport: error closing conn during Close: %v", err) } - if channelz.IsOn() { - channelz.RemoveEntry(t.channelzID) - } + channelz.RemoveEntry(t.channelzID) // Cancel all active streams. for _, s := range streams { s.cancel() diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index d3bf65b2bdf..a9ce717f160 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -529,7 +530,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 } @@ -563,7 +564,7 @@ type ConnectOptions struct { // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. - ChannelzParentID int64 + ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 // UseProxy specifies if a proxy should be used. @@ -741,6 +742,12 @@ func (e ConnectionError) Origin() error { return e.err } +// Unwrap returns the original error of this connection error or nil when the +// origin is nil. +func (e ConnectionError) Unwrap() error { + return e.err +} + var ( // ErrConnClosing indicates that the transport is closing. 
ErrConnClosing = connectionErrorf(true, nil, "transport is closing") diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 3604c7819fd..8e0f6abe89d 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -188,7 +188,9 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + s := make([]string, len(v)) + copy(s, v) + out[key] = s } return out, true } @@ -226,7 +228,9 @@ func FromOutgoingContext(ctx context.Context) (MD, bool) { // map, and there's no guarantee that the MD attached to the context is // created using our helper functions. key := strings.ToLower(k) - out[key] = v + s := make([]string, len(v)) + copy(s, v) + out[key] = s } for _, added := range raw.added { if len(added)%2 == 1 { diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index 5168b62b078..fb7a99e0a27 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -44,79 +44,107 @@ func (*pickfirstBuilder) Name() string { } type pickfirstBalancer struct { - state connectivity.State - cc balancer.ClientConn - sc balancer.SubConn + state connectivity.State + cc balancer.ClientConn + subConn balancer.SubConn } func (b *pickfirstBalancer) ResolverError(err error) { - switch b.state { - case connectivity.TransientFailure, connectivity.Idle, connectivity.Connecting: - // Set a failing picker if we don't have a good picker. 
- b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, - }) - } if logger.V(2) { logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) } + if b.subConn == nil { + b.state = connectivity.TransientFailure + } + + if b.state != connectivity.TransientFailure { + // The picker will not change since the balancer does not currently + // report an error. + return + } + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)}, + }) } -func (b *pickfirstBalancer) UpdateClientConnState(cs balancer.ClientConnState) error { - if len(cs.ResolverState.Addresses) == 0 { +func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { + if len(state.ResolverState.Addresses) == 0 { + // The resolver reported an empty address list. Treat it like an error by + // calling b.ResolverError. + if b.subConn != nil { + // Remove the old subConn. All addresses were removed, so it is no longer + // valid. 
+ b.cc.RemoveSubConn(b.subConn) + b.subConn = nil + } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if b.sc == nil { - var err error - b.sc, err = b.cc.NewSubConn(cs.ResolverState.Addresses, balancer.NewSubConnOptions{}) - if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) - } - b.state = connectivity.TransientFailure - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.TransientFailure, - Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, - }) - return balancer.ErrBadResolverState + + if b.subConn != nil { + b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + return nil + } + + subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + if err != nil { + if logger.V(2) { + logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) } - b.state = connectivity.Idle - b.cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: &picker{result: balancer.PickResult{SubConn: b.sc}}}) - b.sc.Connect() - } else { - b.cc.UpdateAddresses(b.sc, cs.ResolverState.Addresses) - b.sc.Connect() + b.state = connectivity.TransientFailure + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)}, + }) + return balancer.ErrBadResolverState } + b.subConn = subConn + b.state = connectivity.Idle + b.cc.UpdateState(balancer.State{ + ConnectivityState: connectivity.Idle, + Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + }) + b.subConn.Connect() return nil } -func (b *pickfirstBalancer) UpdateSubConnState(sc balancer.SubConn, s balancer.SubConnState) { +func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", sc, s) + 
logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) } - if b.sc != sc { + if b.subConn != subConn { if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") } return } - b.state = s.ConnectivityState - if s.ConnectivityState == connectivity.Shutdown { - b.sc = nil + b.state = state.ConnectivityState + if state.ConnectivityState == connectivity.Shutdown { + b.subConn = nil return } - switch s.ConnectivityState { + switch state.ConnectivityState { case connectivity.Ready: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{result: balancer.PickResult{SubConn: sc}}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, + }) case connectivity.Connecting: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, + }) case connectivity.Idle: - b.cc.UpdateState(balancer.State{ConnectivityState: s.ConnectivityState, Picker: &idlePicker{sc: sc}}) + b.cc.UpdateState(balancer.State{ + ConnectivityState: state.ConnectivityState, + Picker: &idlePicker{subConn: subConn}, + }) case connectivity.TransientFailure: b.cc.UpdateState(balancer.State{ - ConnectivityState: s.ConnectivityState, - Picker: &picker{err: s.ConnectionError}, + ConnectivityState: state.ConnectivityState, + Picker: &picker{err: state.ConnectionError}, }) } } @@ -125,8 +153,8 @@ func (b *pickfirstBalancer) Close() { } func (b *pickfirstBalancer) ExitIdle() { - if b.sc != nil && b.state == connectivity.Idle { - b.sc.Connect() + if b.subConn != nil && b.state == connectivity.Idle { + b.subConn.Connect() } } @@ -135,18 
+163,18 @@ type picker struct { err error } -func (p *picker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { +func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) { return p.result, p.err } // idlePicker is used when the SubConn is IDLE and kicks the SubConn into // CONNECTING when Pick is called. type idlePicker struct { - sc balancer.SubConn + subConn balancer.SubConn } -func (i *idlePicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) { - i.sc.Connect() +func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { + i.subConn.Connect() return balancer.PickResult{}, balancer.ErrNoSubConnAvailable } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 58c802f8aec..978b89f37a4 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH} mkdir -p ${GOBIN} echo "remove existing generated files" -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. -rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go') +# grpc_testing_not_regenerate/*.pb.go is not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate') echo "go install google.golang.org/protobuf/cmd/protoc-gen-go" (cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go) @@ -117,9 +117,9 @@ done mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1 -# grpc_testingv3/testv3.pb.go is not re-generated because it was -# intentionally generated by an older version of protoc-gen-go. 
-rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go +# grpc_testing_not_regenerate/*.pb.go are not re-generated, +# see grpc_testing_not_regenerate/README.md for details. +rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go # grpc/service_config/service_config.proto does not have a go_package option. mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index e28b6802606..ca2e35a3596 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -139,13 +140,18 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. -func (a *Address) Equal(o Address) bool { +func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && a.Type == o.Type && a.Metadata == o.Metadata } +// String returns JSON formatted string representation of the address. +func (a Address) String() string { + return pretty.ToJSON(a) +} + // BuildOptions includes additional information for the builder to create // the resolver. 
type BuildOptions struct { diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 2c47cd54f07..05a9d4e0bac 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,7 +19,6 @@ package grpc import ( - "fmt" "strings" "sync" @@ -27,6 +26,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -97,10 +97,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { if ccr.done.HasFired() { return nil } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending update to cc: %v", s) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(s) - } + ccr.addChannelzTraceEvent(s) ccr.curState = s if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { return balancer.ErrBadResolverState @@ -125,10 +122,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: sending new addresses to cc: %v", addrs) - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) ccr.curState.Addresses = addrs ccr.cc.updateResolverState(ccr.curState, nil) } @@ -141,7 +135,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { if ccr.done.HasFired() { return } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %v", sc) + channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) if ccr.cc.dopts.disableServiceConfig { 
channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") return @@ -151,9 +145,7 @@ func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) return } - if channelz.IsOn() { - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) ccr.curState.ServiceConfig = scpr ccr.cc.updateResolverState(ccr.curState, nil) } @@ -180,8 +172,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.AddTraceEvent(logger, ccr.cc.channelzID, 0, &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")), - Severity: channelz.CtInfo, - }) + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eadf9e05fd1..96431a058bf 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -134,7 +134,7 @@ type Server struct { channelzRemoveOnce sync.Once serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop - channelzID int64 // channelz unique identification number + channelzID *channelz.Identifier czData *channelzData serverWorkerChannels []chan *serverWorkerData @@ -584,9 +584,8 @@ func NewServer(opt ...ServerOption) *Server { s.initServerWorkers() } - if channelz.IsOn() { - s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") - } + s.channelzID = channelz.RegisterServer(&channelzServer{s}, "") + channelz.Info(logger, s.channelzID, "Server created") 
return s } @@ -712,7 +711,7 @@ var ErrServerStopped = errors.New("grpc: the server has been stopped") type listenSocket struct { net.Listener - channelzID int64 + channelzID *channelz.Identifier } func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { @@ -724,9 +723,8 @@ func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { func (l *listenSocket) Close() error { err := l.Listener.Close() - if channelz.IsOn() { - channelz.RemoveEntry(l.channelzID) - } + channelz.RemoveEntry(l.channelzID) + channelz.Info(logger, l.channelzID, "ListenSocket deleted") return err } @@ -759,11 +757,6 @@ func (s *Server) Serve(lis net.Listener) error { ls := &listenSocket{Listener: lis} s.lis[ls] = true - if channelz.IsOn() { - ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) - } - s.mu.Unlock() - defer func() { s.mu.Lock() if s.lis != nil && s.lis[ls] { @@ -773,8 +766,16 @@ func (s *Server) Serve(lis net.Listener) error { s.mu.Unlock() }() - var tempDelay time.Duration // how long to sleep on accept failure + var err error + ls.channelzID, err = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String()) + if err != nil { + s.mu.Unlock() + return err + } + s.mu.Unlock() + channelz.Info(logger, ls.channelzID, "ListenSocket created") + var tempDelay time.Duration // how long to sleep on accept failure for { rawConn, err := lis.Accept() if err != nil { @@ -1283,9 +1284,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - // Convert appErr if it is not a grpc status error. - appErr = status.Error(codes.Unknown, appErr.Error()) - appStatus, _ = status.FromError(appErr) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. 
+ appStatus = status.FromContextError(appErr) + appErr = appStatus.Err() } if trInfo != nil { trInfo.tr.LazyLog(stringer(appStatus.Message()), true) @@ -1549,7 +1551,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { - appStatus = status.New(codes.Unknown, appErr.Error()) + // Convert non-status application error to a status error with code + // Unknown, but handle context errors specifically. + appStatus = status.FromContextError(appErr) appErr = appStatus.Err() } if trInfo != nil { @@ -1706,11 +1710,7 @@ func (s *Server) Stop() { s.done.Fire() }() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() listeners := s.lis @@ -1748,11 +1748,7 @@ func (s *Server) GracefulStop() { s.quit.Fire() defer s.done.Fire() - s.channelzRemoveOnce.Do(func() { - if channelz.IsOn() { - channelz.RemoveEntry(s.channelzID) - } - }) + s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() if s.conns == nil { s.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 22c4240cf7e..b01c548bb9a 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -218,7 +218,7 @@ type jsonSC struct { } func init() { - internal.ParseServiceConfigForTesting = parseServiceConfig + internal.ParseServiceConfig = parseServiceConfig } func parseServiceConfig(js string) *serviceconfig.ParseResult { if len(js) == 0 { @@ -381,6 +381,9 @@ func init() { // // If any of them is NOT *ServiceConfig, return false. 
func equalServiceConfig(a, b serviceconfig.Config) bool { + if a == nil && b == nil { + return true + } aa, ok := a.(*ServiceConfig) if !ok { return false diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 625d47b34e5..e0b30b46fb1 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -36,6 +36,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" "google.golang.org/grpc/internal/grpcutil" + imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/transport" @@ -46,10 +47,12 @@ import ( ) // StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. If a StreamHandler returns an error, it -// should be produced by the status package, or else gRPC will use -// codes.Unknown as the status code and err.Error() as the status message -// of the RPC. +// execution of a streaming RPC. +// +// If a StreamHandler returns an error, it should either be produced by the +// status package, or be one of the context errors. Otherwise, gRPC will use +// codes.Unknown as the status code and err.Error() as the status message of the +// RPC. type StreamHandler func(srv interface{}, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. 
Used @@ -164,6 +167,11 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if md, _, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if err := imetadata.Validate(md); err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + } if channelz.IsOn() { cc.incrCallsStarted() defer func() { @@ -454,7 +462,7 @@ type clientStream struct { retryThrottler *retryThrottler // The throttler active when the RPC began. - binlog *binarylog.MethodLogger // Binary logger, can be nil. + binlog binarylog.MethodLogger // Binary logger, can be nil. // serverHeaderBinlogged is a boolean for whether server header has been // logged. Server header will be logged when the first time one of those // happens: stream.Header(), stream.Recv(). @@ -1426,7 +1434,7 @@ type serverStream struct { statsHandler stats.Handler - binlog *binarylog.MethodLogger + binlog binarylog.MethodLogger // serverHeaderBinlogged indicates whether server header has been logged. It // will happen when one of the following two happens: stream.SendHeader(), // stream.Send(). 
@@ -1446,11 +1454,20 @@ func (ss *serverStream) SetHeader(md metadata.MD) error { if md.Len() == 0 { return nil } + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } return ss.s.SetHeader(md) } func (ss *serverStream) SendHeader(md metadata.MD) error { - err := ss.t.WriteHeader(ss.s, md) + err := imetadata.Validate(md) + if err != nil { + return status.Error(codes.Internal, err.Error()) + } + + err = ss.t.WriteHeader(ss.s, md) if ss.binlog != nil && !ss.serverHeaderBinlogged { h, _ := ss.s.Header() ss.binlog.Log(&binarylog.ServerHeader{ @@ -1465,6 +1482,9 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { if md.Len() == 0 { return } + if err := imetadata.Validate(md); err != nil { + logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err) + } ss.s.SetTrailer(md) } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 9d3fd73da94..6af76dfe7bb 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.44.1-dev" +const Version = "1.46.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index d923187a7b3..ceb436c6ce4 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -107,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:" + golint ./... 
2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" go mod tidy git status --porcelain 2>&1 | fail_on_output || \ diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go index a427f8b7043..9c61112f58d 100644 --- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -21,10 +21,11 @@ import ( type Number int32 const ( - MinValidNumber Number = 1 - FirstReservedNumber Number = 19000 - LastReservedNumber Number = 19999 - MaxValidNumber Number = 1<<29 - 1 + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 + DefaultRecursionLimit = 10000 ) // IsValid reports whether the field number is semantically valid. @@ -55,6 +56,7 @@ const ( errCodeOverflow errCodeReserved errCodeEndGroup + errCodeRecursionDepth ) var ( @@ -112,6 +114,10 @@ func ConsumeField(b []byte) (Number, Type, int) { // When parsing a group, the length includes the end group marker and // the end group is verified to match the starting field number. 
func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + return consumeFieldValueD(num, typ, b, DefaultRecursionLimit) +} + +func consumeFieldValueD(num Number, typ Type, b []byte, depth int) (n int) { switch typ { case VarintType: _, n = ConsumeVarint(b) @@ -126,6 +132,9 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { _, n = ConsumeBytes(b) return n case StartGroupType: + if depth < 0 { + return errCodeRecursionDepth + } n0 := len(b) for { num2, typ2, n := ConsumeTag(b) @@ -140,7 +149,7 @@ func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { return n0 - len(b) } - n = ConsumeFieldValue(num2, typ2, b) + n = consumeFieldValueD(num2, typ2, b, depth-1) if n < 0 { return n // forward error code } diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go index eb10ea10261..37803773fa3 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -381,7 +381,7 @@ func (d *Decoder) currentOpenKind() (Kind, byte) { case '[': return ListOpen, ']' } - panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %c", openCh)) } func (d *Decoder) pushOpenStack(ch byte) { diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go index f90e909b37a..fbcd349207d 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build !go1.13 // +build !go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go index dc05f4191c0..5e72f1cde9e 100644 --- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.13 // +build go1.13 package errors diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go index a72995f02d9..bda8e8cf3fc 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !protolegacy // +build !protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go index 772e2f0e4d6..6d8d9bd6b01 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build protolegacy // +build protolegacy package flags diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go index 2706bb67f5d..4b15493f2f4 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !go1.12 // +build !go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go index 1533ef600cd..0b31b66eaf8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build go1.12 // +build go1.12 package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go index 90705e3aea7..145c577bd6b 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index e118af1e20c..757642e23c9 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go index 949dc49a65b..c65b0325c17 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/decode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -18,6 +18,7 @@ import ( ) var errDecode = errors.New("cannot parse invalid wire-format data") +var errRecursionDepth = errors.New("exceeded maximum recursion depth") type unmarshalOptions struct { flags protoiface.UnmarshalInputFlags @@ -25,6 +26,7 @@ type unmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + depth int } func (o unmarshalOptions) Options() proto.UnmarshalOptions { @@ -44,6 +46,7 @@ func (o unmarshalOptions) IsDefault() bool { var lazyUnmarshalOptions = unmarshalOptions{ resolver: preg.GlobalTypes, + depth: protowire.DefaultRecursionLimit, } type unmarshalOutput struct { @@ -62,6 +65,7 @@ func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutp out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ flags: in.Flags, resolver: in.Resolver, + depth: in.Depth, }) var flags piface.UnmarshalOutputFlags if out.initialized { @@ -82,6 +86,10 @@ var errUnknown = 
errors.New("unknown") func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { mi.init() + opts.depth-- + if opts.depth < 0 { + return out, errRecursionDepth + } if flags.ProtoLegacy && mi.isMessageSet { return unmarshalMessageSet(mi, b, p, opts) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go index 9e3ed821efb..4c491bdf482 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build purego || appengine // +build purego appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 9ecf23a85bb..ee0e0573e39 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package impl diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go index 85e074c977d..a1f6f333860 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine // +build purego appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 2160c701914..56a8a4ed3c9 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 14e774fb2ec..3d40d5249e9 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,8 +52,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 27 - Patch = 1 + Minor = 28 + Patch = 0 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index 49f9b8c88cf..11bf7173be9 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -42,18 +42,25 @@ type UnmarshalOptions struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + + // RecursionLimit limits how deeply messages may be nested. + // If zero, a default limit is applied. + RecursionLimit int } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). 
func Unmarshal(b []byte, m Message) error { - _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + _, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect()) return err } // Unmarshal parses the wire-format message in b and places the result in m. // The provided message must be mutable (e.g., a non-nil pointer to a message). func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } _, err := o.unmarshal(b, m.ProtoReflect()) return err } @@ -63,6 +70,9 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { // This method permits fine-grained control over the unmarshaler. // Most users should use Unmarshal instead. func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + if o.RecursionLimit == 0 { + o.RecursionLimit = protowire.DefaultRecursionLimit + } return o.unmarshal(in.Buf, in.Message) } @@ -86,12 +96,17 @@ func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out proto Message: m, Buf: b, Resolver: o.Resolver, + Depth: o.RecursionLimit, } if o.DiscardUnknown { in.Flags |= protoiface.UnmarshalDiscardUnknown } out, err = methods.Unmarshal(in) } else { + o.RecursionLimit-- + if o.RecursionLimit < 0 { + return out, errors.New("exceeded max recursion depth") + } err = o.unmarshalMessageSlow(b, m) } if err != nil { diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go index d8dd604f6b6..465e057b323 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_methods.go +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. 
+//go:build !protoreflect // +build !protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go index b103d43205c..494d6ceef9e 100644 --- a/vendor/google.golang.org/protobuf/proto/proto_reflect.go +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // The protoreflect build tag disables use of fast-path methods. +//go:build protoreflect // +build protoreflect package proto diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index 6be5d16e9f3..d5d5af6ebed 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -53,6 +53,7 @@ type ( FindExtensionByName(field FullName) (ExtensionType, error) FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) } + Depth int } unmarshalOutput = struct { pragma.NoUnkeyedLiterals diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go index 918e685e1d5..7ced876f4e8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
+//go:build purego || appengine // +build purego appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go index 5a341472419..eb7764c307c 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -41,6 +41,31 @@ import ( // Converting to/from a Value and a concrete Go value panics on type mismatch. // For example, ValueOf("hello").Int() panics because this attempts to // retrieve an int64 from a string. +// +// List, Map, and Message Values are called "composite" values. +// +// A composite Value may alias (reference) memory at some location, +// such that changes to the Value updates the that location. +// A composite value acquired with a Mutable method, such as Message.Mutable, +// always references the source object. +// +// For example: +// // Append a 0 to a "repeated int32" field. +// // Since the Value returned by Mutable is guaranteed to alias +// // the source message, modifying the Value modifies the message. +// message.Mutable(fieldDesc).(List).Append(protoreflect.ValueOfInt32(0)) +// +// // Assign [0] to a "repeated int32" field by creating a new Value, +// // modifying it, and assigning it. +// list := message.NewField(fieldDesc).(List) +// list.Append(protoreflect.ValueOfInt32(0)) +// message.Set(fieldDesc, list) +// // ERROR: Since it is not defined whether Set aliases the source, +// // appending to the List here may or may not modify the message. +// list.Append(protoreflect.ValueOfInt32(0)) +// +// Some operations, such as Message.Get, may return an "empty, read-only" +// composite Value. Modifying an empty, read-only value panics. 
type Value value // The protoreflect API uses a custom Value union type instead of interface{} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index c45debdcac6..702ddf22a27 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build !purego && !appengine // +build !purego,!appengine package protoreflect diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go index 32c04f67eb7..44cf467d884 100644 --- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -103,6 +103,7 @@ type UnmarshalInput = struct { FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) } + Depth int } // UnmarshalOutput is output from the Unmarshal method. diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go new file mode 100644 index 00000000000..586690522a4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -0,0 +1,810 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. 
+// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +// Package structpb contains generated types for google/protobuf/struct.proto. +// +// The messages (i.e., Value, Struct, and ListValue) defined in struct.proto are +// used to represent arbitrary JSON. 
The Value message represents a JSON value, +// the Struct message represents a JSON object, and the ListValue message +// represents a JSON array. See https://json.org for more information. +// +// The Value, Struct, and ListValue types have generated MarshalJSON and +// UnmarshalJSON methods such that they serialize JSON equivalent to what the +// messages themselves represent. Use of these types with the +// "google.golang.org/protobuf/encoding/protojson" package +// ensures that they will be serialized as their JSON equivalent. +// +// +// Conversion to and from a Go interface +// +// The standard Go "encoding/json" package has functionality to serialize +// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and +// ListValue.AsSlice methods can convert the protobuf message representation into +// a form represented by interface{}, map[string]interface{}, and []interface{}. +// This form can be used with other packages that operate on such data structures +// and also directly with the standard json package. +// +// In order to convert the interface{}, map[string]interface{}, and []interface{} +// forms back as Value, Struct, and ListValue messages, use the NewStruct, +// NewList, and NewValue constructor functions. 
+// +// +// Example usage +// +// Consider the following example JSON object: +// +// { +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": { +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100" +// }, +// "phoneNumbers": [ +// { +// "type": "home", +// "number": "212 555-1234" +// }, +// { +// "type": "office", +// "number": "646 555-4567" +// } +// ], +// "children": [], +// "spouse": null +// } +// +// To construct a Value message representing the above JSON object: +// +// m, err := structpb.NewValue(map[string]interface{}{ +// "firstName": "John", +// "lastName": "Smith", +// "isAlive": true, +// "age": 27, +// "address": map[string]interface{}{ +// "streetAddress": "21 2nd Street", +// "city": "New York", +// "state": "NY", +// "postalCode": "10021-3100", +// }, +// "phoneNumbers": []interface{}{ +// map[string]interface{}{ +// "type": "home", +// "number": "212 555-1234", +// }, +// map[string]interface{}{ +// "type": "office", +// "number": "646 555-4567", +// }, +// }, +// "children": []interface{}{}, +// "spouse": nil, +// }) +// if err != nil { +// ... // handle error +// } +// ... // make use of m as a *structpb.Value +// +package structpb + +import ( + base64 "encoding/base64" + protojson "google.golang.org/protobuf/encoding/protojson" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + math "math" + reflect "reflect" + sync "sync" + utf8 "unicode/utf8" +) + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +// Enum value maps for NullValue. 
+var ( + NullValue_name = map[int32]string{ + 0: "NULL_VALUE", + } + NullValue_value = map[string]int32{ + "NULL_VALUE": 0, + } +) + +func (x NullValue) Enum() *NullValue { + p := new(NullValue) + *p = x + return p +} + +func (x NullValue) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (NullValue) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_struct_proto_enumTypes[0].Descriptor() +} + +func (NullValue) Type() protoreflect.EnumType { + return &file_google_protobuf_struct_proto_enumTypes[0] +} + +func (x NullValue) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use NullValue.Descriptor instead. +func (NullValue) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +// NewStruct constructs a Struct from a general-purpose Go map. +// The map keys must be valid UTF-8. +// The map values are converted using NewValue. 
+func NewStruct(v map[string]interface{}) (*Struct, error) { + x := &Struct{Fields: make(map[string]*Value, len(v))} + for k, v := range v { + if !utf8.ValidString(k) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", k) + } + var err error + x.Fields[k], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsMap converts x to a general-purpose Go map. +// The map values are converted by calling Value.AsInterface. +func (x *Struct) AsMap() map[string]interface{} { + vs := make(map[string]interface{}) + for k, v := range x.GetFields() { + vs[k] = v.AsInterface() + } + return vs +} + +func (x *Struct) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Struct) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Struct) Reset() { + *x = Struct{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_struct_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Struct) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Struct) ProtoMessage() {} + +func (x *Struct) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Struct.ProtoReflect.Descriptor instead. +func (*Struct) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{0} +} + +func (x *Struct) GetFields() map[string]*Value { + if x != nil { + return x.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. 
A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The kind of value. + // + // Types that are assignable to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +// NewValue constructs a Value from a general-purpose Go interface. +// +// ╔════════════════════════╀════════════════════════════════════════════╗ +// β•‘ Go type β”‚ Conversion β•‘ +// ╠════════════════════════β•ͺ════════════════════════════════════════════╣ +// β•‘ nil β”‚ stored as NullValue β•‘ +// β•‘ bool β”‚ stored as BoolValue β•‘ +// β•‘ int, int32, int64 β”‚ stored as NumberValue β•‘ +// β•‘ uint, uint32, uint64 β”‚ stored as NumberValue β•‘ +// β•‘ float32, float64 β”‚ stored as NumberValue β•‘ +// β•‘ string β”‚ stored as StringValue; must be valid UTF-8 β•‘ +// β•‘ []byte β”‚ stored as StringValue; base64-encoded β•‘ +// β•‘ map[string]interface{} β”‚ stored as StructValue β•‘ +// β•‘ []interface{} β”‚ stored as ListValue β•‘ +// β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•§β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β• +// +// When converting an int64 or uint64 to a NumberValue, numeric precision loss +// is possible since they are stored as a float64. 
+func NewValue(v interface{}) (*Value, error) { + switch v := v.(type) { + case nil: + return NewNullValue(), nil + case bool: + return NewBoolValue(v), nil + case int: + return NewNumberValue(float64(v)), nil + case int32: + return NewNumberValue(float64(v)), nil + case int64: + return NewNumberValue(float64(v)), nil + case uint: + return NewNumberValue(float64(v)), nil + case uint32: + return NewNumberValue(float64(v)), nil + case uint64: + return NewNumberValue(float64(v)), nil + case float32: + return NewNumberValue(float64(v)), nil + case float64: + return NewNumberValue(float64(v)), nil + case string: + if !utf8.ValidString(v) { + return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v) + } + return NewStringValue(v), nil + case []byte: + s := base64.StdEncoding.EncodeToString(v) + return NewStringValue(s), nil + case map[string]interface{}: + v2, err := NewStruct(v) + if err != nil { + return nil, err + } + return NewStructValue(v2), nil + case []interface{}: + v2, err := NewList(v) + if err != nil { + return nil, err + } + return NewListValue(v2), nil + default: + return nil, protoimpl.X.NewError("invalid type: %T", v) + } +} + +// NewNullValue constructs a new null Value. +func NewNullValue() *Value { + return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}} +} + +// NewBoolValue constructs a new boolean Value. +func NewBoolValue(v bool) *Value { + return &Value{Kind: &Value_BoolValue{BoolValue: v}} +} + +// NewNumberValue constructs a new number Value. +func NewNumberValue(v float64) *Value { + return &Value{Kind: &Value_NumberValue{NumberValue: v}} +} + +// NewStringValue constructs a new string Value. +func NewStringValue(v string) *Value { + return &Value{Kind: &Value_StringValue{StringValue: v}} +} + +// NewStructValue constructs a new struct Value. +func NewStructValue(v *Struct) *Value { + return &Value{Kind: &Value_StructValue{StructValue: v}} +} + +// NewListValue constructs a new list Value. 
+func NewListValue(v *ListValue) *Value { + return &Value{Kind: &Value_ListValue{ListValue: v}} +} + +// AsInterface converts x to a general-purpose Go interface. +// +// Calling Value.MarshalJSON and "encoding/json".Marshal on this output produce +// semantically equivalent JSON (assuming no errors occur). +// +// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are +// converted as strings to remain compatible with MarshalJSON. +func (x *Value) AsInterface() interface{} { + switch v := x.GetKind().(type) { + case *Value_NumberValue: + if v != nil { + switch { + case math.IsNaN(v.NumberValue): + return "NaN" + case math.IsInf(v.NumberValue, +1): + return "Infinity" + case math.IsInf(v.NumberValue, -1): + return "-Infinity" + default: + return v.NumberValue + } + } + case *Value_StringValue: + if v != nil { + return v.StringValue + } + case *Value_BoolValue: + if v != nil { + return v.BoolValue + } + case *Value_StructValue: + if v != nil { + return v.StructValue.AsMap() + } + case *Value_ListValue: + if v != nil { + return v.ListValue.AsSlice() + } + } + return nil +} + +func (x *Value) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *Value) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *Value) Reset() { + *x = Value{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_struct_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Value) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Value) ProtoMessage() {} + +func (x *Value) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Value.ProtoReflect.Descriptor instead. 
+func (*Value) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1} +} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (x *Value) GetNullValue() NullValue { + if x, ok := x.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (x *Value) GetNumberValue() float64 { + if x, ok := x.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (x *Value) GetStringValue() string { + if x, ok := x.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *Value) GetBoolValue() bool { + if x, ok := x.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *Value) GetStructValue() *Struct { + if x, ok := x.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (x *Value) GetListValue() *ListValue { + if x, ok := x.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + // Represents a null value. + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + // Represents a double value. + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + // Represents a string value. + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + // Represents a boolean value. + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + // Represents a structured value. 
+ StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + // Represents a repeated `Value`. + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +// NewList constructs a ListValue from a general-purpose Go slice. +// The slice elements are converted using NewValue. +func NewList(v []interface{}) (*ListValue, error) { + x := &ListValue{Values: make([]*Value, len(v))} + for i, v := range v { + var err error + x.Values[i], err = NewValue(v) + if err != nil { + return nil, err + } + } + return x, nil +} + +// AsSlice converts x to a general-purpose Go slice. +// The slice elements are converted by calling Value.AsInterface. 
+func (x *ListValue) AsSlice() []interface{} { + vs := make([]interface{}, len(x.GetValues())) + for i, v := range x.GetValues() { + vs[i] = v.AsInterface() + } + return vs +} + +func (x *ListValue) MarshalJSON() ([]byte, error) { + return protojson.Marshal(x) +} + +func (x *ListValue) UnmarshalJSON(b []byte) error { + return protojson.Unmarshal(b, x) +} + +func (x *ListValue) Reset() { + *x = ListValue{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_struct_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListValue) ProtoMessage() {} + +func (x *ListValue) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_struct_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListValue.ProtoReflect.Descriptor instead. 
+func (*ListValue) Descriptor() ([]byte, []int) { + return file_google_protobuf_struct_proto_rawDescGZIP(), []int{2} +} + +func (x *ListValue) GetValues() []*Value { + if x != nil { + return x.Values + } + return nil +} + +var File_google_protobuf_struct_proto protoreflect.FileDescriptor + +var file_google_protobuf_struct_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, + 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 
0x61, 0x6c, 0x75, + 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, + 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, + 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, + 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, + 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, + 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09, + 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, + 0x4c, 0x5f, 0x56, 
0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, + 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, + 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_google_protobuf_struct_proto_rawDescOnce sync.Once + file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc +) + +func file_google_protobuf_struct_proto_rawDescGZIP() []byte { + file_google_protobuf_struct_proto_rawDescOnce.Do(func() { + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + }) + return file_google_protobuf_struct_proto_rawDescData +} + +var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_google_protobuf_struct_proto_goTypes = []interface{}{ + (NullValue)(0), // 0: google.protobuf.NullValue + (*Struct)(nil), // 1: google.protobuf.Struct + (*Value)(nil), // 2: google.protobuf.Value + (*ListValue)(nil), // 3: google.protobuf.ListValue + nil, // 4: google.protobuf.Struct.FieldsEntry +} +var file_google_protobuf_struct_proto_depIdxs = []int32{ + 4, // 0: google.protobuf.Struct.fields:type_name -> google.protobuf.Struct.FieldsEntry + 0, // 1: 
google.protobuf.Value.null_value:type_name -> google.protobuf.NullValue + 1, // 2: google.protobuf.Value.struct_value:type_name -> google.protobuf.Struct + 3, // 3: google.protobuf.Value.list_value:type_name -> google.protobuf.ListValue + 2, // 4: google.protobuf.ListValue.values:type_name -> google.protobuf.Value + 2, // 5: google.protobuf.Struct.FieldsEntry.value:type_name -> google.protobuf.Value + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name +} + +func init() { file_google_protobuf_struct_proto_init() } +func file_google_protobuf_struct_proto_init() { + if File_google_protobuf_struct_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Struct); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Value); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_struct_proto_goTypes, + DependencyIndexes: file_google_protobuf_struct_proto_depIdxs, + EnumInfos: file_google_protobuf_struct_proto_enumTypes, + MessageInfos: file_google_protobuf_struct_proto_msgTypes, + }.Build() + File_google_protobuf_struct_proto = out.File + file_google_protobuf_struct_proto_rawDesc = nil + file_google_protobuf_struct_proto_goTypes = nil + file_google_protobuf_struct_proto_depIdxs = nil +} diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc new file mode 100644 index 00000000000..730e569b069 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc @@ -0,0 +1 @@ +'|Κ&{tΔU|gGκ(μCy=+¨œςcϋ:u:/pœ#~žό["±4€!­nΩAͺDK<ŠufhΕaΏΒ:ΊόΈ‘΄B/£Ψ€Ή€ς_hΞΫSγT*wΜxΌ―Ή-η|ΰΐΣƒΡΔδσΜγ£—A$$β6£ΑβG)8nΟpϋΖΛ‘3̚œoοΟvŽB–3Ώ­]xέ“Σ2l§G•|qRή― φ2 5R–ΣΧΗ$΄ρ½Yθ‘ήέ™l‘Λ«yAI"ی˜νΓ»ΉΌkΔ|Kεώ[9Ζβε=°ϊŸρ|@S•3 σ#ζx?ΎV„,Ύ‚SΖέυœwPνog6&V6 ©D.dBŠ 7 \ No newline at end of file diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitignore b/vendor/gopkg.in/square/go-jose.v2/.gitignore new file mode 100644 index 00000000000..95a851586a5 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/.gitignore @@ -0,0 +1,8 @@ +*~ +.*.swp +*.out +*.test +*.pem +*.cov +jose-util/jose-util +jose-util.t.err \ No newline at end of file diff --git a/vendor/gopkg.in/square/go-jose.v2/.travis.yml b/vendor/gopkg.in/square/go-jose.v2/.travis.yml new file mode 100644 index 00000000000..ae69862df29 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/.travis.yml @@ -0,0 +1,45 @@ +language: go + +sudo: false + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: +- '1.11.x' +- '1.12.x' +- tip + +go_import_path: gopkg.in/square/go-jose.v2 + +before_script: +- export PATH=$HOME/.local/bin:$PATH + +before_install: +# 
Install encrypted gitcookies to get around bandwidth-limits +# that is causing Travis-CI builds to fail. For more info, see +# https://github.com/golang/go/issues/12933 +- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true +- bash .gitcookies.sh || true +- go get github.com/wadey/gocovmerge +- go get github.com/mattn/goveralls +- go get github.com/stretchr/testify/assert +- go get github.com/stretchr/testify/require +- go get github.com/google/go-cmp/cmp +- go get golang.org/x/tools/cmd/cover || true +- go get code.google.com/p/go.tools/cmd/cover || true +- pip install cram --user + +script: +- go test . -v -covermode=count -coverprofile=profile.cov +- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov +- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov +- go test ./json -v # no coverage for forked encoding/json package +- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util +- cd .. + +after_success: +- gocovmerge *.cov */*.cov > merged.coverprofile +- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md new file mode 100644 index 00000000000..3305db0f653 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md @@ -0,0 +1,10 @@ +Serious about security +====================== + +Square recognizes the important contributions the security research community +can make. We therefore encourage reporting security issues with the code +contained in this repository. + +If you believe you have discovered a security vulnerability, please follow the +guidelines at . 
+ diff --git a/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md new file mode 100644 index 00000000000..61b183651c0 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md @@ -0,0 +1,14 @@ +# Contributing + +If you would like to contribute code to go-jose you can do so through GitHub by +forking the repository and sending a pull request. + +When submitting code, please make every effort to follow existing conventions +and style in order to keep the code as readable as possible. Please also make +sure all tests pass by running `go test`, and format your code with `go fmt`. +We also recommend using `golint` and `errcheck`. + +Before your code can be accepted into the project you must also sign the +[Individual Contributor License Agreement][1]. + + [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 diff --git a/vendor/gopkg.in/square/go-jose.v2/LICENSE b/vendor/gopkg.in/square/go-jose.v2/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/square/go-jose.v2/README.md b/vendor/gopkg.in/square/go-jose.v2/README.md new file mode 100644 index 00000000000..1791bfa8f67 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/README.md @@ -0,0 +1,118 @@ +# Go JOSE + +[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) +[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) +[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) +[![build](https://travis-ci.org/square/go-jose.svg?branch=v2)](https://travis-ci.org/square/go-jose) +[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/square/go-jose) + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. This includes support for JSON Web Encryption, +JSON Web Signature, and JSON Web Token standards. + +**Disclaimer**: This library contains encryption software that is subject to +the U.S. Export Administration Regulations. You may not export, re-export, +transfer or download this code or any part of it in violation of any United +States law, directive or regulation. 
In particular this software may not be +exported or re-exported in any form or on any media to Iran, North Sudan, +Syria, Cuba, or North Korea, or to denied persons or entities mentioned on any +US maintained blocked list. + +## Overview + +The implementation follows the +[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), +[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and +[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519). +Tables of supported algorithms are shown below. The library supports both +the compact and full serialization formats, and has optional support for +multiple recipients. It also comes with a small command-line utility +([`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util)) +for dealing with JOSE messages in a shell. + +**Note**: We use a forked version of the `encoding/json` package from the Go +standard library which uses case-sensitive matching for member names (instead +of [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html)). +This is to avoid differences in interpretation of messages between go-jose and +libraries in other languages. + +### Versions + +We use [gopkg.in](https://gopkg.in) for versioning. + +[Version 2](https://gopkg.in/square/go-jose.v2) +([branch](https://github.com/square/go-jose/tree/v2), +[doc](https://godoc.org/gopkg.in/square/go-jose.v2)) is the current version: + + import "gopkg.in/square/go-jose.v2" + +The old `v1` branch ([go-jose.v1](https://gopkg.in/square/go-jose.v1)) will +still receive backported bug fixes and security fixes, but otherwise +development is frozen. All new feature development takes place on the `v2` +branch. Version 2 also contains additional sub-packages such as the +[jwt](https://godoc.org/gopkg.in/square/go-jose.v2/jwt) implementation +contributed by [@shaxbee](https://github.com/shaxbee). + +### Supported algorithms + +See below for a table of supported algorithms. 
Algorithm identifiers match +the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518) +standard where possible. The Godoc reference has a list of constants. + + Key encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSA-PKCS#1v1.5 | RSA1_5 + RSA-OAEP | RSA-OAEP, RSA-OAEP-256 + AES key wrap | A128KW, A192KW, A256KW + AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW + ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW + ECDH-ES (direct) | ECDH-ES1 + Direct encryption | dir1 + +1. Not supported in multi-recipient mode + + Signing / MAC | Algorithm identifier(s) + :------------------------- | :------------------------------ + RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 + RSASSA-PSS | PS256, PS384, PS512 + HMAC | HS256, HS384, HS512 + ECDSA | ES256, ES384, ES512 + Ed25519 | EdDSA2 + +2. Only available in version 2 of the package + + Content encryption | Algorithm identifier(s) + :------------------------- | :------------------------------ + AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 + AES-GCM | A128GCM, A192GCM, A256GCM + + Compression | Algorithm identifiers(s) + :------------------------- | ------------------------------- + DEFLATE (RFC 1951) | DEF + +### Supported key types + +See below for a table of supported key types. These are understood by the +library, and can be passed to corresponding functions such as `NewEncrypter` or +`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which +allows attaching a key id. 
+ + Algorithm(s) | Corresponding types + :------------------------- | ------------------------------- + RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) + ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) + EdDSA1 | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey) + AES, HMAC | []byte + +1. Only available in version 2 of the package + +## Examples + +[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) +[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) + +Examples can be found in the Godoc +reference for this package. The +[`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util) +subdirectory also contains a small command-line utility which might be useful +as an example. diff --git a/vendor/gopkg.in/square/go-jose.v2/asymmetric.go b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go new file mode 100644 index 00000000000..b69aa0369c0 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/asymmetric.go @@ -0,0 +1,592 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jose + +import ( + "crypto" + "crypto/aes" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "errors" + "fmt" + "math/big" + + "golang.org/x/crypto/ed25519" + josecipher "gopkg.in/square/go-jose.v2/cipher" + "gopkg.in/square/go-jose.v2/json" +) + +// A generic RSA-based encrypter/verifier +type rsaEncrypterVerifier struct { + publicKey *rsa.PublicKey +} + +// A generic RSA-based decrypter/signer +type rsaDecrypterSigner struct { + privateKey *rsa.PrivateKey +} + +// A generic EC-based encrypter/verifier +type ecEncrypterVerifier struct { + publicKey *ecdsa.PublicKey +} + +type edEncrypterVerifier struct { + publicKey ed25519.PublicKey +} + +// A key generator for ECDH-ES +type ecKeyGenerator struct { + size int + algID string + publicKey *ecdsa.PublicKey +} + +// A generic EC-based decrypter/signer +type ecDecrypterSigner struct { + privateKey *ecdsa.PrivateKey +} + +type edDecrypterSigner struct { + privateKey ed25519.PrivateKey +} + +// newRSARecipient creates recipientKeyInfo based on the given key. +func newRSARecipient(keyAlg KeyAlgorithm, publicKey *rsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case RSA1_5, RSA_OAEP, RSA_OAEP_256: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &rsaEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newRSASigner creates a recipientSigInfo based on the given key. 
+func newRSASigner(sigAlg SignatureAlgorithm, privateKey *rsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case RS256, RS384, RS512, PS256, PS384, PS512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &rsaDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +func newEd25519Signer(sigAlg SignatureAlgorithm, privateKey ed25519.PrivateKey) (recipientSigInfo, error) { + if sigAlg != EdDSA { + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &edDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// newECDHRecipient creates recipientKeyInfo based on the given key. +func newECDHRecipient(keyAlg KeyAlgorithm, publicKey *ecdsa.PublicKey) (recipientKeyInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch keyAlg { + case ECDH_ES, ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientKeyInfo{}, ErrUnsupportedAlgorithm + } + + if publicKey == nil || !publicKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return recipientKeyInfo{}, errors.New("invalid public key") + } + + return recipientKeyInfo{ + keyAlg: keyAlg, + keyEncrypter: &ecEncrypterVerifier{ + publicKey: publicKey, + }, + }, nil +} + +// newECDSASigner creates a recipientSigInfo based on the given key. 
+func newECDSASigner(sigAlg SignatureAlgorithm, privateKey *ecdsa.PrivateKey) (recipientSigInfo, error) { + // Verify that key management algorithm is supported by this encrypter + switch sigAlg { + case ES256, ES384, ES512: + default: + return recipientSigInfo{}, ErrUnsupportedAlgorithm + } + + if privateKey == nil { + return recipientSigInfo{}, errors.New("invalid private key") + } + + return recipientSigInfo{ + sigAlg: sigAlg, + publicKey: staticPublicKey(&JSONWebKey{ + Key: privateKey.Public(), + }), + signer: &ecDecrypterSigner{ + privateKey: privateKey, + }, + }, nil +} + +// Encrypt the given payload and update the object. +func (ctx rsaEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + encryptedKey, err := ctx.encrypt(cek, alg) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: encryptedKey, + header: &rawHeader{}, + }, nil +} + +// Encrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). +func (ctx rsaEncrypterVerifier) encrypt(cek []byte, alg KeyAlgorithm) ([]byte, error) { + switch alg { + case RSA1_5: + return rsa.EncryptPKCS1v15(RandReader, ctx.publicKey, cek) + case RSA_OAEP: + return rsa.EncryptOAEP(sha1.New(), RandReader, ctx.publicKey, cek, []byte{}) + case RSA_OAEP_256: + return rsa.EncryptOAEP(sha256.New(), RandReader, ctx.publicKey, cek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Decrypt the given payload and return the content encryption key. +func (ctx rsaDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + return ctx.decrypt(recipient.encryptedKey, headers.getAlgorithm(), generator) +} + +// Decrypt the given payload. Based on the key encryption algorithm, +// this will either use RSA-PKCS1v1.5 or RSA-OAEP (with SHA-1 or SHA-256). 
+func (ctx rsaDecrypterSigner) decrypt(jek []byte, alg KeyAlgorithm, generator keyGenerator) ([]byte, error) { + // Note: The random reader on decrypt operations is only used for blinding, + // so stubbing is meanlingless (hence the direct use of rand.Reader). + switch alg { + case RSA1_5: + defer func() { + // DecryptPKCS1v15SessionKey sometimes panics on an invalid payload + // because of an index out of bounds error, which we want to ignore. + // This has been fixed in Go 1.3.1 (released 2014/08/13), the recover() + // only exists for preventing crashes with unpatched versions. + // See: https://groups.google.com/forum/#!topic/golang-dev/7ihX6Y6kx9k + // See: https://code.google.com/p/go/source/detail?r=58ee390ff31602edb66af41ed10901ec95904d33 + _ = recover() + }() + + // Perform some input validation. + keyBytes := ctx.privateKey.PublicKey.N.BitLen() / 8 + if keyBytes != len(jek) { + // Input size is incorrect, the encrypted payload should always match + // the size of the public modulus (e.g. using a 2048 bit key will + // produce 256 bytes of output). Reject this since it's invalid input. + return nil, ErrCryptoFailure + } + + cek, _, err := generator.genKey() + if err != nil { + return nil, ErrCryptoFailure + } + + // When decrypting an RSA-PKCS1v1.5 payload, we must take precautions to + // prevent chosen-ciphertext attacks as described in RFC 3218, "Preventing + // the Million Message Attack on Cryptographic Message Syntax". We are + // therefore deliberately ignoring errors here. 
+ _ = rsa.DecryptPKCS1v15SessionKey(rand.Reader, ctx.privateKey, jek, cek) + + return cek, nil + case RSA_OAEP: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha1.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + case RSA_OAEP_256: + // Use rand.Reader for RSA blinding + return rsa.DecryptOAEP(sha256.New(), rand.Reader, ctx.privateKey, jek, []byte{}) + } + + return nil, ErrUnsupportedAlgorithm +} + +// Sign the given payload +func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return Signature{}, ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + var out []byte + var err error + + switch alg { + case RS256, RS384, RS512: + out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed) + case PS256, PS384, PS512: + out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }) + } + + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx rsaEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var hash crypto.Hash + + switch alg { + case RS256, PS256: + hash = crypto.SHA256 + case RS384, PS384: + hash = crypto.SHA384 + case RS512, PS512: + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + switch alg { + case RS256, RS384, RS512: + return rsa.VerifyPKCS1v15(ctx.publicKey, hash, hashed, 
signature) + case PS256, PS384, PS512: + return rsa.VerifyPSS(ctx.publicKey, hash, hashed, signature, nil) + } + + return ErrUnsupportedAlgorithm +} + +// Encrypt the given payload and update the object. +func (ctx ecEncrypterVerifier) encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) { + switch alg { + case ECDH_ES: + // ECDH-ES mode doesn't wrap a key, the shared secret is used directly as the key. + return recipientInfo{ + header: &rawHeader{}, + }, nil + case ECDH_ES_A128KW, ECDH_ES_A192KW, ECDH_ES_A256KW: + default: + return recipientInfo{}, ErrUnsupportedAlgorithm + } + + generator := ecKeyGenerator{ + algID: string(alg), + publicKey: ctx.publicKey, + } + + switch alg { + case ECDH_ES_A128KW: + generator.size = 16 + case ECDH_ES_A192KW: + generator.size = 24 + case ECDH_ES_A256KW: + generator.size = 32 + } + + kek, header, err := generator.genKey() + if err != nil { + return recipientInfo{}, err + } + + block, err := aes.NewCipher(kek) + if err != nil { + return recipientInfo{}, err + } + + jek, err := josecipher.KeyWrap(block, cek) + if err != nil { + return recipientInfo{}, err + } + + return recipientInfo{ + encryptedKey: jek, + header: &header, + }, nil +} + +// Get key size for EC key generator +func (ctx ecKeyGenerator) keySize() int { + return ctx.size +} + +// Get a content encryption key for ECDH-ES +func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { + priv, err := ecdsa.GenerateKey(ctx.publicKey.Curve, RandReader) + if err != nil { + return nil, rawHeader{}, err + } + + out := josecipher.DeriveECDHES(ctx.algID, []byte{}, []byte{}, priv, ctx.publicKey, ctx.size) + + b, err := json.Marshal(&JSONWebKey{ + Key: &priv.PublicKey, + }) + if err != nil { + return nil, nil, err + } + + headers := rawHeader{ + headerEPK: makeRawMessage(b), + } + + return out, headers, nil +} + +// Decrypt the given payload and return the content encryption key. 
+func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { + epk, err := headers.getEPK() + if err != nil { + return nil, errors.New("square/go-jose: invalid epk header") + } + if epk == nil { + return nil, errors.New("square/go-jose: missing epk header") + } + + publicKey, ok := epk.Key.(*ecdsa.PublicKey) + if publicKey == nil || !ok { + return nil, errors.New("square/go-jose: invalid epk header") + } + + if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { + return nil, errors.New("square/go-jose: invalid public key in epk header") + } + + apuData, err := headers.getAPU() + if err != nil { + return nil, errors.New("square/go-jose: invalid apu header") + } + apvData, err := headers.getAPV() + if err != nil { + return nil, errors.New("square/go-jose: invalid apv header") + } + + deriveKey := func(algID string, size int) []byte { + return josecipher.DeriveECDHES(algID, apuData.bytes(), apvData.bytes(), ctx.privateKey, publicKey, size) + } + + var keySize int + + algorithm := headers.getAlgorithm() + switch algorithm { + case ECDH_ES: + // ECDH-ES uses direct key agreement, no key unwrapping necessary. 
+ return deriveKey(string(headers.getEncryption()), generator.keySize()), nil + case ECDH_ES_A128KW: + keySize = 16 + case ECDH_ES_A192KW: + keySize = 24 + case ECDH_ES_A256KW: + keySize = 32 + default: + return nil, ErrUnsupportedAlgorithm + } + + key := deriveKey(string(algorithm), keySize) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + + return josecipher.KeyUnwrap(block, recipient.encryptedKey) +} + +func (ctx edDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + if alg != EdDSA { + return Signature{}, ErrUnsupportedAlgorithm + } + + sig, err := ctx.privateKey.Sign(RandReader, payload, crypto.Hash(0)) + if err != nil { + return Signature{}, err + } + + return Signature{ + Signature: sig, + protected: &rawHeader{}, + }, nil +} + +func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + if alg != EdDSA { + return ErrUnsupportedAlgorithm + } + ok := ed25519.Verify(ctx.publicKey, payload, signature) + if !ok { + return errors.New("square/go-jose: ed25519 signature failed to verify") + } + return nil +} + +// Sign the given payload +func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { + var expectedBitSize int + var hash crypto.Hash + + switch alg { + case ES256: + expectedBitSize = 256 + hash = crypto.SHA256 + case ES384: + expectedBitSize = 384 + hash = crypto.SHA384 + case ES512: + expectedBitSize = 521 + hash = crypto.SHA512 + } + + curveBits := ctx.privateKey.Curve.Params().BitSize + if expectedBitSize != curveBits { + return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r, s, err := ecdsa.Sign(RandReader, ctx.privateKey, hashed) + if err != nil { + return Signature{}, err 
+ } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes++ + } + + // We serialize the outputs (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) + + return Signature{ + Signature: out, + protected: &rawHeader{}, + }, nil +} + +// Verify the given payload +func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, alg SignatureAlgorithm) error { + var keySize int + var hash crypto.Hash + + switch alg { + case ES256: + keySize = 32 + hash = crypto.SHA256 + case ES384: + keySize = 48 + hash = crypto.SHA384 + case ES512: + keySize = 66 + hash = crypto.SHA512 + default: + return ErrUnsupportedAlgorithm + } + + if len(signature) != 2*keySize { + return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + } + + hasher := hash.New() + + // According to documentation, Write() on hash never fails + _, _ = hasher.Write(payload) + hashed := hasher.Sum(nil) + + r := big.NewInt(0).SetBytes(signature[:keySize]) + s := big.NewInt(0).SetBytes(signature[keySize:]) + + match := ecdsa.Verify(ctx.publicKey, hashed, r, s) + if !match { + return errors.New("square/go-jose: ecdsa signature failed to verify") + } + + return nil +} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go new file mode 100644 index 00000000000..126b85ce252 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go @@ -0,0 +1,196 @@ +/*- + * Copyright 2014 Square Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto/cipher" + "crypto/hmac" + "crypto/sha256" + "crypto/sha512" + "crypto/subtle" + "encoding/binary" + "errors" + "hash" +) + +const ( + nonceBytes = 16 +) + +// NewCBCHMAC instantiates a new AEAD based on CBC+HMAC. +func NewCBCHMAC(key []byte, newBlockCipher func([]byte) (cipher.Block, error)) (cipher.AEAD, error) { + keySize := len(key) / 2 + integrityKey := key[:keySize] + encryptionKey := key[keySize:] + + blockCipher, err := newBlockCipher(encryptionKey) + if err != nil { + return nil, err + } + + var hash func() hash.Hash + switch keySize { + case 16: + hash = sha256.New + case 24: + hash = sha512.New384 + case 32: + hash = sha512.New + } + + return &cbcAEAD{ + hash: hash, + blockCipher: blockCipher, + authtagBytes: keySize, + integrityKey: integrityKey, + }, nil +} + +// An AEAD based on CBC+HMAC +type cbcAEAD struct { + hash func() hash.Hash + authtagBytes int + integrityKey []byte + blockCipher cipher.Block +} + +func (ctx *cbcAEAD) NonceSize() int { + return nonceBytes +} + +func (ctx *cbcAEAD) Overhead() int { + // Maximum overhead is block size (for padding) plus auth tag length, where + // the length of the auth tag is equivalent to the key size. + return ctx.blockCipher.BlockSize() + ctx.authtagBytes +} + +// Seal encrypts and authenticates the plaintext. 
+func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { + // Output buffer -- must take care not to mangle plaintext input. + ciphertext := make([]byte, uint64(len(plaintext))+uint64(ctx.Overhead()))[:len(plaintext)] + copy(ciphertext, plaintext) + ciphertext = padBuffer(ciphertext, ctx.blockCipher.BlockSize()) + + cbc := cipher.NewCBCEncrypter(ctx.blockCipher, nonce) + + cbc.CryptBlocks(ciphertext, ciphertext) + authtag := ctx.computeAuthTag(data, nonce, ciphertext) + + ret, out := resize(dst, uint64(len(dst))+uint64(len(ciphertext))+uint64(len(authtag))) + copy(out, ciphertext) + copy(out[len(ciphertext):], authtag) + + return ret +} + +// Open decrypts and authenticates the ciphertext. +func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { + if len(ciphertext) < ctx.authtagBytes { + return nil, errors.New("square/go-jose: invalid ciphertext (too short)") + } + + offset := len(ciphertext) - ctx.authtagBytes + expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) + match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) + if match != 1 { + return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)") + } + + cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) + + // Make copy of ciphertext buffer, don't want to modify in place + buffer := append([]byte{}, []byte(ciphertext[:offset])...) 
+ + if len(buffer)%ctx.blockCipher.BlockSize() > 0 { + return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)") + } + + cbc.CryptBlocks(buffer, buffer) + + // Remove padding + plaintext, err := unpadBuffer(buffer, ctx.blockCipher.BlockSize()) + if err != nil { + return nil, err + } + + ret, out := resize(dst, uint64(len(dst))+uint64(len(plaintext))) + copy(out, plaintext) + + return ret, nil +} + +// Compute an authentication tag +func (ctx *cbcAEAD) computeAuthTag(aad, nonce, ciphertext []byte) []byte { + buffer := make([]byte, uint64(len(aad))+uint64(len(nonce))+uint64(len(ciphertext))+8) + n := 0 + n += copy(buffer, aad) + n += copy(buffer[n:], nonce) + n += copy(buffer[n:], ciphertext) + binary.BigEndian.PutUint64(buffer[n:], uint64(len(aad))*8) + + // According to documentation, Write() on hash.Hash never fails. + hmac := hmac.New(ctx.hash, ctx.integrityKey) + _, _ = hmac.Write(buffer) + + return hmac.Sum(nil)[:ctx.authtagBytes] +} + +// resize ensures the the given slice has a capacity of at least n bytes. +// If the capacity of the slice is less than n, a new slice is allocated +// and the existing data will be copied. 
+func resize(in []byte, n uint64) (head, tail []byte) { + if uint64(cap(in)) >= n { + head = in[:n] + } else { + head = make([]byte, n) + copy(head, in) + } + + tail = head[len(in):] + return +} + +// Apply padding +func padBuffer(buffer []byte, blockSize int) []byte { + missing := blockSize - (len(buffer) % blockSize) + ret, out := resize(buffer, uint64(len(buffer))+uint64(missing)) + padding := bytes.Repeat([]byte{byte(missing)}, missing) + copy(out, padding) + return ret +} + +// Remove padding +func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { + if len(buffer)%blockSize != 0 { + return nil, errors.New("square/go-jose: invalid padding") + } + + last := buffer[len(buffer)-1] + count := int(last) + + if count == 0 || count > blockSize || count > len(buffer) { + return nil, errors.New("square/go-jose: invalid padding") + } + + padding := bytes.Repeat([]byte{last}, count) + if !bytes.HasSuffix(buffer, padding) { + return nil, errors.New("square/go-jose: invalid padding") + } + + return buffer[:len(buffer)-count], nil +} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go new file mode 100644 index 00000000000..f62c3bdba5d --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go @@ -0,0 +1,75 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package josecipher + +import ( + "crypto" + "encoding/binary" + "hash" + "io" +) + +type concatKDF struct { + z, info []byte + i uint32 + cache []byte + hasher hash.Hash +} + +// NewConcatKDF builds a KDF reader based on the given inputs. +func NewConcatKDF(hash crypto.Hash, z, algID, ptyUInfo, ptyVInfo, supPubInfo, supPrivInfo []byte) io.Reader { + buffer := make([]byte, uint64(len(algID))+uint64(len(ptyUInfo))+uint64(len(ptyVInfo))+uint64(len(supPubInfo))+uint64(len(supPrivInfo))) + n := 0 + n += copy(buffer, algID) + n += copy(buffer[n:], ptyUInfo) + n += copy(buffer[n:], ptyVInfo) + n += copy(buffer[n:], supPubInfo) + copy(buffer[n:], supPrivInfo) + + hasher := hash.New() + + return &concatKDF{ + z: z, + info: buffer, + hasher: hasher, + cache: []byte{}, + i: 1, + } +} + +func (ctx *concatKDF) Read(out []byte) (int, error) { + copied := copy(out, ctx.cache) + ctx.cache = ctx.cache[copied:] + + for copied < len(out) { + ctx.hasher.Reset() + + // Write on a hash.Hash never fails + _ = binary.Write(ctx.hasher, binary.BigEndian, ctx.i) + _, _ = ctx.hasher.Write(ctx.z) + _, _ = ctx.hasher.Write(ctx.info) + + hash := ctx.hasher.Sum(nil) + chunkCopied := copy(out[copied:], hash) + copied += chunkCopied + ctx.cache = hash[chunkCopied:] + + ctx.i++ + } + + return copied, nil +} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go new file mode 100644 index 00000000000..093c646740b --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go @@ -0,0 +1,86 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "encoding/binary" +) + +// DeriveECDHES derives a shared encryption key using ECDH/ConcatKDF as described in JWE/JWA. +// It is an error to call this function with a private/public key that are not on the same +// curve. Callers must ensure that the keys are valid before calling this function. Output +// size may be at most 1<<16 bytes (64 KiB). +func DeriveECDHES(alg string, apuData, apvData []byte, priv *ecdsa.PrivateKey, pub *ecdsa.PublicKey, size int) []byte { + if size > 1<<16 { + panic("ECDH-ES output size too large, must be less than or equal to 1<<16") + } + + // algId, partyUInfo, partyVInfo inputs must be prefixed with the length + algID := lengthPrefixed([]byte(alg)) + ptyUInfo := lengthPrefixed(apuData) + ptyVInfo := lengthPrefixed(apvData) + + // suppPubInfo is the encoded length of the output size in bits + supPubInfo := make([]byte, 4) + binary.BigEndian.PutUint32(supPubInfo, uint32(size)*8) + + if !priv.PublicKey.Curve.IsOnCurve(pub.X, pub.Y) { + panic("public key not on same curve as private key") + } + + z, _ := priv.Curve.ScalarMult(pub.X, pub.Y, priv.D.Bytes()) + zBytes := z.Bytes() + + // Note that calling z.Bytes() on a big.Int may strip leading zero bytes from + // the returned byte array. This can lead to a problem where zBytes will be + // shorter than expected which breaks the key derivation. Therefore we must pad + // to the full length of the expected coordinate here before calling the KDF. 
+ octSize := dSize(priv.Curve) + if len(zBytes) != octSize { + zBytes = append(bytes.Repeat([]byte{0}, octSize-len(zBytes)), zBytes...) + } + + reader := NewConcatKDF(crypto.SHA256, zBytes, algID, ptyUInfo, ptyVInfo, supPubInfo, []byte{}) + key := make([]byte, size) + + // Read on the KDF will never fail + _, _ = reader.Read(key) + + return key +} + +// dSize returns the size in octets for a coordinate on a elliptic curve. +func dSize(curve elliptic.Curve) int { + order := curve.Params().P + bitLen := order.BitLen() + size := bitLen / 8 + if bitLen%8 != 0 { + size++ + } + return size +} + +func lengthPrefixed(data []byte) []byte { + out := make([]byte, len(data)+4) + binary.BigEndian.PutUint32(out, uint32(len(data))) + copy(out[4:], data) + return out +} diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go new file mode 100644 index 00000000000..1d36d501510 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go @@ -0,0 +1,109 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package josecipher + +import ( + "crypto/cipher" + "crypto/subtle" + "encoding/binary" + "errors" +) + +var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} + +// KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. 
+func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { + if len(cek)%8 != 0 { + return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") + } + + n := len(cek) / 8 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], cek[i*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer, defaultIV) + + for t := 0; t < 6*n; t++ { + copy(buffer[8:], r[t%n]) + + block.Encrypt(buffer, buffer) + + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(r[t%n], buffer[8:]) + } + + out := make([]byte, (n+1)*8) + copy(out, buffer[:8]) + for i := range r { + copy(out[(i+1)*8:], r[i]) + } + + return out, nil +} + +// KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. +func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { + if len(ciphertext)%8 != 0 { + return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") + } + + n := (len(ciphertext) / 8) - 1 + r := make([][]byte, n) + + for i := range r { + r[i] = make([]byte, 8) + copy(r[i], ciphertext[(i+1)*8:]) + } + + buffer := make([]byte, 16) + tBytes := make([]byte, 8) + copy(buffer[:8], ciphertext[:8]) + + for t := 6*n - 1; t >= 0; t-- { + binary.BigEndian.PutUint64(tBytes, uint64(t+1)) + + for i := 0; i < 8; i++ { + buffer[i] = buffer[i] ^ tBytes[i] + } + copy(buffer[8:], r[t%n]) + + block.Decrypt(buffer, buffer) + + copy(r[t%n], buffer[8:]) + } + + if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { + return nil, errors.New("square/go-jose: failed to unwrap key") + } + + out := make([]byte, n*8) + for i := range r { + copy(out[i*8:], r[i]) + } + + return out, nil +} diff --git a/vendor/gopkg.in/square/go-jose.v2/crypter.go b/vendor/gopkg.in/square/go-jose.v2/crypter.go new file mode 100644 index 00000000000..d24cabf6b6f --- /dev/null +++ 
b/vendor/gopkg.in/square/go-jose.v2/crypter.go @@ -0,0 +1,541 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + "reflect" + + "gopkg.in/square/go-jose.v2/json" +) + +// Encrypter represents an encrypter which produces an encrypted JWE object. +type Encrypter interface { + Encrypt(plaintext []byte) (*JSONWebEncryption, error) + EncryptWithAuthData(plaintext []byte, aad []byte) (*JSONWebEncryption, error) + Options() EncrypterOptions +} + +// A generic content cipher +type contentCipher interface { + keySize() int + encrypt(cek []byte, aad, plaintext []byte) (*aeadParts, error) + decrypt(cek []byte, aad []byte, parts *aeadParts) ([]byte, error) +} + +// A key generator (for generating/getting a CEK) +type keyGenerator interface { + keySize() int + genKey() ([]byte, rawHeader, error) +} + +// A generic key encrypter +type keyEncrypter interface { + encryptKey(cek []byte, alg KeyAlgorithm) (recipientInfo, error) // Encrypt a key +} + +// A generic key decrypter +type keyDecrypter interface { + decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) // Decrypt a key +} + +// A generic encrypter based on the given key encrypter and content cipher. 
+type genericEncrypter struct { + contentAlg ContentEncryption + compressionAlg CompressionAlgorithm + cipher contentCipher + recipients []recipientKeyInfo + keyGenerator keyGenerator + extraHeaders map[HeaderKey]interface{} +} + +type recipientKeyInfo struct { + keyID string + keyAlg KeyAlgorithm + keyEncrypter keyEncrypter +} + +// EncrypterOptions represents options that can be set on new encrypters. +type EncrypterOptions struct { + Compression CompressionAlgorithm + + // Optional map of additional keys to be inserted into the protected header + // of a JWS object. Some specifications which make use of JWS like to insert + // additional values here. All values must be JSON-serializable. + ExtraHeaders map[HeaderKey]interface{} +} + +// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it +// if necessary. It returns itself and so can be used in a fluent style. +func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions { + if eo.ExtraHeaders == nil { + eo.ExtraHeaders = map[HeaderKey]interface{}{} + } + eo.ExtraHeaders[k] = v + return eo +} + +// WithContentType adds a content type ("cty") header and returns the updated +// EncrypterOptions. +func (eo *EncrypterOptions) WithContentType(contentType ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderContentType, contentType) +} + +// WithType adds a type ("typ") header and returns the updated EncrypterOptions. +func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions { + return eo.WithHeader(HeaderType, typ) +} + +// Recipient represents an algorithm/key to encrypt messages to. +// +// PBES2Count and PBES2Salt correspond with the "p2c" and "p2s" headers used +// on the password-based encryption algorithms PBES2-HS256+A128KW, +// PBES2-HS384+A192KW, and PBES2-HS512+A256KW. If they are not provided a safe +// default of 100000 will be used for the count and a 128-bit random salt will +// be generated. 
+type Recipient struct { + Algorithm KeyAlgorithm + Key interface{} + KeyID string + PBES2Count int + PBES2Salt []byte +} + +// NewEncrypter creates an appropriate encrypter based on the key type +func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions) (Encrypter, error) { + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: getContentCipher(enc), + } + if opts != nil { + encrypter.compressionAlg = opts.Compression + encrypter.extraHeaders = opts.ExtraHeaders + } + + if encrypter.cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + + var keyID string + var rawKey interface{} + switch encryptionKey := rcpt.Key.(type) { + case JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case *JSONWebKey: + keyID, rawKey = encryptionKey.KeyID, encryptionKey.Key + case OpaqueKeyEncrypter: + keyID, rawKey = encryptionKey.KeyID(), encryptionKey + default: + rawKey = encryptionKey + } + + switch rcpt.Algorithm { + case DIRECT: + // Direct encryption mode must be treated differently + if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) { + return nil, ErrUnsupportedKeyType + } + if encrypter.cipher.keySize() != len(rawKey.([]byte)) { + return nil, ErrInvalidKeySize + } + encrypter.keyGenerator = staticKeyGenerator{ + key: rawKey.([]byte), + } + recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + case ECDH_ES: + // ECDH-ES (w/o key wrapping) is similar to DIRECT mode + typeOf := reflect.TypeOf(rawKey) + if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) { + return nil, ErrUnsupportedKeyType + } + encrypter.keyGenerator = ecKeyGenerator{ + size: encrypter.cipher.keySize(), + algID: string(enc), + publicKey: rawKey.(*ecdsa.PublicKey), + } + recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, 
rawKey.(*ecdsa.PublicKey)) + recipientInfo.keyID = keyID + if rcpt.KeyID != "" { + recipientInfo.keyID = rcpt.KeyID + } + encrypter.recipients = []recipientKeyInfo{recipientInfo} + return encrypter, nil + default: + // Can just add a standard recipient + encrypter.keyGenerator = randomKeyGenerator{ + size: encrypter.cipher.keySize(), + } + err := encrypter.addRecipient(rcpt) + return encrypter, err + } +} + +// NewMultiEncrypter creates a multi-encrypter based on the given parameters +func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *EncrypterOptions) (Encrypter, error) { + cipher := getContentCipher(enc) + + if cipher == nil { + return nil, ErrUnsupportedAlgorithm + } + if rcpts == nil || len(rcpts) == 0 { + return nil, fmt.Errorf("square/go-jose: recipients is nil or empty") + } + + encrypter := &genericEncrypter{ + contentAlg: enc, + recipients: []recipientKeyInfo{}, + cipher: cipher, + keyGenerator: randomKeyGenerator{ + size: cipher.keySize(), + }, + } + + if opts != nil { + encrypter.compressionAlg = opts.Compression + } + + for _, recipient := range rcpts { + err := encrypter.addRecipient(recipient) + if err != nil { + return nil, err + } + } + + return encrypter, nil +} + +func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { + var recipientInfo recipientKeyInfo + + switch recipient.Algorithm { + case DIRECT, ECDH_ES: + return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) + } + + recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) + if recipient.KeyID != "" { + recipientInfo.keyID = recipient.KeyID + } + + switch recipient.Algorithm { + case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: + if sr, ok := recipientInfo.keyEncrypter.(*symmetricKeyCipher); ok { + sr.p2c = recipient.PBES2Count + sr.p2s = recipient.PBES2Salt + } + } + + if err == nil { + ctx.recipients = append(ctx.recipients, recipientInfo) + } + return err 
+} + +func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKeyInfo, error) { + switch encryptionKey := encryptionKey.(type) { + case *rsa.PublicKey: + return newRSARecipient(alg, encryptionKey) + case *ecdsa.PublicKey: + return newECDHRecipient(alg, encryptionKey) + case []byte: + return newSymmetricRecipient(alg, encryptionKey) + case string: + return newSymmetricRecipient(alg, []byte(encryptionKey)) + case *JSONWebKey: + recipient, err := makeJWERecipient(alg, encryptionKey.Key) + recipient.keyID = encryptionKey.KeyID + return recipient, err + } + if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok { + return newOpaqueKeyEncrypter(alg, encrypter) + } + return recipientKeyInfo{}, ErrUnsupportedKeyType +} + +// newDecrypter creates an appropriate decrypter based on the key type +func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) { + switch decryptionKey := decryptionKey.(type) { + case *rsa.PrivateKey: + return &rsaDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case *ecdsa.PrivateKey: + return &ecDecrypterSigner{ + privateKey: decryptionKey, + }, nil + case []byte: + return &symmetricKeyCipher{ + key: decryptionKey, + }, nil + case string: + return &symmetricKeyCipher{ + key: []byte(decryptionKey), + }, nil + case JSONWebKey: + return newDecrypter(decryptionKey.Key) + case *JSONWebKey: + return newDecrypter(decryptionKey.Key) + } + if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok { + return &opaqueKeyDecrypter{decrypter: okd}, nil + } + return nil, ErrUnsupportedKeyType +} + +// Implementation of encrypt method producing a JWE object. +func (ctx *genericEncrypter) Encrypt(plaintext []byte) (*JSONWebEncryption, error) { + return ctx.EncryptWithAuthData(plaintext, nil) +} + +// Implementation of encrypt method producing a JWE object. 
+func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWebEncryption, error) { + obj := &JSONWebEncryption{} + obj.aad = aad + + obj.protected = &rawHeader{} + err := obj.protected.set(headerEncryption, ctx.contentAlg) + if err != nil { + return nil, err + } + + obj.recipients = make([]recipientInfo, len(ctx.recipients)) + + if len(ctx.recipients) == 0 { + return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") + } + + cek, headers, err := ctx.keyGenerator.genKey() + if err != nil { + return nil, err + } + + obj.protected.merge(&headers) + + for i, info := range ctx.recipients { + recipient, err := info.keyEncrypter.encryptKey(cek, info.keyAlg) + if err != nil { + return nil, err + } + + err = recipient.header.set(headerAlgorithm, info.keyAlg) + if err != nil { + return nil, err + } + + if info.keyID != "" { + err = recipient.header.set(headerKeyID, info.keyID) + if err != nil { + return nil, err + } + } + obj.recipients[i] = recipient + } + + if len(ctx.recipients) == 1 { + // Move per-recipient headers into main protected header if there's + // only a single recipient. 
+ obj.protected.merge(obj.recipients[0].header) + obj.recipients[0].header = nil + } + + if ctx.compressionAlg != NONE { + plaintext, err = compress(ctx.compressionAlg, plaintext) + if err != nil { + return nil, err + } + + err = obj.protected.set(headerCompression, ctx.compressionAlg) + if err != nil { + return nil, err + } + } + + for k, v := range ctx.extraHeaders { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + (*obj.protected)[k] = makeRawMessage(b) + } + + authData := obj.computeAuthData() + parts, err := ctx.cipher.encrypt(cek, authData, plaintext) + if err != nil { + return nil, err + } + + obj.iv = parts.iv + obj.ciphertext = parts.ciphertext + obj.tag = parts.tag + + return obj, nil +} + +func (ctx *genericEncrypter) Options() EncrypterOptions { + return EncrypterOptions{ + Compression: ctx.compressionAlg, + ExtraHeaders: ctx.extraHeaders, + } +} + +// Decrypt and validate the object and return the plaintext. Note that this +// function does not support multi-recipient, if you desire multi-recipient +// decryption use DecryptMulti instead. 
+func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) { + headers := obj.mergedHeaders(nil) + + if len(obj.recipients) > 1 { + return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one") + } + + critical, err := headers.getCritical() + if err != nil { + return nil, fmt.Errorf("square/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return nil, fmt.Errorf("square/go-jose: unsupported crit header") + } + + decrypter, err := newDecrypter(decryptionKey) + if err != nil { + return nil, err + } + + cipher := getContentCipher(headers.getEncryption()) + if cipher == nil { + return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + var plaintext []byte + recipient := obj.recipients[0] + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + } + + if plaintext == nil { + return nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. + if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + return plaintext, err +} + +// DecryptMulti decrypts and validates the object and returns the plaintexts, +// with support for multiple recipients. It returns the index of the recipient +// for which the decryption was successful, the merged headers for that recipient, +// and the plaintext. 
+func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) { + globalHeaders := obj.mergedHeaders(nil) + + critical, err := globalHeaders.getCritical() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: invalid crit header") + } + + if len(critical) > 0 { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported crit header") + } + + decrypter, err := newDecrypter(decryptionKey) + if err != nil { + return -1, Header{}, nil, err + } + + encryption := globalHeaders.getEncryption() + cipher := getContentCipher(encryption) + if cipher == nil { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(encryption)) + } + + generator := randomKeyGenerator{ + size: cipher.keySize(), + } + + parts := &aeadParts{ + iv: obj.iv, + ciphertext: obj.ciphertext, + tag: obj.tag, + } + + authData := obj.computeAuthData() + + index := -1 + var plaintext []byte + var headers rawHeader + + for i, recipient := range obj.recipients { + recipientHeaders := obj.mergedHeaders(&recipient) + + cek, err := decrypter.decryptKey(recipientHeaders, &recipient, generator) + if err == nil { + // Found a valid CEK -- let's try to decrypt. + plaintext, err = cipher.decrypt(cek, authData, parts) + if err == nil { + index = i + headers = recipientHeaders + break + } + } + } + + if plaintext == nil || err != nil { + return -1, Header{}, nil, ErrCryptoFailure + } + + // The "zip" header parameter may only be present in the protected header. 
+ if comp := obj.protected.getCompression(); comp != "" { + plaintext, err = decompress(comp, plaintext) + } + + sanitized, err := headers.sanitized() + if err != nil { + return -1, Header{}, nil, fmt.Errorf("square/go-jose: failed to sanitize header: %v", err) + } + + return index, sanitized, plaintext, err +} diff --git a/vendor/gopkg.in/square/go-jose.v2/cryptosigner/cryptosigner.go b/vendor/gopkg.in/square/go-jose.v2/cryptosigner/cryptosigner.go new file mode 100644 index 00000000000..0ec98768eb1 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/cryptosigner/cryptosigner.go @@ -0,0 +1,138 @@ +/*- + * Copyright 2018 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// Package cryptosigner implements an OpaqueSigner that wraps a "crypto".Signer +// +// https://godoc.org/crypto#Signer +package cryptosigner + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "encoding/asn1" + "io" + "math/big" + + "golang.org/x/crypto/ed25519" + "gopkg.in/square/go-jose.v2" +) + +// Opaque creates an OpaqueSigner from a "crypto".Signer +func Opaque(s crypto.Signer) jose.OpaqueSigner { + pk := &jose.JSONWebKey{ + Key: s.Public(), + } + return &cryptoSigner{signer: s, rand: rand.Reader, pk: pk} +} + +type cryptoSigner struct { + pk *jose.JSONWebKey + signer crypto.Signer + rand io.Reader +} + +func (s *cryptoSigner) Public() *jose.JSONWebKey { + return s.pk +} + +func (s *cryptoSigner) Algs() []jose.SignatureAlgorithm { + switch s.signer.Public().(type) { + case ed25519.PublicKey: + return []jose.SignatureAlgorithm{jose.EdDSA} + case *ecdsa.PublicKey: + // This could be more precise + return []jose.SignatureAlgorithm{jose.ES256, jose.ES384, jose.ES512} + case *rsa.PublicKey: + return []jose.SignatureAlgorithm{jose.RS256, jose.RS384, jose.RS512, jose.PS256, jose.PS384, jose.PS512} + default: + return nil + } +} + +func (s *cryptoSigner) SignPayload(payload []byte, alg jose.SignatureAlgorithm) ([]byte, error) { + var hash crypto.Hash + switch alg { + case jose.EdDSA: + case jose.RS256, jose.PS256, jose.ES256: + hash = crypto.SHA256 + case jose.RS384, jose.PS384, jose.ES384: + hash = crypto.SHA384 + case jose.RS512, jose.PS512, jose.ES512: + hash = crypto.SHA512 + default: + return nil, jose.ErrUnsupportedAlgorithm + } + + var hashed []byte + if hash != crypto.Hash(0) { + hasher := hash.New() + if _, err := hasher.Write(payload); err != nil { + return nil, err + } + hashed = hasher.Sum(nil) + } + + var ( + out []byte + err error + ) + switch alg { + case jose.EdDSA: + out, err = s.signer.Sign(s.rand, payload, crypto.Hash(0)) + case jose.ES256, jose.ES384, jose.ES512: + var byteLen int + switch alg { + case jose.ES256: + 
byteLen = 32 + case jose.ES384: + byteLen = 48 + case jose.ES512: + byteLen = 66 + } + var b []byte + b, err = s.signer.Sign(s.rand, hashed, hash) + if err != nil { + return nil, err + } + + sig := struct { + R, S *big.Int + }{} + if _, err = asn1.Unmarshal(b, &sig); err != nil { + return nil, err + } + + rBytes := sig.R.Bytes() + rBytesPadded := make([]byte, byteLen) + copy(rBytesPadded[byteLen-len(rBytes):], rBytes) + + sBytes := sig.S.Bytes() + sBytesPadded := make([]byte, byteLen) + copy(sBytesPadded[byteLen-len(sBytes):], sBytes) + + out = append(rBytesPadded, sBytesPadded...) + case jose.RS256, jose.RS384, jose.RS512: + out, err = s.signer.Sign(s.rand, hashed, hash) + case jose.PS256, jose.PS384, jose.PS512: + out, err = s.signer.Sign(s.rand, hashed, &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: hash, + }) + } + return out, err +} diff --git a/vendor/gopkg.in/square/go-jose.v2/doc.go b/vendor/gopkg.in/square/go-jose.v2/doc.go new file mode 100644 index 00000000000..dd1387f3f06 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/doc.go @@ -0,0 +1,27 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + +Package jose aims to provide an implementation of the Javascript Object Signing +and Encryption set of standards. It implements encryption and signing based on +the JSON Web Encryption and JSON Web Signature standards, with optional JSON +Web Token support available in a sub-package. 
The library supports both the +compact and full serialization formats, and has optional support for multiple +recipients. + +*/ +package jose diff --git a/vendor/gopkg.in/square/go-jose.v2/encoding.go b/vendor/gopkg.in/square/go-jose.v2/encoding.go new file mode 100644 index 00000000000..70f7385c419 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/encoding.go @@ -0,0 +1,185 @@ +/*- + * Copyright 2014 Square Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jose + +import ( + "bytes" + "compress/flate" + "encoding/base64" + "encoding/binary" + "io" + "math/big" + "strings" + "unicode" + + "gopkg.in/square/go-jose.v2/json" +) + +// Helper function to serialize known-good objects. +// Precondition: value is not a nil pointer. +func mustSerializeJSON(value interface{}) []byte { + out, err := json.Marshal(value) + if err != nil { + panic(err) + } + // We never want to serialize the top-level value "null," since it's not a + // valid JOSE message. But if a caller passes in a nil pointer to this method, + // MarshalJSON will happily serialize it as the top-level value "null". If + // that value is then embedded in another operation, for instance by being + // base64-encoded and fed as input to a signing algorithm + // (https://github.com/square/go-jose/issues/22), the result will be + // incorrect. Because this method is intended for known-good objects, and a nil + // pointer is not a known-good object, we are free to panic in this case. 
+ // Note: It's not possible to directly check whether the data pointed at by an + // interface is a nil pointer, so we do this hacky workaround. + // https://groups.google.com/forum/#!topic/golang-nuts/wnH302gBa4I + if string(out) == "null" { + panic("Tried to serialize a nil pointer.") + } + return out +} + +// Strip all newlines and whitespace +func stripWhitespace(data string) string { + buf := strings.Builder{} + buf.Grow(len(data)) + for _, r := range data { + if !unicode.IsSpace(r) { + buf.WriteRune(r) + } + } + return buf.String() +} + +// Perform compression based on algorithm +func compress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return deflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Perform decompression based on algorithm +func decompress(algorithm CompressionAlgorithm, input []byte) ([]byte, error) { + switch algorithm { + case DEFLATE: + return inflate(input) + default: + return nil, ErrUnsupportedAlgorithm + } +} + +// Compress with DEFLATE +func deflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + + // Writing to byte buffer, err is always nil + writer, _ := flate.NewWriter(output, 1) + _, _ = io.Copy(writer, bytes.NewBuffer(input)) + + err := writer.Close() + return output.Bytes(), err +} + +// Decompress with DEFLATE +func inflate(input []byte) ([]byte, error) { + output := new(bytes.Buffer) + reader := flate.NewReader(bytes.NewBuffer(input)) + + _, err := io.Copy(output, reader) + if err != nil { + return nil, err + } + + err = reader.Close() + return output.Bytes(), err +} + +// byteBuffer represents a slice of bytes that can be serialized to url-safe base64. 
+type byteBuffer struct { + data []byte +} + +func newBuffer(data []byte) *byteBuffer { + if data == nil { + return nil + } + return &byteBuffer{ + data: data, + } +} + +func newFixedSizeBuffer(data []byte, length int) *byteBuffer { + if len(data) > length { + panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + } + pad := make([]byte, length-len(data)) + return newBuffer(append(pad, data...)) +} + +func newBufferFromInt(num uint64) *byteBuffer { + data := make([]byte, 8) + binary.BigEndian.PutUint64(data, num) + return newBuffer(bytes.TrimLeft(data, "\x00")) +} + +func (b *byteBuffer) MarshalJSON() ([]byte, error) { + return json.Marshal(b.base64()) +} + +func (b *byteBuffer) UnmarshalJSON(data []byte) error { + var encoded string + err := json.Unmarshal(data, &encoded) + if err != nil { + return err + } + + if encoded == "" { + return nil + } + + decoded, err := base64.RawURLEncoding.DecodeString(encoded) + if err != nil { + return err + } + + *b = *newBuffer(decoded) + + return nil +} + +func (b *byteBuffer) base64() string { + return base64.RawURLEncoding.EncodeToString(b.data) +} + +func (b *byteBuffer) bytes() []byte { + // Handling nil here allows us to transparently handle nil slices when serializing. + if b == nil { + return nil + } + return b.data +} + +func (b byteBuffer) bigInt() *big.Int { + return new(big.Int).SetBytes(b.data) +} + +func (b byteBuffer) toInt() int { + return int(b.bigInt().Int64()) +} diff --git a/vendor/gopkg.in/square/go-jose.v2/json/LICENSE b/vendor/gopkg.in/square/go-jose.v2/json/LICENSE new file mode 100644 index 00000000000..74487567632 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/json/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/gopkg.in/square/go-jose.v2/json/README.md b/vendor/gopkg.in/square/go-jose.v2/json/README.md new file mode 100644 index 00000000000..86de5e5581f --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/json/README.md @@ -0,0 +1,13 @@ +# Safe JSON + +This repository contains a fork of the `encoding/json` package from Go 1.6. + +The following changes were made: + +* Object deserialization uses case-sensitive member name matching instead of + [case-insensitive matching](https://www.ietf.org/mail-archive/web/json/current/msg03763.html). 
+ This is to avoid differences in the interpretation of JOSE messages between + go-jose and libraries written in other languages. +* When deserializing a JSON object, we check for duplicate keys and reject the + input whenever we detect a duplicate. Rather than trying to work with malformed + data, we prefer to reject it right away. diff --git a/vendor/gopkg.in/square/go-jose.v2/json/decode.go b/vendor/gopkg.in/square/go-jose.v2/json/decode.go new file mode 100644 index 00000000000..37457e5a834 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/json/decode.go @@ -0,0 +1,1183 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "errors" + "fmt" + "reflect" + "runtime" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. +// Unmarshal will only set exported fields of the struct. 
+// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a string-keyed map, Unmarshal first +// establishes a map to use, If the map is nil, Unmarshal allocates a new map. +// Otherwise Unmarshal reuses the existing map, keeping existing entries. +// Unmarshal then stores key-value pairs from the JSON object into the map. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. 
+// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +// +func Unmarshal(data []byte, v interface{}) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + var d decodeState + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. +type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes +} + +func (e *UnmarshalTypeError) Error() string { + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// (No longer used; kept for compatibility.) +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) 
+type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. + d.value(rv) + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// isValidNumber reports whether s is a valid JSON number literal. +func isValidNumber(s string) bool { + // This function implements the JSON numbers grammar. + // See https://tools.ietf.org/html/rfc7159#section-6 + // and http://json.org/number.gif + + if s == "" { + return false + } + + // Optional - + if s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + + // Digits + switch { + default: + return false + + case s[0] == '0': + s = s[1:] + + case '1' <= s[0] && s[0] <= '9': + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // . followed by 1 or more digits. + if len(s) >= 2 && s[0] == '.' 
&& '0' <= s[1] && s[1] <= '9' { + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + s = s[1:] + if s[0] == '+' || s[0] == '-' { + s = s[1:] + if s == "" { + return false + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + } + } + + // Make sure we are at the end. + return s == "" +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // read offset in data + scan scanner + nextscan scanner // for calls to nextValue + savedError error + useNumber bool +} + +// errPhase is used for errors that should not happen unless +// there is a bug in the JSON decoder or something is editing +// the data slice while the decoder executes. +var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + return d +} + +// error aborts the decoding by panicking with err. +func (d *decodeState) error(err error) { + panic(err) +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. +func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = err + } +} + +// next cuts off and returns the next full JSON value in d.data[d.off:]. +// The next value is known to be an object or array, not a literal. +func (d *decodeState) next() []byte { + c := d.data[d.off] + item, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // Our scanner has seen the opening brace/bracket + // and thinks we're still in the middle of the object. + // invent a closing brace/bracket to get it out. 
+ if c == '{' { + d.scan.step(&d.scan, '}') + } else { + d.scan.step(&d.scan, ']') + } + + return item +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +// It updates d.off and returns the new scan code. +func (d *decodeState) scanWhile(op int) int { + var newOp int + for { + if d.off >= len(d.data) { + newOp = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } else { + c := d.data[d.off] + d.off++ + newOp = d.scan.step(&d.scan, c) + } + if newOp != op { + break + } + } + return newOp +} + +// value decodes a JSON value from d.data[d.off:] into the value. +// it updates d.off to point past the decoded value. +func (d *decodeState) value(v reflect.Value) { + if !v.IsValid() { + _, rest, err := nextValue(d.data[d.off:], &d.nextscan) + if err != nil { + d.error(err) + } + d.off = len(d.data) - len(rest) + + // d.scan thinks we're still at the beginning of the item. + // Feed in an empty string - the shortest, simplest value - + // so that it knows we got to the end of the value. + if d.scan.redo { + // rewind. + d.scan.redo = false + d.scan.step = stateBeginValue + } + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + + n := len(d.scan.parseState) + if n > 0 && d.scan.parseState[n-1] == parseObjectKey { + // d.scan thinks we just read an object key; finish the object + d.scan.step(&d.scan, ':') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '"') + d.scan.step(&d.scan, '}') + } + + return + } + + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(v) + + case scanBeginObject: + d.object(v) + + case scanBeginLiteral: + d.literal(v) + } +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. 
+func (d *decodeState) valueQuoted() interface{} { + switch op := d.scanWhile(scanSkipSpace); op { + default: + d.error(errPhase) + + case scanBeginArray: + d.array(reflect.Value{}) + + case scanBeginObject: + d.object(reflect.Value{}) + + case scanBeginLiteral: + switch v := d.literalInterface().(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into the value v. +// the first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) { + // Check for unmarshaler. 
+ u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + } + + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + v.Set(reflect.ValueOf(d.arrayInterface())) + return + } + // Otherwise it's invalid. + fallthrough + default: + d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) + d.off-- + d.next() + return + case reflect.Array: + case reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + d.value(v.Index(i)) + } else { + // Ran out of fixed array: skip. + d.value(reflect.Value{}) + } + i++ + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. 
+ z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } +} + +var nullLiteral = []byte("null") + +// object consumes an object from d.data[d.off-1:], decoding into the value v. +// the first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) { + // Check for unmarshaler. + u, ut, pv := d.indirect(v, false) + if u != nil { + d.off-- + err := u.UnmarshalJSON(d.next()) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + v = pv + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + v.Set(reflect.ValueOf(d.objectInterface())) + return + } + + // Check type of target: struct or map[string]T + switch v.Kind() { + case reflect.Map: + // map must have string kind + t := v.Type() + if t.Key().Kind() != reflect.String { + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + + default: + d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) + d.off-- + d.next() // skip over { } in input + return + } + + var mapElem reflect.Value + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read key. + start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. 
+ _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := v.Type().Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + fields := cachedTypeFields(v.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, []byte(key)) { + f = ff + break + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Ptr { + if subv.IsNil() { + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + } + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + d.literalStore(nullLiteral, subv, false) + case string: + d.literalStore([]byte(qv), subv, true) + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + d.value(subv) + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kv := reflect.ValueOf(key).Convert(v.Type().Key()) + v.SetMapIndex(kv, subv) + } + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } +} + +// literal consumes a literal from d.data[d.off-1:], decoding into the value v. +// The first byte of the literal has been read already +// (that's how the caller knows it's a literal). 
+func (d *decodeState) literal(v reflect.Value) { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + + d.literalStore(d.data[start:d.off], v, false) +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (interface{}, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { + // Check for unmarshaler. 
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return + } + wantptr := item[0] == 'n' // null + u, ut, pv := d.indirect(v, wantptr) + if u != nil { + err := u.UnmarshalJSON(item) + if err != nil { + d.error(err) + } + return + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + return + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + err := ut.UnmarshalText(s) + if err != nil { + d.error(err) + } + return + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + switch v.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := c == 't' + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + case 
reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(errPhase) + } + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + v.SetString(s) + if !isValidNumber(s) { + d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)) + } + break + } + if fromQuoted { + d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + } + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), 
int64(d.off)}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) + break + } + v.SetFloat(n) + } + } +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. + +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() interface{} { + switch d.scanWhile(scanSkipSpace) { + default: + d.error(errPhase) + panic("unreachable") + case scanBeginArray: + return d.arrayInterface() + case scanBeginObject: + return d.objectInterface() + case scanBeginLiteral: + return d.literalInterface() + } +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []interface{} { + var v = make([]interface{}, 0) + for { + // Look ahead for ] - can only happen on first iteration. + op := d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + + // Back up so d.value can have the byte we just read. + d.off-- + d.scan.undo(op) + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + op = d.scanWhile(scanSkipSpace) + if op == scanEndArray { + break + } + if op != scanArrayValue { + d.error(errPhase) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]interface{} { + m := make(map[string]interface{}) + keys := map[string]bool{} + + for { + // Read opening " of string key or closing }. + op := d.scanWhile(scanSkipSpace) + if op == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if op != scanBeginLiteral { + d.error(errPhase) + } + + // Read string key. 
+ start := d.off - 1 + op = d.scanWhile(scanContinue) + item := d.data[start : d.off-1] + key, ok := unquote(item) + if !ok { + d.error(errPhase) + } + + // Check for duplicate keys. + _, ok = keys[key] + if !ok { + keys[key] = true + } else { + d.error(fmt.Errorf("json: duplicate key '%s' in object", key)) + } + + // Read : before value. + if op == scanSkipSpace { + op = d.scanWhile(scanSkipSpace) + } + if op != scanObjectKey { + d.error(errPhase) + } + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. + op = d.scanWhile(scanSkipSpace) + if op == scanEndObject { + break + } + if op != scanObjectValue { + d.error(errPhase) + } + } + return m +} + +// literalInterface is like literal but returns an interface value. +func (d *decodeState) literalInterface() interface{} { + // All bytes inside literal return scanContinue op code. + start := d.off - 1 + op := d.scanWhile(scanContinue) + + // Scan read one byte too far; back up. + d.off-- + d.scan.undo(op) + item := d.data[start:d.off] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + d.error(errPhase) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + d.error(errPhase) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. 
+func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/gopkg.in/square/go-jose.v2/json/encode.go b/vendor/gopkg.in/square/go-jose.v2/json/encode.go new file mode 100644 index 00000000000..1dae8bb7cd8 --- /dev/null +++ b/vendor/gopkg.in/square/go-jose.v2/json/encode.go @@ -0,0 +1,1197 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON objects as defined in +// RFC 4627. The mapping between JSON objects and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. +// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. 
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e" +// to keep some browsers from misinterpreting JSON output as HTML. +// Ampersand "&" is also escaped to "\u0026" for the same reason. +// +// Array and slice values encode as JSON arrays, except that +// []byte encodes as a base64-encoded string, and a nil slice +// encodes as the null JSON object. +// +// Struct values encode as JSON objects. Each exported struct field +// becomes a member of the object unless +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. +// The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or string of +// length zero. The object's default key string is the struct field name +// but can be specified in the struct field's tag value. The "json" key in +// the struct field's tag value is the key name, followed by an optional comma +// and options. Examples: +// +// // Field is ignored by this package. +// Field int `json:"-"` +// +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` +// +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` +// +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` +// +// The "string" option signals that a field is stored as JSON inside a +// JSON-encoded string. It applies only to fields of string, floating point, +// integer, or boolean types. This extra level of encoding is sometimes used +// when communicating with JavaScript programs: +// +// Int64String int64 `json:",string"` +// +// The key name will be used if it's a non-empty string consisting of +// only Unicode letters, digits, dollar signs, percent signs, hyphens, +// underscores and slashes. 
+// +// Anonymous struct fields are usually marshaled as if their inner exported fields +// were fields in the outer struct, subject to the usual Go visibility rules amended +// as described in the next paragraph. +// An anonymous struct field with a name given in its JSON tag is treated as +// having that name, rather than being anonymous. +// An anonymous struct field of interface type is treated the same as having +// that type as its name, rather than being anonymous. +// +// The Go visibility rules for struct fields are amended for JSON when +// deciding which field to marshal or unmarshal. If there are +// multiple fields at the same level, and that level is the least +// nested (and would therefore be the nesting level selected by the +// usual Go rules), the following extra rules apply: +// +// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered, +// even if there are multiple untagged fields that would otherwise conflict. +// 2) If there is exactly one field (tagged or not according to the first rule), that is selected. +// 3) Otherwise there are multiple fields, and all are ignored; no error occurs. +// +// Handling of anonymous struct fields is new in Go 1.1. +// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of +// an anonymous struct field in both current and earlier versions, give the field +// a JSON tag of "-". +// +// Map values encode as JSON objects. +// The map's key type must be string; the map keys are used as JSON object +// keys, subject to the UTF-8 coercion described for string values above. +// +// Pointer values encode as the value pointed to. +// A nil pointer encodes as the null JSON object. +// +// Interface values encode as the value contained in the interface. +// A nil interface value encodes as the null JSON object. +// +// Channel, complex, and function values cannot be encoded in JSON. +// Attempting to encode such a value causes Marshal to return +// an UnsupportedTypeError. 
+// +// JSON cannot represent cyclic data structures and Marshal does not +// handle them. Passing cyclic structures to Marshal will result in +// an infinite recursion. +// +func Marshal(v interface{}) ([]byte, error) { + e := &encodeState{} + err := e.marshal(v) + if err != nil { + return nil, err + } + return e.Bytes(), nil +} + +// MarshalIndent is like Marshal but applies Indent to format the output. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + b, err := Marshal(v) + if err != nil { + return nil, err + } + var buf bytes.Buffer + err = Indent(&buf, b, prefix, indent) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029 +// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029 +// so that the JSON will be safe to embed inside HTML