diff --git a/config/100-deployment.yaml b/config/100-deployment.yaml index 298762b1ea..1a0898146a 100644 --- a/config/100-deployment.yaml +++ b/config/100-deployment.yaml @@ -92,6 +92,9 @@ spec: mountPath: /etc/signing-secrets - name: oidc-info mountPath: /var/run/sigstore/cosign + - name: spiffe-workload-api + mountPath: /spiffe-workload-api + readOnly: true env: - name: SYSTEM_NAMESPACE valueFrom: @@ -120,3 +123,6 @@ spec: path: oidc-token expirationSeconds: 600 # Use as short-lived as possible. audience: sigstore + - name: spiffe-workload-api + csi: + driver: "csi.spiffe.io" diff --git a/go.mod b/go.mod index 22af3b67ff..ecf9aee6db 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,8 @@ go 1.17 replace k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c +replace github.com/tektoncd/pipeline v0.38.1 => ../pipeline + require ( cloud.google.com/go/compute v1.7.0 cloud.google.com/go/storage v1.24.0 @@ -44,7 +46,7 @@ require ( github.com/sigstore/rekor v0.5.0 github.com/sigstore/sigstore v1.2.1-0.20220424143412-3d41663116d5 github.com/spiffe/go-spiffe/v2 v2.1.1 - github.com/tektoncd/pipeline v0.37.2 + github.com/tektoncd/pipeline v0.38.1 github.com/tektoncd/plumbing v0.0.0-20220329085922-d765a5cba75f github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 go.uber.org/atomic v1.9.0 @@ -137,7 +139,7 @@ require ( github.com/charithe/durationcheck v0.0.9 // indirect github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect github.com/chrismellard/docker-credential-acr-env v0.0.0-20220119192733-fe33c00cee21 // indirect - github.com/cloudevents/sdk-go/v2 v2.5.0 // indirect + github.com/cloudevents/sdk-go/v2 v2.10.1 // indirect github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect @@ -347,6 +349,7 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // 
indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.12.0 // indirect + github.com/spiffe/spire-api-sdk v1.3.2 // indirect github.com/src-d/gcfg v1.4.0 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect diff --git a/go.sum b/go.sum index 3806c94bfe..5c92df8e68 100644 --- a/go.sum +++ b/go.sum @@ -260,6 +260,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -561,8 +562,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudevents/sdk-go/v2 v2.5.0 h1:Ts6aLHbBUJfcNcZ4ouAfJ4+Np7SE1Yf2w4ADKRCd7Fo= -github.com/cloudevents/sdk-go/v2 v2.5.0/go.mod h1:nlXhgFkf0uTopxmRXalyMwS2LG70cRGPrxzmjJgSG0U= +github.com/cloudevents/sdk-go/v2 v2.10.1 h1:qNFovJ18fWOd8Q9ydWJPk1oiFudXyv1GxJIP7MwPjuM= +github.com/cloudevents/sdk-go/v2 v2.10.1/go.mod 
h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= @@ -604,6 +605,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -625,8 +627,8 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s= github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= -github.com/containerd/containerd v1.5.10 h1:3cQ2uRVCkJVcx5VombsE7105Gl9Wrl7ORAO3+4+ogf4= -github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.5.13 h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE= +github.com/containerd/containerd v1.5.13/go.mod 
h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -1672,7 +1674,7 @@ github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJk github.com/jedisct1/go-minisign v0.0.0-20210703085342-c1f07ee84431/go.mod h1:3VIJLjlf5Iako82IX/5KOoCzDmogK5mO+bl+DRItnR8= github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b h1:ZGiXF8sz7PDk6RgkP+A/SFfUD0ZR/AgG6SpRNEDKZy8= github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b/go.mod h1:hQmNrgofl+IY/8L+n20H6E6PWBBTokdsv+q49j0QhsU= -github.com/jenkins-x/go-scm v1.10.10/go.mod h1:z7xTO9/VzqW3xEbEMH2z5cpOGrZ8+nOHOWfU1ngFGxs= +github.com/jenkins-x/go-scm v1.11.19/go.mod h1:eIcty4+tf6E7ycGOg0cUqnaLP+1LH1Z8zncQFQqRa3E= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= @@ -2424,6 +2426,8 @@ github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiu github.com/spiffe/go-spiffe/v2 v2.1.0/go.mod h1:5qg6rpqlwIub0JAiF1UK9IMD6BpPTmvG6yfSgDBs5lg= github.com/spiffe/go-spiffe/v2 v2.1.1 h1:RT9kM8MZLZIsPTH+HKQEP5yaAk3yd/VBzlINaRjXs8k= github.com/spiffe/go-spiffe/v2 v2.1.1/go.mod h1:5qg6rpqlwIub0JAiF1UK9IMD6BpPTmvG6yfSgDBs5lg= +github.com/spiffe/spire-api-sdk v1.3.2 h1:8F5HQGm3jDL6amuxxeQcH8Rqs6/WOwaLt6h0LTU6uYA= +github.com/spiffe/spire-api-sdk v1.3.2/go.mod h1:73BC0cOGkqRQrqoB1Djk7etxN+bE1ypmzZMkhCQs6kY= github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= github.com/src-d/gcfg 
v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= @@ -2474,8 +2478,6 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A= github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= -github.com/tektoncd/pipeline v0.37.2 h1:JIp410ktvJPkprPqK0sgUGpRlZosy2B0C1jbUTwWd9c= -github.com/tektoncd/pipeline v0.37.2/go.mod h1:ZZOSGj1vCeK/xONQGcxBs+m17NzCXNNOqglCDhOPwjY= github.com/tektoncd/plumbing v0.0.0-20220304154415-13228ac1f4a4/go.mod h1:b9esRuV1absBvaPzKkjYdKXjC5Tgs8/vh1sz++RiTdc= github.com/tektoncd/plumbing v0.0.0-20220329085922-d765a5cba75f h1:74OqGOB+R3aobu2j9fxKnsgupS9c7KH1bZaR5LSXICw= github.com/tektoncd/plumbing v0.0.0-20220329085922-d765a5cba75f/go.mod h1:b9esRuV1absBvaPzKkjYdKXjC5Tgs8/vh1sz++RiTdc= diff --git a/pkg/chains/formats/format.go b/pkg/chains/formats/format.go index 4709c27f76..4fd0142672 100644 --- a/pkg/chains/formats/format.go +++ b/pkg/chains/formats/format.go @@ -13,9 +13,18 @@ limitations under the License. 
package formats +import ( + "context" + + "github.com/pkg/errors" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" + "go.uber.org/zap" +) + // Payloader is an interface to generate a chains Payload from a TaskRun type Payloader interface { - CreatePayload(obj interface{}) (interface{}, error) + CreatePayload(ctx context.Context, obj interface{}) (interface{}, error) Type() PayloadType Wrap() bool } @@ -30,3 +39,18 @@ const ( ) var AllFormatters = []PayloadType{PayloadTypeTekton, PayloadTypeSimpleSigning, PayloadTypeInTotoIte6} + +func VerifySpire(ctx context.Context, tr *v1beta1.TaskRun, spireControllerAPI spire.ControllerAPIClient, logger *zap.SugaredLogger) error { + + if !tr.IsTaskRunResultVerified() { + return errors.New("taskrun status condition not verified. Spire taskrun results verification failure") + } + logger.Info("spire taskrun status condition verified") + + if err := spireControllerAPI.VerifyStatusInternalAnnotation(ctx, tr, logger); err != nil { + return errors.Wrap(err, "verifying SPIRE") + } + logger.Info("internal status annotation verified by spire") + + return nil +} diff --git a/pkg/chains/formats/format_test.go b/pkg/chains/formats/format_test.go new file mode 100644 index 0000000000..0616116313 --- /dev/null +++ b/pkg/chains/formats/format_test.go @@ -0,0 +1,238 @@ +/* +Copyright 2020 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package formats + +import ( + "context" + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" + logtesting "knative.dev/pkg/logging/testing" +) + +func TestVerifySpire(t *testing.T) { + spireMockClient := &spire.MockClient{} + var ( + cc spire.ControllerAPIClient = spireMockClient + ) + + ctx := context.Background() + + testCases := []struct { + // description of test + desc string + // function to tamper + tamperCondition apis.Condition + // annotations to set + setAnnotations map[string]string + // whether sign/verify procedure should succeed + success bool + }{ + { + desc: "non-intrusive tamper with status annotation", + tamperCondition: apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionTrue, + }, + setAnnotations: map[string]string{ + "unrelated-hash": "change", + }, + success: true, + }, + { + desc: "tamper status hash annotation", + tamperCondition: apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionTrue, + }, + setAnnotations: map[string]string{ + spire.TaskRunStatusHashAnnotation: "change-hash", + }, + success: false, + }, + { + desc: "tamper condition fail", + tamperCondition: apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionFalse, + }, + success: false, + }, + { + desc: "Spire not verified", + tamperCondition: apis.Condition{ + Type: apis.ConditionType(v1beta1.TaskRunConditionResultsVerified.String()), + Status: corev1.ConditionTrue, + }, + setAnnotations: map[string]string{ + spire.NotVerifiedAnnotation: "yes", + }, + success: false, + }, + } + + for _, tt := range 
testCases { + for _, tr := range testTaskRuns() { + + success := func() bool { + err := cc.AppendStatusInternalAnnotation(ctx, tr) + if err != nil { + return false + } + + if tr.Status.Status.Annotations == nil { + tr.Status.Status.Annotations = map[string]string{} + } + + if tt.setAnnotations != nil { + for k, v := range tt.setAnnotations { + tr.Status.Status.Annotations[k] = v + } + } + + tr.Status.Status.Conditions = append(tr.Status.Status.Conditions, tt.tamperCondition) + + err = VerifySpire(ctx, tr, cc, logtesting.TestLogger(t)) + return err == nil + }() + + if success != tt.success { + t.Fatalf("test %v expected verify %v, got %v", tt.desc, tt.success, success) + } + } + } +} + +func objectMeta(name, ns string) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Labels: map[string]string{}, + Annotations: map[string]string{}, + } +} + +func testTaskRuns() []*v1beta1.TaskRun { + return []*v1beta1.TaskRun{ + // taskRun 1 + { + ObjectMeta: objectMeta("taskrun-example", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{ + Name: "taskname", + APIVersion: "a1", + }, + ServiceAccountName: "test-sa", + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + apis.Condition{ + Type: apis.ConditionSucceeded, + }, + }, + }, + }, + }, + // taskRun 2 + { + ObjectMeta: objectMeta("taskrun-example-populated", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "unit-test-task"}, + ServiceAccountName: "test-sa", + Resources: &v1beta1.TaskRunResources{}, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + apis.Condition{ + Type: apis.ConditionSucceeded, + }, + }, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: 
&corev1.ContainerStateTerminated{ExitCode: int32(0)}, + }, + }}, + }, + }, + }, + // taskRun 3 + { + ObjectMeta: objectMeta("taskrun-example-with-objmeta", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "unit-test-task"}, + ServiceAccountName: "test-sa", + Resources: &v1beta1.TaskRunResources{}, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + apis.Condition{ + Type: apis.ConditionSucceeded, + }, + }, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: int32(0)}, + }, + }}, + }, + }, + }, + // taskRun 4 + { + ObjectMeta: objectMeta("taskrun-example-with-objmeta-annotations", "foo"), + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "unit-test-task"}, + ServiceAccountName: "test-sa", + Resources: &v1beta1.TaskRunResources{}, + Timeout: &metav1.Duration{Duration: config.DefaultTimeoutMinutes * time.Minute}, + }, + Status: v1beta1.TaskRunStatus{ + Status: duckv1beta1.Status{ + Conditions: duckv1beta1.Conditions{ + apis.Condition{ + Type: apis.ConditionSucceeded, + }, + }, + Annotations: map[string]string{ + "annotation1": "a1value", + "annotation2": "a2value", + }, + }, + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ExitCode: int32(0)}, + }, + }}, + }, + }, + }, + } +} diff --git a/pkg/chains/formats/intotoite6/intotoite6.go b/pkg/chains/formats/intotoite6/intotoite6.go index 4c85b42a83..46f0ea2ba7 100644 --- a/pkg/chains/formats/intotoite6/intotoite6.go +++ b/pkg/chains/formats/intotoite6/intotoite6.go @@ -17,6 +17,7 @@ limitations under the License. 
package intotoite6 import ( + "context" "fmt" "sort" "strings" @@ -27,8 +28,9 @@ import ( "github.com/tektoncd/chains/pkg/artifacts" "github.com/tektoncd/chains/pkg/chains/formats" "github.com/tektoncd/chains/pkg/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/spire" + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" "go.uber.org/zap" "github.com/google/go-containerregistry/pkg/name" @@ -42,26 +44,42 @@ const ( ) type InTotoIte6 struct { - builderID string - logger *zap.SugaredLogger + builderID string + logger *zap.SugaredLogger + spireEnabled bool + spireControllerAPI spire.ControllerAPIClient } -func NewFormatter(cfg config.Config, logger *zap.SugaredLogger) (formats.Payloader, error) { - return &InTotoIte6{ - builderID: cfg.Builder.ID, - logger: logger, - }, nil +func NewFormatter(ctx context.Context, cfg config.Config, logger *zap.SugaredLogger) (formats.Payloader, error) { + + intotoIte6 := &InTotoIte6{ + builderID: cfg.Builder.ID, + logger: logger, + spireEnabled: cfg.SPIRE.Enabled, + spireControllerAPI: spire.GetControllerAPIClient(ctx), + } + + intotoIte6.spireControllerAPI.SetConfig((spireconfig.SpireConfig{ + SocketPath: cfg.SPIRE.SocketPath, + })) + + return intotoIte6, nil } func (i *InTotoIte6) Wrap() bool { return true } -func (i *InTotoIte6) CreatePayload(obj interface{}) (interface{}, error) { +func (i *InTotoIte6) CreatePayload(ctx context.Context, obj interface{}) (interface{}, error) { var tr *v1beta1.TaskRun switch v := obj.(type) { case *v1beta1.TaskRun: tr = v + if i.spireEnabled { + if err := formats.VerifySpire(ctx, tr, i.spireControllerAPI, i.logger); err != nil { + return nil, err + } + } default: return nil, fmt.Errorf("intoto does not support type: %s", v) } @@ -177,7 +195,7 @@ func GetSubjectDigests(tr *v1beta1.TaskRun, logger *zap.SugaredLogger) []intoto. 
continue } // similarly, we could do this for other pipeline resources or whatever thing replaces them - if output.PipelineResourceBinding.ResourceSpec.Type == v1alpha1.PipelineResourceTypeImage { + if output.PipelineResourceBinding.ResourceSpec.Type == v1beta1.PipelineResourceTypeImage { // get the url and digest, and save as a subject var url, digest string for _, s := range tr.Status.ResourcesResult { @@ -224,7 +242,7 @@ func materials(tr *v1beta1.TaskRun) []slsa.ProvenanceMaterial { // check for a Git PipelineResource for _, input := range tr.Spec.Resources.Inputs { - if input.ResourceSpec == nil || input.ResourceSpec.Type != v1alpha1.PipelineResourceTypeGit { + if input.ResourceSpec == nil || input.ResourceSpec.Type != v1beta1.PipelineResourceTypeGit { continue } diff --git a/pkg/chains/formats/intotoite6/intotoite6_test.go b/pkg/chains/formats/intotoite6/intotoite6_test.go index c8c7ac7413..3abe9f5b4b 100644 --- a/pkg/chains/formats/intotoite6/intotoite6_test.go +++ b/pkg/chains/formats/intotoite6/intotoite6_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package intotoite6 import ( + "context" "encoding/json" "io/ioutil" "testing" @@ -29,6 +30,7 @@ import ( "github.com/in-toto/in-toto-golang/in_toto" slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" logtesting "knative.dev/pkg/logging/testing" ) @@ -37,7 +39,7 @@ var e1BuildFinished = time.Unix(1617011415, 0) func TestCreatePayload1(t *testing.T) { tr := taskrunFromFile(t, "testdata/taskrun1.json") - + ctx, _ := ttesting.SetupDefaultContext(t) cfg := config.Config{ Builder: config.BuilderConfig{ ID: "test_builder-1", @@ -103,9 +105,9 @@ func TestCreatePayload1(t *testing.T) { }, }, } - i, _ := NewFormatter(cfg, logtesting.TestLogger(t)) + i, _ := NewFormatter(ctx, cfg, logtesting.TestLogger(t)) - got, err := i.CreatePayload(tr) + got, err := i.CreatePayload(context.Background(), tr) if err != nil { t.Errorf("unexpected error: %s", err.Error()) @@ -117,6 +119,7 @@ func TestCreatePayload1(t *testing.T) { func TestCreatePayload2(t *testing.T) { tr := taskrunFromFile(t, "testdata/taskrun2.json") + ctx, _ := ttesting.SetupDefaultContext(t) cfg := config.Config{ Builder: config.BuilderConfig{ ID: "test_builder-2", @@ -150,8 +153,8 @@ func TestCreatePayload2(t *testing.T) { }, }, } - i, _ := NewFormatter(cfg, logtesting.TestLogger(t)) - got, err := i.CreatePayload(tr) + i, _ := NewFormatter(ctx, cfg, logtesting.TestLogger(t)) + got, err := i.CreatePayload(context.Background(), tr) if err != nil { t.Errorf("unexpected error: %s", err.Error()) @@ -163,15 +166,16 @@ func TestCreatePayload2(t *testing.T) { func TestCreatePayloadNilTaskRef(t *testing.T) { tr := taskrunFromFile(t, "testdata/taskrun1.json") + ctx, _ := ttesting.SetupDefaultContext(t) tr.Spec.TaskRef = nil cfg := config.Config{ Builder: config.BuilderConfig{ ID: "testid", }, } - f, _ := NewFormatter(cfg, logtesting.TestLogger(t)) + f, _ := NewFormatter(ctx, cfg, 
logtesting.TestLogger(t)) - p, err := f.CreatePayload(tr) + p, err := f.CreatePayload(context.Background(), tr) if err != nil { t.Errorf("unexpected error: %s", err.Error()) } @@ -184,6 +188,7 @@ func TestCreatePayloadNilTaskRef(t *testing.T) { func TestMultipleSubjects(t *testing.T) { tr := taskrunFromFile(t, "testdata/taskrun-multiple-subjects.json") + ctx, _ := ttesting.SetupDefaultContext(t) cfg := config.Config{ Builder: config.BuilderConfig{ ID: "test_builder-multiple", @@ -230,8 +235,8 @@ func TestMultipleSubjects(t *testing.T) { }, } - i, _ := NewFormatter(cfg, logtesting.TestLogger(t)) - got, err := i.CreatePayload(tr) + i, _ := NewFormatter(ctx, cfg, logtesting.TestLogger(t)) + got, err := i.CreatePayload(context.Background(), tr) if err != nil { t.Errorf("unexpected error: %s", err.Error()) } @@ -241,13 +246,14 @@ func TestMultipleSubjects(t *testing.T) { } func TestNewFormatter(t *testing.T) { + ctx, _ := ttesting.SetupDefaultContext(t) t.Run("Ok", func(t *testing.T) { cfg := config.Config{ Builder: config.BuilderConfig{ ID: "testid", }, } - f, err := NewFormatter(cfg, logtesting.TestLogger(t)) + f, err := NewFormatter(ctx, cfg, logtesting.TestLogger(t)) if f == nil { t.Error("Failed to create formatter") } @@ -258,15 +264,16 @@ func TestNewFormatter(t *testing.T) { } func TestCreatePayloadError(t *testing.T) { + ctx, _ := ttesting.SetupDefaultContext(t) cfg := config.Config{ Builder: config.BuilderConfig{ ID: "testid", }, } - f, _ := NewFormatter(cfg, logtesting.TestLogger(t)) + f, _ := NewFormatter(ctx, cfg, logtesting.TestLogger(t)) t.Run("Invalid type", func(t *testing.T) { - p, err := f.CreatePayload("not a task ref") + p, err := f.CreatePayload(context.Background(), "not a task ref") if p != nil { t.Errorf("Unexpected payload") diff --git a/pkg/chains/formats/simple/simple.go b/pkg/chains/formats/simple/simple.go index cc45573f79..6fd68097b7 100644 --- a/pkg/chains/formats/simple/simple.go +++ b/pkg/chains/formats/simple/simple.go @@ -14,6 +14,7 
@@ limitations under the License. package simple import ( + "context" "fmt" "github.com/sigstore/sigstore/pkg/signature/payload" @@ -30,7 +31,7 @@ type SimpleSigning struct { type SimpleContainerImage payload.SimpleContainerImage // CreatePayload implements the Payloader interface. -func (i *SimpleSigning) CreatePayload(obj interface{}) (interface{}, error) { +func (i *SimpleSigning) CreatePayload(ctx context.Context, obj interface{}) (interface{}, error) { switch v := obj.(type) { case name.Digest: format := NewSimpleStruct(v) diff --git a/pkg/chains/formats/simple/simple_test.go b/pkg/chains/formats/simple/simple_test.go index efcbfd8480..a3be8461a0 100644 --- a/pkg/chains/formats/simple/simple_test.go +++ b/pkg/chains/formats/simple/simple_test.go @@ -14,6 +14,7 @@ limitations under the License. package simple import ( + "context" "reflect" "testing" @@ -61,7 +62,7 @@ func TestSimpleSigning_CreatePayload(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { i := &SimpleSigning{} - got, err := i.CreatePayload(tt.obj) + got, err := i.CreatePayload(context.Background(), tt.obj) if (err != nil) != tt.wantErr { t.Errorf("SimpleSigning.CreatePayload() error = %v, wantErr %v", err, tt.wantErr) return @@ -81,7 +82,7 @@ func TestImageName(t *testing.T) { obj := makeDigest(t, img) i := &SimpleSigning{} - format, err := i.CreatePayload(obj) + format, err := i.CreatePayload(context.Background(), obj) if err != nil { t.Fatal(err) } diff --git a/pkg/chains/formats/tekton/tekton.go b/pkg/chains/formats/tekton/tekton.go index b47393dc36..43b82c0ddf 100644 --- a/pkg/chains/formats/tekton/tekton.go +++ b/pkg/chains/formats/tekton/tekton.go @@ -14,31 +14,54 @@ limitations under the License. 
package tekton import ( + "context" "fmt" "github.com/tektoncd/chains/pkg/chains/formats" + "github.com/tektoncd/chains/pkg/config" + "github.com/tektoncd/pipeline/pkg/spire" + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" + "go.uber.org/zap" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) // Tekton is a formatter that just captures the TaskRun Status with no modifications. type Tekton struct { + logger *zap.SugaredLogger + spireEnabled bool + spireControllerAPI spire.ControllerAPIClient } -func NewFormatter() (formats.Payloader, error) { - return &Tekton{}, nil +func NewFormatter(ctx context.Context, cfg config.Config, l *zap.SugaredLogger) (formats.Payloader, error) { + tekton := &Tekton{ + logger: l, + spireEnabled: cfg.SPIRE.Enabled, + spireControllerAPI: spire.GetControllerAPIClient(ctx), + } + + tekton.spireControllerAPI.SetConfig((spireconfig.SpireConfig{ + SocketPath: cfg.SPIRE.SocketPath, + })) + + return tekton, nil } // CreatePayload implements the Payloader interface. -func (i *Tekton) CreatePayload(obj interface{}) (interface{}, error) { - +func (i *Tekton) CreatePayload(ctx context.Context, obj interface{}) (interface{}, error) { + var tr *v1beta1.TaskRun switch v := obj.(type) { case *v1beta1.TaskRun: + tr = v + if i.spireEnabled { + if err := formats.VerifySpire(ctx, tr, i.spireControllerAPI, i.logger); err != nil { + return nil, err + } + } return v.Status, nil default: return nil, fmt.Errorf("unsupported type %s", v) } - } func (i *Tekton) Type() formats.PayloadType { diff --git a/pkg/chains/formats/tekton/tekton_test.go b/pkg/chains/formats/tekton/tekton_test.go index dd9bd8ecb9..ce29f22fb1 100644 --- a/pkg/chains/formats/tekton/tekton_test.go +++ b/pkg/chains/formats/tekton/tekton_test.go @@ -14,28 +14,46 @@ limitations under the License. 
package tekton import ( + "context" "reflect" "testing" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" + "github.com/tektoncd/pipeline/pkg/spire" + "knative.dev/pkg/logging" ) func TestTekton_CreatePayload(t *testing.T) { + spireMockClient := &spire.MockClient{} + ctx, _ := ttesting.SetupDefaultContext(t) + logger := logging.FromContext(ctx) + var ( + cc spire.ControllerAPIClient = spireMockClient + ) + tests := []struct { - name string - tr *v1beta1.TaskRun + name string + tr *v1beta1.TaskRun + tekton *Tekton }{ { name: "tr", tr: &v1beta1.TaskRun{ Status: v1beta1.TaskRunStatus{}, }, + tekton: &Tekton{ + logger: logger, + spireEnabled: false, + spireControllerAPI: cc, + }, }, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - i := &Tekton{} - got, err := i.CreatePayload(tt.tr) + + got, err := tt.tekton.CreatePayload(context.Background(), tt.tr) if err != nil { t.Errorf("Tekton.CreatePayload() error = %v", err) return diff --git a/pkg/chains/signing.go b/pkg/chains/signing.go index 2f8a47e2bd..68e77e8beb 100644 --- a/pkg/chains/signing.go +++ b/pkg/chains/signing.go @@ -80,13 +80,13 @@ func allSigners(ctx context.Context, sp string, cfg config.Config, l *zap.Sugare return all } -func AllFormatters(cfg config.Config, l *zap.SugaredLogger) map[formats.PayloadType]formats.Payloader { +func AllFormatters(ctx context.Context, cfg config.Config, l *zap.SugaredLogger) map[formats.PayloadType]formats.Payloader { all := map[formats.PayloadType]formats.Payloader{} for _, f := range formats.AllFormatters { switch f { case formats.PayloadTypeTekton: - formatter, err := tekton.NewFormatter() + formatter, err := tekton.NewFormatter(ctx, cfg, l) if err != nil { l.Warnf("error configuring tekton formatter: %s", err) } @@ -98,7 +98,7 @@ func AllFormatters(cfg config.Config, l *zap.SugaredLogger) map[formats.PayloadT } all[f] = formatter case formats.PayloadTypeInTotoIte6: - formatter, err := 
intotoite6.NewFormatter(cfg, l) + formatter, err := intotoite6.NewFormatter(ctx, cfg, l) if err != nil { l.Warnf("error configuring intoto formatter: %s", err) } @@ -149,9 +149,10 @@ func (ts *TaskRunSigner) SignTaskRun(ctx context.Context, tr *v1beta1.TaskRun) e // Go through each object one at a time. for _, obj := range objects { - payload, err := payloader.CreatePayload(obj) + payload, err := payloader.CreatePayload(ctx, obj) if err != nil { logger.Error(err) + merr = multierror.Append(merr, err) continue } logger.Infof("Created payload of type %s for TaskRun %s/%s", string(payloadFormat), tr.Namespace, tr.Name) diff --git a/pkg/chains/signing_test.go b/pkg/chains/signing_test.go index ca234c2d5a..3e35ebd036 100644 --- a/pkg/chains/signing_test.go +++ b/pkg/chains/signing_test.go @@ -178,7 +178,7 @@ func TestTaskRunSigner_SignTaskRun(t *testing.T) { logger := logging.FromContext(ctx) ts := &TaskRunSigner{ - Formatters: AllFormatters(*cfg, logger), + Formatters: AllFormatters(ctx, *cfg, logger), Backends: fakeAllBackends(tt.backends), SecretPath: "./signing/x509/testdata/", Pipelineclientset: ps, @@ -251,7 +251,7 @@ func TestTaskRunSigner_Transparency(t *testing.T) { logger := logging.FromContext(ctx) ts := &TaskRunSigner{ - Formatters: AllFormatters(*cfg, logger), + Formatters: AllFormatters(ctx, *cfg, logger), Backends: fakeAllBackends(backends), SecretPath: "./signing/x509/testdata/", Pipelineclientset: ps, diff --git a/pkg/config/config.go b/pkg/config/config.go index 18715ad565..d9bcbee18b 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -32,6 +32,7 @@ type Config struct { Signers SignerConfigs Builder BuilderConfig Transparency TransparencyConfig + SPIRE SPIREConfig } // ArtifactConfigs contains the configuration for how to sign/store/format the signatures for each artifact type @@ -67,6 +68,11 @@ type BuilderConfig struct { ID string } +type SPIREConfig struct { + Enabled bool + SocketPath string +} + type X509Signer struct { FulcioEnabled 
bool FulcioAddr string @@ -187,6 +193,10 @@ const ( transparencyEnabledKey = "transparency.enabled" transparencyURLKey = "transparency.url" + // SPIRE config + spireEnabledKey = "spire.enabled" + spireSocketPath = "spire.socketPath" + ChainsConfig = "chains-config" ) @@ -220,6 +230,10 @@ func defaultConfig() *Config { Builder: BuilderConfig{ ID: "https://tekton.dev/chains/v2", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, } } @@ -258,6 +272,10 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { oneOf(transparencyEnabledKey, &cfg.Transparency.VerifyAnnotation, "manual"), asString(transparencyURLKey, &cfg.Transparency.URL), + // Spire config + asBool(spireEnabledKey, &cfg.SPIRE.Enabled), + asString(spireSocketPath, &cfg.SPIRE.SocketPath), + asString(kmsSignerKMSRef, &cfg.Signers.KMS.KMSRef), asString(kmsAuthAddress, &cfg.Signers.KMS.Auth.Address), asString(kmsAuthToken, &cfg.Signers.KMS.Auth.Token), diff --git a/pkg/config/store_test.go b/pkg/config/store_test.go index 56609f73c0..ea7622aa39 100644 --- a/pkg/config/store_test.go +++ b/pkg/config/store_test.go @@ -124,6 +124,10 @@ func TestParse(t *testing.T) { Transparency: TransparencyConfig{ URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { @@ -151,6 +155,10 @@ func TestParse(t *testing.T) { Transparency: TransparencyConfig{ URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { @@ -178,6 +186,10 @@ func TestParse(t *testing.T) { Transparency: TransparencyConfig{ URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { @@ -205,6 +217,10 @@ func TestParse(t *testing.T) { Transparency: TransparencyConfig{ URL: "https://rekor.sigstore.dev", }, + 
SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { @@ -232,6 +248,10 @@ func TestParse(t *testing.T) { Transparency: TransparencyConfig{ URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { @@ -259,6 +279,10 @@ func TestParse(t *testing.T) { Transparency: TransparencyConfig{ URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { @@ -286,6 +310,10 @@ func TestParse(t *testing.T) { Transparency: TransparencyConfig{ URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { @@ -313,6 +341,10 @@ func TestParse(t *testing.T) { Transparency: TransparencyConfig{ URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { @@ -342,6 +374,10 @@ func TestParse(t *testing.T) { VerifyAnnotation: true, URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { @@ -369,6 +405,10 @@ func TestParse(t *testing.T) { Transparency: TransparencyConfig{ URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { name: "fulcio", @@ -405,6 +445,10 @@ func TestParse(t *testing.T) { Transparency: TransparencyConfig{ URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { name: "rekor - true", @@ -439,6 +483,10 @@ func TestParse(t *testing.T) { Enabled: true, URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: 
"unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, { name: "rekor - manual", @@ -474,6 +522,48 @@ func TestParse(t *testing.T) { VerifyAnnotation: true, URL: "https://rekor.sigstore.dev", }, + SPIRE: SPIREConfig{ + Enabled: false, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, + }, + }, { + name: "spire enabled", + data: map[string]string{ + "spire.enabled": "true", + "spire.socketPath": "unix:///spiffe-workload-api/spire-agent.sock", + }, + taskrunEnabled: true, + ociEnbaled: true, + want: Config{ + Builder: BuilderConfig{ + "https://tekton.dev/chains/v2", + }, + Artifacts: ArtifactConfigs{ + TaskRuns: Artifact{ + Format: "tekton", + Signer: "x509", + StorageBackend: sets.NewString("tekton"), + }, + OCI: Artifact{ + Format: "simplesigning", + StorageBackend: sets.NewString("oci"), + Signer: "x509", + }, + }, + Signers: SignerConfigs{ + X509: X509Signer{ + FulcioAddr: "https://fulcio.sigstore.dev", + FulcioOIDCIssuer: "https://oauth2.sigstore.dev/auth", + }, + }, + Transparency: TransparencyConfig{ + URL: "https://rekor.sigstore.dev", + }, + SPIRE: SPIREConfig{ + Enabled: true, + SocketPath: "unix:///spiffe-workload-api/spire-agent.sock", + }, }, }, } @@ -489,6 +579,9 @@ func TestParse(t *testing.T) { if got.Artifacts.TaskRuns.Enabled() != tt.taskrunEnabled { t.Errorf("Taskrun artifact enable mismatch") } + if got.SPIRE.Enabled != tt.want.SPIRE.Enabled { + t.Errorf("Spire enabled mismatch") + } if diff := cmp.Diff(*got, tt.want); diff != "" { t.Errorf("parse() = %v", diff) } diff --git a/pkg/reconciler/taskrun/controller.go b/pkg/reconciler/taskrun/controller.go index 8c10d259f8..fa2addd068 100644 --- a/pkg/reconciler/taskrun/controller.go +++ b/pkg/reconciler/taskrun/controller.go @@ -49,7 +49,7 @@ func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl cfg := *value.(*config.Config) // get all formatters for formatting payload - tsSigner.Formatters = chains.AllFormatters(cfg, logger) + tsSigner.Formatters = 
chains.AllFormatters(ctx, cfg, logger) // get all backends for storing provenance backends, err := storage.InitializeBackends(ctx, pipelineClient, kubeClient, logger, cfg) diff --git a/test/e2e-common.sh b/test/e2e-common.sh index 15f96552a7..a1280dce3d 100755 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -29,6 +29,7 @@ RELEASE_YAML=${RELEASE_YAML:-} source $(dirname $0)/../vendor/github.com/tektoncd/plumbing/scripts/e2e-tests.sh +# install_tkn installs tekton cli function install_tkn() { echo ">> Installing tkn" TKN_VERSION=0.20.0 @@ -38,6 +39,7 @@ function install_tkn() { tar xvzf tkn_$TKN_VERSION_Linux_x86_64.tar.gz -C /usr/local/bin/ tkn } +# install_pipeline_crd installs tekton pipelines function install_pipeline_crd() { local latestreleaseyaml echo ">> Deploying Tekton Pipelines" @@ -59,61 +61,120 @@ function install_pipeline_crd() { wait_until_pods_running tekton-pipelines || fail_test "Tekton Pipeline did not come up" } -function install_chains() { - echo ">> Deploying Tekton Chains" - ko apply -f config/ || fail_test "Tekton Chains installation failed" - - # Wait for pods to be running in the namespaces we are deploying to - wait_until_pods_running tekton-chains || fail_test "Tekton Chains did not come up" -} - -function chains_patch_spire() { - kubectl patch -n tekton-chains deployment tekton-chains-controller \ - --patch-file "$(dirname $0)/testdata/chains-patch-spire.json" - # Wait for pods to be running in the namespaces we are deploying to - wait_until_pods_running tekton-chains || fail_test "Tekton Chains did not come up after patching" -} - -function dump_logs() { - echo ">> Tekton Chains Logs" - kubectl logs deployment/tekton-chains-controller -n tekton-chains -} - +# spire_apply is used to create spire registration entries into the spire server function spire_apply() { if [ $# -lt 2 -o "$1" != "-spiffeID" ]; then echo "spire_apply requires a spiffeID as the first arg" >&2 exit 1 fi - show=$(kubectl exec -n spire spire-server-0 -c 
spire-server -- \ + show=$(kubectl exec -n spire deployment/spire-server -- \ /opt/spire/bin/spire-server entry show $1 $2) if [ "$show" != "Found 0 entries" ]; then # delete to recreate entryid=$(echo "$show" | grep "^Entry ID" | cut -f2 -d:) - kubectl exec -n spire spire-server-0 -c spire-server -- \ + kubectl exec -n spire deployment/spire-server -- \ /opt/spire/bin/spire-server entry delete -entryID $entryid fi - kubectl exec -n spire spire-server-0 -c spire-server -- \ + kubectl exec -n spire deployment/spire-server -- \ /opt/spire/bin/spire-server entry create "$@" } +# install_spire uses the vendored spire deployment yamls to install spire server, agent and the CSI driver. +# once the server is running, registering the spire agent node, pipeline controller and chains controller function install_spire() { echo ">> Deploying Spire" - kubectl create ns spire --dry-run=client -o yaml | kubectl apply -f - - kubectl -n spire apply -f "$(dirname $0)/testdata/spire.yaml" - wait_until_pods_running spire || fail_test "Spire did not come up" + DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + + echo "Creating SPIRE namespace..." + kubectl create ns spire + + echo "Applying SPIFFE CSI Driver configuration..." 
+ kubectl apply -f "$DIR"/testdata/spire/spiffe-csi-driver.yaml + + echo "Deploying SPIRE server" + kubectl apply -f "$DIR"/testdata/spire/spire-server.yaml + + echo "Deploying SPIRE agent" + kubectl apply -f "$DIR"/testdata/spire/spire-agent.yaml + + wait_until_pods_running spire || fail_test "SPIRE did not come up" + spire_apply \ -spiffeID spiffe://example.org/ns/spire/node/example \ - -selector k8s_psat:cluster:example \ + -selector k8s_psat:cluster:example-cluster \ -selector k8s_psat:agent_ns:spire \ -selector k8s_psat:agent_sa:spire-agent \ -node + spire_apply \ + -spiffeID spiffe://example.org/ns/tekton-pipelines/sa/tekton-pipelines-controller \ + -parentID spiffe://example.org/ns/spire/node/example \ + -selector k8s:ns:tekton-pipelines \ + -selector k8s:pod-label:app:tekton-pipelines-controller \ + -selector k8s:sa:tekton-pipelines-controller \ + -admin spire_apply \ -spiffeID spiffe://example.org/ns/tekton-chains/sa/tekton-chains-controller \ -parentID spiffe://example.org/ns/spire/node/example \ -selector k8s:ns:tekton-chains \ + -selector k8s:pod-label:app:tekton-chains-controller \ -selector k8s:sa:tekton-chains-controller } +# patch_pipline_spire patches the pipeline controller to add in the Spire agent workload API mount +function patch_pipline_spire() { + echo ">> Patching Tekton Pipelines for Spire" + kubectl patch \ + deployment tekton-pipelines-controller \ + -n tekton-pipelines \ + --patch-file "$DIR"/testdata/patch/pipeline-controller-spire.json + + verify_pipeline_installation +} + +# patch_pipline_CM_spire patches the pipeline feature-flags configMap to enable spire +function patch_pipline_CM_spire() { + echo ">> Patching Tekton Pipelines CM feature-flags for Spire" + kubectl patch \ + cm feature-flags \ + -n tekton-pipelines \ + --patch-file "$DIR"/testdata/patch/pipeline-cm-spire.json + + verify_pipeline_installation +} + +# patch_chains_spire patches the Chains controller to add in the Spire agent workload API and vault mount +function 
patch_chains_spire() { + echo ">> Patching Tekton Chains for Spire" + kubectl patch \ + deployment tekton-chains-controller \ + -n tekton-chains \ + --patch-file "$DIR"/testdata/patch/chains-controller-spire.json + + # Wait for pods to be running in the namespaces we are deploying to + wait_until_pods_running tekton-chains || fail_test "Tekton Chains did not come up" +} + +function verify_pipeline_installation() { + # Make sure that everything is cleaned up in the current namespace. + delete_pipeline_resources + + # Wait for pods to be running in the namespaces we are deploying to + wait_until_pods_running tekton-pipelines || fail_test "Tekton Pipeline did not come up" +} + +function install_chains() { + echo ">> Deploying Tekton Chains" + ko apply -f config/ || fail_test "Tekton Chains installation failed" + + # Wait for pods to be running in the namespaces we are deploying to + wait_until_pods_running tekton-chains || fail_test "Tekton Chains did not come up" +} + +function dump_logs() { + echo ">> Tekton Chains Logs" + kubectl logs deployment/tekton-chains-controller -n tekton-chains +} + function vault_exec() { envcmd="" if [ -n "$ROOT_TOKEN" ]; then @@ -167,6 +228,7 @@ EOF vault_exec read -format=json transit/keys/e2e \ | jq -r .data.keys.\"1\".public_key >"$(dirname $0)/testdata/vault.pub" } + function install_kafka() { echo ">> Deploying Kafka" helm repo add bitnami https://charts.bitnami.com/bitnami diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh index 773f94dfb8..0bf810cb01 100755 --- a/test/e2e-tests.sh +++ b/test/e2e-tests.sh @@ -25,19 +25,23 @@ initialize $@ header "Setting up environment" +install_spire + # Test against nightly instead of latest. 
install_tkn export RELEASE_YAML="https://storage.googleapis.com/tekton-releases-nightly/pipeline/latest/release.yaml" install_pipeline_crd -install_chains +patch_pipline_spire -install_spire +patch_pipline_CM_spire + +install_chains install_vault -chains_patch_spire +patch_chains_spire failed=0 diff --git a/test/e2e_test.go b/test/e2e_test.go index dd7942f6db..9a97ab677f 100644 --- a/test/e2e_test.go +++ b/test/e2e_test.go @@ -41,9 +41,10 @@ import ( "cloud.google.com/go/storage" "github.com/tektoncd/chains/pkg/chains" "github.com/tektoncd/chains/pkg/chains/provenance" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" logtesting "knative.dev/pkg/logging/testing" ) @@ -61,19 +62,33 @@ func TestInstall(t *testing.T) { } func TestTektonStorage(t *testing.T) { + tektonStorageTest(t, false) +} + +func TestTektonStorageWithSpire(t *testing.T) { + tektonStorageTest(t, true) +} + +func tektonStorageTest(t *testing.T, spireEnabled bool) { ctx := logtesting.TestContextWithLogger(t) c, ns, cleanup := setup(ctx, t, setupOpts{}) defer cleanup() - // Setup the right config. - resetConfig := setConfigMap(ctx, t, c, map[string]string{ + cm := map[string]string{ "artifacts.taskrun.format": "tekton", "artifacts.taskrun.signer": "x509", "artifacts.taskrun.storage": "tekton", "artifacts.oci.format": "simplesigning", "artifacts.oci.signer": "x509", "artifacts.oci.storage": "tekton", - }) + } + + if spireEnabled { + enableCMSpire(cm) + } + + // Setup the right config. 
+ resetConfig := setConfigMap(ctx, t, c, cm) defer resetConfig() tr, err := c.PipelineClient.TektonV1beta1().TaskRuns(ns).Create(ctx, &imageTaskRun, metav1.CreateOptions{}) @@ -92,12 +107,19 @@ func TestTektonStorage(t *testing.T) { } func TestRekor(t *testing.T) { + rekorTest(t, false) +} + +func TestRekorWithSpire(t *testing.T) { + rekorTest(t, true) +} + +func rekorTest(t *testing.T, spireEnabled bool) { ctx := logtesting.TestContextWithLogger(t) c, ns, cleanup := setup(ctx, t, setupOpts{}) defer cleanup() - // Setup the right config. - resetConfig := setConfigMap(ctx, t, c, map[string]string{ + cm := map[string]string{ "artifacts.taskrun.format": "tekton", "artifacts.taskrun.signer": "x509", "artifacts.taskrun.storage": "tekton", @@ -105,7 +127,14 @@ func TestRekor(t *testing.T) { "artifacts.oci.signer": "x509", "artifacts.oci.storage": "tekton", "transparency.enabled": "manual", - }) + } + + if spireEnabled { + enableCMSpire(cm) + } + + // Setup the right config. + resetConfig := setConfigMap(ctx, t, c, cm) defer resetConfig() tr, err := c.PipelineClient.TektonV1beta1().TaskRuns(ns).Create(ctx, &imageTaskRun, metav1.CreateOptions{}) @@ -126,8 +155,15 @@ func TestRekor(t *testing.T) { // Verify the payload signature. verifySignature(ctx, t, c, tr) } - func TestOCISigning(t *testing.T) { + ociSigningTest(t, false) +} + +func TestOCISigningWithSpire(t *testing.T) { + ociSigningTest(t, true) +} + +func ociSigningTest(t *testing.T, spireEnabled bool) { tests := []struct { name string opts setupOpts @@ -147,8 +183,14 @@ func TestOCISigning(t *testing.T) { c, ns, cleanup := setup(ctx, t, test.opts) defer cleanup() + cm := map[string]string{"artifacts.oci.storage": "tekton", "artifacts.taskrun.format": "tekton"} + + if spireEnabled { + enableCMSpire(cm) + } + // Setup the right config. 
- resetConfig := setConfigMap(ctx, t, c, map[string]string{"artifacts.oci.storage": "tekton", "artifacts.taskrun.format": "tekton"}) + resetConfig := setConfigMap(ctx, t, c, cm) defer resetConfig() @@ -193,6 +235,14 @@ func TestOCISigning(t *testing.T) { } func TestGCSStorage(t *testing.T) { + gcsStorageTest(t, false) +} + +func TestGCSStorageWithSpire(t *testing.T) { + gcsStorageTest(t, true) +} + +func gcsStorageTest(t *testing.T, spireEnabled bool) { ctx := logtesting.TestContextWithLogger(t) if metadata.OnGCE() { t.Skip("Skipping, integration tests do not support GCS secrets yet.") @@ -208,11 +258,17 @@ func TestGCSStorage(t *testing.T) { c, ns, cleanup := setup(ctx, t, setupOpts{}) defer cleanup() - resetConfig := setConfigMap(ctx, t, c, map[string]string{ + cm := map[string]string{ "artifacts.taskrun.signer": "x509", "artifacts.taskrun.storage": "gcs", "storage.gcs.bucket": bucketName, - }) + } + + if spireEnabled { + enableCMSpire(cm) + } + + resetConfig := setConfigMap(ctx, t, c, cm) defer resetConfig() time.Sleep(3 * time.Second) @@ -232,20 +288,34 @@ func TestGCSStorage(t *testing.T) { } func TestFulcio(t *testing.T) { + fulcioTest(t, false) +} + +func TestFulcioWithSpire(t *testing.T) { + fulcioTest(t, true) +} + +func fulcioTest(t *testing.T, spireEnabled bool) { ctx := logtesting.TestContextWithLogger(t) if metadata.OnGCE() { t.Skip("Skipping, integration tests do not support workload identity yet.") } c, ns, cleanup := setup(ctx, t, setupOpts{ns: "default"}) defer cleanup() - resetConfig := setConfigMap(ctx, t, c, map[string]string{ + cm := map[string]string{ "artifacts.taskrun.storage": "tekton", "artifacts.taskrun.signer": "x509", "artifacts.taskrun.format": "in-toto", "artifacts.oci.signer": "x509", "signers.x509.fulcio.enabled": "true", "transparency.enabled": "false", - }) + } + + if spireEnabled { + enableCMSpire(cm) + } + + resetConfig := setConfigMap(ctx, t, c, cm) defer resetConfig() time.Sleep(3 * time.Second) @@ -320,11 +390,19 @@ func (w 
*reverseDSSEVerifier) VerifySignature(s io.Reader, m io.Reader, opts ... } func TestOCIStorage(t *testing.T) { + ociStorageTest(t, false) +} + +func TestOCIStorageWithSpire(t *testing.T) { + ociStorageTest(t, true) +} + +func ociStorageTest(t *testing.T, spireEnabled bool) { ctx := logtesting.TestContextWithLogger(t) c, ns, cleanup := setup(ctx, t, setupOpts{registry: true}) defer cleanup() - resetConfig := setConfigMap(ctx, t, c, map[string]string{ + cm := map[string]string{ "artifacts.oci.format": "simplesigning", "artifacts.oci.storage": "oci", "artifacts.oci.signer": "x509", @@ -332,7 +410,13 @@ func TestOCIStorage(t *testing.T) { "artifacts.taskrun.signer": "x509", "artifacts.taskrun.storage": "oci", "storage.oci.repository.insecure": "true", - }) + } + + if spireEnabled { + enableCMSpire(cm) + } + + resetConfig := setConfigMap(ctx, t, c, cm) defer resetConfig() time.Sleep(3 * time.Second) @@ -368,11 +452,19 @@ func TestOCIStorage(t *testing.T) { } func TestMultiBackendStorage(t *testing.T) { + multiBackendStorageTest(t, false) +} + +func TestMultiBackendStorageWithSpire(t *testing.T) { + multiBackendStorageTest(t, true) +} + +func multiBackendStorageTest(t *testing.T, spireEnabled bool) { ctx := logtesting.TestContextWithLogger(t) c, ns, cleanup := setup(ctx, t, setupOpts{registry: true}) defer cleanup() - resetConfig := setConfigMap(ctx, t, c, map[string]string{ + cm := map[string]string{ "artifacts.oci.format": "simplesigning", "artifacts.oci.storage": "tekton,oci", "artifacts.oci.signer": "x509", @@ -380,7 +472,13 @@ func TestMultiBackendStorage(t *testing.T) { "artifacts.taskrun.signer": "x509", "artifacts.taskrun.storage": "tekton,oci", "storage.oci.repository.insecure": "true", - }) + } + + if spireEnabled { + enableCMSpire(cm) + } + + resetConfig := setConfigMap(ctx, t, c, cm) defer resetConfig() time.Sleep(3 * time.Second) @@ -421,16 +519,30 @@ func TestMultiBackendStorage(t *testing.T) { } func TestRetryFailed(t *testing.T) { + retryFailedTest(t, 
false) +} + +func TestRetryFailedWithSpire(t *testing.T) { + retryFailedTest(t, true) +} + +func retryFailedTest(t *testing.T, spireEnabled bool) { ctx := logtesting.TestContextWithLogger(t) c, ns, cleanup := setup(ctx, t, setupOpts{registry: true}) defer cleanup() - resetConfig := setConfigMap(ctx, t, c, map[string]string{ + cm := map[string]string{ // don't set insecure repository, forcing signature upload to fail "artifacts.oci.storage": "oci", "artifacts.taskrun.storage": "tekton", "storage.oci.repository": "gcr.io/not-real", - }) + } + + if spireEnabled { + enableCMSpire(cm) + } + + resetConfig := setConfigMap(ctx, t, c, cm) defer resetConfig() time.Sleep(3 * time.Second) @@ -475,7 +587,7 @@ cat < $(outputs.resources.image.path)/index.json Resources: &v1beta1.TaskResources{ Outputs: []v1beta1.TaskResource{ { - ResourceDeclaration: v1alpha1.ResourceDeclaration{ + ResourceDeclaration: v1beta1.ResourceDeclaration{ Name: "image", Type: "image", }, @@ -496,14 +608,8 @@ var imageTaskRun = v1beta1.TaskRun{ { PipelineResourceBinding: v1beta1.PipelineResourceBinding{ Name: "image", - ResourceSpec: &v1alpha1.PipelineResourceSpec{ - Type: "image", - Params: []v1alpha1.ResourceParam{ - { - Name: "url", - Value: "gcr.io/foo/bar", - }, - }, + ResourceRef: &v1beta1.PipelineResourceRef{ + Name: "url", }, }, }, @@ -513,16 +619,30 @@ var imageTaskRun = v1beta1.TaskRun{ } func TestProvenanceMaterials(t *testing.T) { + provenanceMaterialsTest(t, false) +} + +func TestProvenanceMaterialsWithSpire(t *testing.T) { + provenanceMaterialsTest(t, true) +} + +func provenanceMaterialsTest(t *testing.T, spireEnabled bool) { ctx := logtesting.TestContextWithLogger(t) c, ns, cleanup := setup(ctx, t, setupOpts{}) defer cleanup() - // Setup the right config. 
- resetConfig := setConfigMap(ctx, t, c, map[string]string{ + cm := map[string]string{ "artifacts.taskrun.format": "in-toto", "artifacts.taskrun.signer": "x509", "artifacts.taskrun.storage": "tekton", - }) + } + + if spireEnabled { + enableCMSpire(cm) + } + + // Setup the right config. + resetConfig := setConfigMap(ctx, t, c, cm) defer resetConfig() // modify image task run to add in the params we want to check for @@ -579,7 +699,6 @@ func TestProvenanceMaterials(t *testing.T) { t.Fatal(string(d)) } } - func TestVaultKMSSpire(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) c, ns, cleanup := setup(ctx, t, setupOpts{}) @@ -637,3 +756,88 @@ func TestVaultKMSSpire(t *testing.T) { t.Fatal(err) } } + +func TestTaskrunResultsFailuretoVerifybySpire(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + c, ns, cleanup := setup(ctx, t, setupOpts{}) + defer cleanup() + + cm := map[string]string{ + "artifacts.taskrun.format": "in-toto", + "artifacts.taskrun.storage": "tekton", + } + + enableCMSpire(cm) + + // Setup the right config. 
+ resetConfig := setConfigMap(ctx, t, c, cm) + defer resetConfig() + + taskRunName := "failing-taskrun" + + t.Logf("Creating Task and TaskRun in namespace %s", ns) + task := mustParseTask(t, fmt.Sprintf(` +metadata: + name: failing-task + namespace: %s +spec: + steps: + - image: busybox + command: ['/bin/sh'] + args: ['-c', 'echo hello'] + - image: busybox + command: ['/bin/sh'] + args: ['-c', 'exit 1'] + - image: busybox + command: ['/bin/sh'] + args: ['-c', 'sleep 30s'] +`, ns)) + if _, err := c.PipelineClient.TektonV1beta1().Tasks(ns).Create(ctx, task, metav1.CreateOptions{}); err != nil { + t.Fatalf("error creating task: %s", err) + } + taskRun := mustParseTaskRun(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + taskRef: + name: failing-task +`, taskRunName, ns)) + tr, err := c.PipelineClient.TektonV1beta1().TaskRuns(ns).Create(ctx, taskRun, metav1.CreateOptions{}) + if err != nil { + t.Errorf("error creating taskrun: %s", err) + } + + // Give it a minute to complete. 
+ waitForCondition(ctx, t, c.PipelineClient, tr.Name, ns, failed, 60*time.Second) + value, ok := tr.Annotations["chains.tekton.dev/signed"] + if ok && value == "failed" { + t.Error("taskrun failed should not have chains.tekton.dev/signed") + } +} + +// mustParseTask takes YAML and parses it into a *v1beta1.Task +func mustParseTask(t *testing.T, yaml string) *v1beta1.Task { + var task v1beta1.Task + yaml = `apiVersion: tekton.dev/v1beta1 +kind: Task +` + yaml + mustParseYAML(t, yaml, &task) + return &task +} + +// mustParseTaskRun takes YAML and parses it into a *v1beta1.TaskRun +func mustParseTaskRun(t *testing.T, yaml string) *v1beta1.TaskRun { + var tr v1beta1.TaskRun + yaml = `apiVersion: tekton.dev/v1beta1 +kind: TaskRun +` + yaml + mustParseYAML(t, yaml, &tr) + return &tr +} + +func mustParseYAML(t *testing.T, yaml string, i runtime.Object) { + if _, _, err := scheme.Codecs.UniversalDeserializer().Decode([]byte(yaml), nil, i); err != nil { + t.Fatalf("mustParseYAML (%s): %v", yaml, err) + } +} diff --git a/test/examples_test.go b/test/examples_test.go index 9bfe114a32..c86b227fa4 100644 --- a/test/examples_test.go +++ b/test/examples_test.go @@ -62,6 +62,16 @@ func TestExamples(t *testing.T) { cleanUpInTotoFormatter() } +func TestExamplesWithSpire(t *testing.T) { + ctx := context.Background() + c, ns, cleanup := setup(ctx, t, setupOpts{}) + defer cleanup() + + cleanUpInTotoFormatter := setupInTotoFormatterWithSpire(ctx, t, c) + runInTotoFormatterTests(ctx, t, ns, c) + cleanUpInTotoFormatter() +} + func setupInTotoFormatter(ctx context.Context, t *testing.T, c *clients) func() { // Setup the right config. return setConfigMap(ctx, t, c, map[string]string{ @@ -70,6 +80,16 @@ func setupInTotoFormatter(ctx context.Context, t *testing.T, c *clients) func() }) } +func setupInTotoFormatterWithSpire(ctx context.Context, t *testing.T, c *clients) func() { + // Setup the right config. 
+ return setConfigMap(ctx, t, c, map[string]string{ + "artifacts.taskrun.format": "in-toto", + "artifacts.oci.storage": "tekton", + "spire.enabled": "true", + "spire.socketPath": SpireSocketPath, + }) +} + func runInTotoFormatterTests(ctx context.Context, t *testing.T, ns string, c *clients) { t.Parallel() examples := getExamplePaths(t, examplesPath) diff --git a/test/test_utils.go b/test/test_utils.go index 8c121bab36..84b49b08b3 100644 --- a/test/test_utils.go +++ b/test/test_utils.go @@ -39,6 +39,8 @@ import ( "knative.dev/pkg/logging" ) +const SpireSocketPath string = "unix:///spiffe-workload-api/spire-agent.sock" + func getTr(ctx context.Context, t *testing.T, c pipelineclientset.Interface, name, ns string) (tr *v1beta1.TaskRun) { t.Helper() tr, err := c.TektonV1beta1().TaskRuns(ns).Get(ctx, name, metav1.GetOptions{}) @@ -268,3 +270,8 @@ func verifySignature(ctx context.Context, t *testing.T, c *clients, tr *v1beta1. } } } + +func enableCMSpire(cm map[string]string) { + cm["spire.enabled"] = "true" + cm["spire.socketPath"] = SpireSocketPath +} diff --git a/test/testdata/chains-patch-spire.json b/test/testdata/chains-patch-spire.json deleted file mode 100644 index 887f6c1248..0000000000 --- a/test/testdata/chains-patch-spire.json +++ /dev/null @@ -1,55 +0,0 @@ -{ - "spec": { - "template": { - "spec": { - "$setElementOrder/containers": [ - { - "name": "tekton-chains-controller" - } - ], - "$setElementOrder/volumes": [ - { - "name": "signing-secrets" - }, - { - "name": "oidc-info" - }, - { - "name": "spire-agent-socket" - } - ], - "containers": [ - { - "$setElementOrder/volumeMounts": [ - { - "mountPath": "/etc/signing-secrets" - }, - { - "mountPath": "/var/run/sigstore/cosign" - }, - { - "mountPath": "/tmp/spire-agent/public" - } - ], - "name": "tekton-chains-controller", - "volumeMounts": [ - { - "mountPath": "/tmp/spire-agent/public", - "name": "spire-agent-socket" - } - ] - } - ], - "volumes": [ - { - "hostPath": { - "path": "/run/spire/sockets", - "type": 
"DirectoryOrCreate" - }, - "name": "spire-agent-socket" - } - ] - } - } - } -} \ No newline at end of file diff --git a/test/testdata/patch/chains-controller-spire.json b/test/testdata/patch/chains-controller-spire.json new file mode 100644 index 0000000000..42809defbd --- /dev/null +++ b/test/testdata/patch/chains-controller-spire.json @@ -0,0 +1,55 @@ +{ + "spec":{ + "template":{ + "spec":{ + "$setElementOrder/containers":[ + { + "name":"tekton-chains-controller" + } + ], + "$setElementOrder/volumes":[ + { + "name":"spiffe-workload-api" + }, + { + "name":"signing-secrets" + }, + { + "name":"oidc-info" + } + ], + "containers":[ + { + "$setElementOrder/volumeMounts":[ + { + "mountPath":"/etc/signing-secrets" + }, + { + "mountPath":"/var/run/sigstore/cosign" + }, + { + "mountPath":"/spiffe-workload-api" + } + ], + "name":"tekton-chains-controller", + "volumeMounts":[ + { + "mountPath":"/spiffe-workload-api", + "name":"spiffe-workload-api", + "readOnly":true + } + ] + } + ], + "volumes":[ + { + "csi":{ + "driver":"csi.spiffe.io" + }, + "name":"spiffe-workload-api" + } + ] + } + } + } +} diff --git a/test/testdata/patch/pipeline-cm-spire.json b/test/testdata/patch/pipeline-cm-spire.json new file mode 100644 index 0000000000..889d762da1 --- /dev/null +++ b/test/testdata/patch/pipeline-cm-spire.json @@ -0,0 +1,5 @@ +{ + "data":{ + "enable-spire":"true" + } +} diff --git a/test/testdata/patch/pipeline-controller-spire.json b/test/testdata/patch/pipeline-controller-spire.json new file mode 100644 index 0000000000..c137f675cb --- /dev/null +++ b/test/testdata/patch/pipeline-controller-spire.json @@ -0,0 +1,55 @@ +{ + "spec":{ + "template":{ + "spec":{ + "$setElementOrder/containers":[ + { + "name":"tekton-pipelines-controller" + } + ], + "$setElementOrder/volumes":[ + { + "name":"config-logging" + }, + { + "name":"config-registry-cert" + }, + { + "name":"spiffe-workload-api" + } + ], + "containers":[ + { + "$setElementOrder/volumeMounts":[ + { + 
"mountPath":"/etc/config-logging" + }, + { + "mountPath":"/etc/config-registry-cert" + }, + { + "mountPath":"/spiffe-workload-api" + } + ], + "name":"tekton-pipelines-controller", + "volumeMounts":[ + { + "mountPath":"/spiffe-workload-api", + "name":"spiffe-workload-api", + "readOnly":true + } + ] + } + ], + "volumes":[ + { + "csi":{ + "driver":"csi.spiffe.io" + }, + "name":"spiffe-workload-api" + } + ] + } + } + } +} diff --git a/test/testdata/spire/spiffe-csi-driver.yaml b/test/testdata/spire/spiffe-csi-driver.yaml new file mode 100644 index 0000000000..e9d07bc568 --- /dev/null +++ b/test/testdata/spire/spiffe-csi-driver.yaml @@ -0,0 +1,20 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: "csi.spiffe.io" +spec: + # Only ephemeral, inline volumes are supported. There is no need for a + # controller to provision and attach volumes. + attachRequired: false + + # Request the pod information which the CSI driver uses to verify that an + # ephemeral mount was requested. + podInfoOnMount: true + + # Don't change ownership on the contents of the mount since the Workload API + # Unix Domain Socket is typically open to all (i.e. 0777). + fsGroupPolicy: None + + # Declare support for ephemeral volumes only. 
+ volumeLifecycleModes: + - Ephemeral diff --git a/test/testdata/spire/spire-agent.yaml b/test/testdata/spire/spire-agent.yaml new file mode 100644 index 0000000000..4e848a5138 --- /dev/null +++ b/test/testdata/spire/spire-agent.yaml @@ -0,0 +1,208 @@ +# ServiceAccount for the SPIRE agent +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-agent + namespace: spire + +--- + +# Required cluster role to allow spire-agent to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role +rules: +- apiGroups: [""] + resources: ["pods", "nodes", "nodes/proxy"] + verbs: ["get"] + +--- + +# Binds above cluster role to spire-agent service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-agent-cluster-role-binding +subjects: +- kind: ServiceAccount + name: spire-agent + namespace: spire +roleRef: + kind: ClusterRole + name: spire-agent-cluster-role + apiGroup: rbac.authorization.k8s.io + + +--- + +# ConfigMap for the SPIRE agent featuring: +# 1) PSAT node attestation +# 2) K8S Workload Attestation over the secure kubelet port +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-agent + namespace: spire +data: + agent.conf: | + agent { + data_dir = "/run/spire" + log_level = "DEBUG" + server_address = "spire-server" + server_port = "8081" + socket_path = "/run/spire/sockets/spire-agent.sock" + trust_bundle_path = "/run/spire/bundle/bundle.crt" + trust_domain = "example.org" + } + + plugins { + NodeAttestor "k8s_psat" { + plugin_data { + cluster = "example-cluster" + } + } + + KeyManager "memory" { + plugin_data { + } + } + + WorkloadAttestor "k8s" { + plugin_data { + skip_kubelet_verification = true + } + } + } + +--- + +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: spire-agent + namespace: spire + labels: + app: spire-agent +spec: + selector: + matchLabels: + app: spire-agent + updateStrategy: + type: RollingUpdate + template: + 
metadata: + namespace: spire + labels: + app: spire-agent + spec: + hostPID: true + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: spire-agent + containers: + - name: spire-agent + image: ghcr.io/spiffe/spire-agent:1.1.1 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/agent.conf"] + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + - name: spire-bundle + mountPath: /run/spire/bundle + readOnly: true + - name: spire-token + mountPath: /var/run/secrets/tokens + - name: spire-agent-socket-dir + mountPath: /run/spire/sockets + # This is the container which runs the SPIFFE CSI driver. + - name: spiffe-csi-driver + image: ghcr.io/spiffe/spiffe-csi-driver:nightly + imagePullPolicy: IfNotPresent + args: [ + "-workload-api-socket-dir", "/spire-agent-socket", + "-csi-socket-path", "/spiffe-csi/csi.sock", + ] + env: + # The CSI driver needs a unique node ID. The node name can be + # used for this purpose. + - name: MY_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + # The volume containing the SPIRE agent socket. The SPIFFE CSI + # driver will mount this directory into containers. + - mountPath: /spire-agent-socket + name: spire-agent-socket-dir + readOnly: true + # The volume that will contain the CSI driver socket shared + # with the kubelet and the driver registrar. + - mountPath: /spiffe-csi + name: spiffe-csi-socket-dir + # The volume containing mount points for containers. + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + securityContext: + privileged: true + # This container runs the CSI Node Driver Registrar which takes care + # of all the little details required to register a CSI driver with + # the kubelet. 
+ - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v2.0.1 + imagePullPolicy: IfNotPresent + args: [ + "-csi-address", "/spiffe-csi/csi.sock", + "-kubelet-registration-path", "/var/lib/kubelet/plugins/csi.spiffe.io/csi.sock", + ] + volumeMounts: + # The registrar needs access to the SPIFFE CSI driver socket + - mountPath: /spiffe-csi + name: spiffe-csi-socket-dir + # The registrar needs access to the Kubelet plugin registration + # directory + - name: kubelet-plugin-registration-dir + mountPath: /registration + volumes: + - name: spire-config + configMap: + name: spire-agent + - name: spire-bundle + configMap: + name: spire-bundle + - name: spire-token + projected: + sources: + - serviceAccountToken: + path: spire-agent + expirationSeconds: 7200 + audience: spire-server + # This volume is used to share the Workload API socket between the CSI + # driver and SPIRE agent. Note, an emptyDir volume could also be used, + # however, this can lead to broken bind mounts in the workload + # containers if the agent pod is restarted (since the emptyDir + # directory on the node that was mounted into workload containers by + # the CSI driver belongs to the old pod instance and is no longer + # valid). 
+ - name: spire-agent-socket-dir + hostPath: + path: /run/spire/agent-sockets + type: DirectoryOrCreate + # This volume is where the socket for kubelet->driver communication lives + - name: spiffe-csi-socket-dir + hostPath: + path: /var/lib/kubelet/plugins/csi.spiffe.io + type: DirectoryOrCreate + # This volume is where the SPIFFE CSI driver mounts volumes + - name: mountpoint-dir + hostPath: + path: /var/lib/kubelet/pods + type: Directory + # This volume is where the node-driver-registrar registers the plugin + # with kubelet + - name: kubelet-plugin-registration-dir + hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory diff --git a/test/testdata/spire/spire-server.yaml b/test/testdata/spire/spire-server.yaml new file mode 100644 index 0000000000..ceec824613 --- /dev/null +++ b/test/testdata/spire/spire-server.yaml @@ -0,0 +1,211 @@ +# ServiceAccount used by the SPIRE server. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: spire-server + namespace: spire + +--- + +# Required cluster role to allow spire-server to query k8s API server +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role +rules: +- apiGroups: [""] + resources: ["nodes"] + verbs: ["get"] + # allow TokenReview requests (to verify service account tokens for PSAT + # attestation) +- apiGroups: ["authentication.k8s.io"] + resources: ["tokenreviews"] + verbs: ["get", "create"] + +--- + +# Binds above cluster role to spire-server service account +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-cluster-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: ClusterRole + name: spire-server-cluster-role + apiGroup: rbac.authorization.k8s.io + +--- + +# Role for the SPIRE server +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: spire + name: spire-server-role +rules: + # allow "get" 
access to pods (to resolve selectors for PSAT attestation) +- apiGroups: [""] + resources: ["pods"] + verbs: ["get"] + # allow access to "get" and "patch" the spire-bundle ConfigMap (for SPIRE + # agent bootstrapping, see the spire-bundle ConfigMap below) +- apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["spire-bundle"] + verbs: ["get", "patch"] + +--- + +# RoleBinding granting the spire-server-role to the SPIRE server +# service account. +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: spire-server-role-binding + namespace: spire +subjects: +- kind: ServiceAccount + name: spire-server + namespace: spire +roleRef: + kind: Role + name: spire-server-role + apiGroup: rbac.authorization.k8s.io + +--- + +# ConfigMap containing the latest trust bundle for the trust domain. It is +# updated by SPIRE using the k8sbundle notifier plugin. SPIRE agents mount +# this config map and use the certificate to bootstrap trust with the SPIRE +# server during attestation. +apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-bundle + namespace: spire + +--- + +# ConfigMap containing the SPIRE server configuration. 
+apiVersion: v1 +kind: ConfigMap +metadata: + name: spire-server + namespace: spire +data: + server.conf: | + server { + bind_address = "0.0.0.0" + bind_port = "8081" + trust_domain = "example.org" + data_dir = "/run/spire/data" + log_level = "DEBUG" + default_svid_ttl = "1h" + ca_ttl = "12h" + ca_subject { + country = ["US"] + organization = ["SPIFFE"] + common_name = "" + } + } + + plugins { + DataStore "sql" { + plugin_data { + database_type = "sqlite3" + connection_string = "/run/spire/data/datastore.sqlite3" + } + } + + NodeAttestor "k8s_psat" { + plugin_data { + clusters = { + "example-cluster" = { + service_account_allow_list = ["spire:spire-agent"] + } + } + } + } + + KeyManager "disk" { + plugin_data { + keys_path = "/run/spire/data/keys.json" + } + } + + Notifier "k8sbundle" { + plugin_data { + # This plugin updates the bundle.crt value in the spire:spire-bundle + # ConfigMap by default, so no additional configuration is necessary. + } + } + } + + health_checks { + listener_enabled = true + bind_address = "0.0.0.0" + bind_port = "8080" + live_path = "/live" + ready_path = "/ready" + } + +--- + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: spire-server + namespace: spire + labels: + app: spire-server +spec: + replicas: 1 + selector: + matchLabels: + app: spire-server + template: + metadata: + namespace: spire + labels: + app: spire-server + spec: + serviceAccountName: spire-server + shareProcessNamespace: true + containers: + - name: spire-server + image: ghcr.io/spiffe/spire-server:1.1.1 + imagePullPolicy: IfNotPresent + args: ["-config", "/run/spire/config/server.conf"] + ports: + - containerPort: 8081 + volumeMounts: + - name: spire-config + mountPath: /run/spire/config + readOnly: true + volumes: + - name: spire-config + configMap: + name: spire-server + +--- + +# Service definition for SPIRE server defining the gRPC port. 
+apiVersion: v1 +kind: Service +metadata: + name: spire-server + namespace: spire +spec: + type: NodePort + ports: + - name: grpc + port: 8081 + targetPort: 8081 + protocol: TCP + selector: + app: spire-server diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go index 0be62d7fc9..ea8fbfbb4d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -97,6 +97,7 @@ type ceClient struct { receiverMu sync.Mutex eventDefaulterFns []EventDefaulter pollGoroutines int + blockingCallback bool } func (c *ceClient) applyOptions(opts ...Option) error { @@ -128,11 +129,10 @@ func (c *ceClient) Send(ctx context.Context, e event.Event) protocol.Result { return err } - // Event has been defaulted and validated, record we are going to preform send. + // Event has been defaulted and validated, record we are going to perform send. ctx, cb := c.observabilityService.RecordSendingEvent(ctx, e) - defer cb(err) - err = c.sender.Send(ctx, (*binding.EventMessage)(&e)) + defer cb(err) return err } @@ -160,7 +160,6 @@ func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, pr // Event has been defaulted and validated, record we are going to perform request. ctx, cb := c.observabilityService.RecordRequestEvent(ctx, e) - defer cb(err, resp) // If provided a requester, use it to do request/response. var msg binding.Message @@ -186,7 +185,7 @@ func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, pr } else { resp = rs } - + defer cb(err, resp) return resp, err } @@ -250,14 +249,22 @@ func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { continue } - // Do not block on the invoker. 
- wg.Add(1) - go func() { + callback := func() { if err := c.invoker.Invoke(ctx, msg, respFn); err != nil { cecontext.LoggerFrom(ctx).Warn("Error while handling a message: ", err) } - wg.Done() - }() + } + + if c.blockingCallback { + callback() + } else { + // Do not block on the invoker. + wg.Add(1) + go func() { + defer wg.Done() + callback() + }() + } } }() } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go index e6d11f55f3..403fb0f559 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go @@ -81,9 +81,9 @@ func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn p var cb func(error) ctx, cb = r.observabilityService.RecordCallingInvoker(ctx, e) - defer cb(result) resp, result = r.fn.invoke(ctx, e) + defer cb(result) return }() @@ -127,6 +127,9 @@ func (r *receiveInvoker) IsResponder() bool { func computeInboundContext(message binding.Message, fallback context.Context, inboundContextDecorators []func(context.Context, binding.Message) context.Context) context.Context { result := fallback + if mctx, ok := message.(binding.MessageContext); ok { + result = cecontext.ValuesDelegating(mctx.Context(), fallback) + } for _, f := range inboundContextDecorators { result = f(result, message) } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go index d0fe9dbaa9..938478162b 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -8,6 +8,7 @@ package client import ( "context" "fmt" + "github.com/cloudevents/sdk-go/v2/binding" ) @@ -113,3 +114,15 @@ func WithInboundContextDecorator(dec func(context.Context, binding.Message) cont return nil } } + +// WithBlockingCallback makes the callback passed into StartReceiver is executed as a blocking 
call, +// i.e. in each poll go routine, the next event will not be received until the callback on current event completes. +// To make event processing serialized (no concurrency), use this option along with WithPollGoroutines(1) +func WithBlockingCallback() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.blockingCallback = true + } + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go new file mode 100644 index 0000000000..434a4da7a0 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/delegating.go @@ -0,0 +1,25 @@ +package context + +import "context" + +type valuesDelegating struct { + context.Context + parent context.Context +} + +// ValuesDelegating wraps a child and parent context. It will perform Value() +// lookups first on the child, and then fall back to the child. All other calls +// go solely to the child context. +func ValuesDelegating(child, parent context.Context) context.Context { + return &valuesDelegating{ + Context: child, + parent: parent, + } +} + +func (c *valuesDelegating) Value(key interface{}) interface{} { + if val := c.Context.Value(key); val != nil { + return val + } + return c.parent.Value(key) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go index 0f18314827..8fc449ed94 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go @@ -73,7 +73,7 @@ func (e Event) Data() []byte { } // DataAs attempts to populate the provided data object with the event payload. -// data should be a pointer type. +// obj should be a pointer type. 
func (e Event) DataAs(obj interface{}) error { data := e.Data() diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go index 561f4c5dfb..c511c81c45 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go @@ -20,6 +20,17 @@ const ( CloudEventsVersionV03 = "0.3" ) +var specV03Attributes = map[string]struct{}{ + "type": {}, + "source": {}, + "subject": {}, + "id": {}, + "time": {}, + "schemaurl": {}, + "datacontenttype": {}, + "datacontentencoding": {}, +} + // EventContextV03 represents the non-data attributes of a CloudEvents v0.3 // event. type EventContextV03 struct { @@ -78,11 +89,17 @@ func (ec EventContextV03) ExtensionAs(name string, obj interface{}) error { } } -// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context. +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name uses a reserved event context key. 
func (ec *EventContextV03) SetExtension(name string, value interface{}) error { if ec.Extensions == nil { ec.Extensions = make(map[string]interface{}) } + + if _, ok := specV03Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + if value == nil { delete(ec.Extensions, name) if len(ec.Extensions) == 0 { diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go index 01f97586f6..8f164502b0 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go @@ -21,6 +21,17 @@ const ( CloudEventsVersionV1 = "1.0" ) +var specV1Attributes = map[string]struct{}{ + "id": {}, + "source": {}, + "type": {}, + "datacontenttype": {}, + "subject": {}, + "time": {}, + "specversion": {}, + "dataschema": {}, +} + // EventContextV1 represents the non-data attributes of a CloudEvents v1.0 // event. type EventContextV1 struct { @@ -73,13 +84,18 @@ func (ec EventContextV1) ExtensionAs(name string, obj interface{}) error { return fmt.Errorf("unknown extension type %T", obj) } -// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context. -// This function fails if the name doesn't respect the regex ^[a-zA-Z0-9]+$ +// SetExtension adds the extension 'name' with value 'value' to the CloudEvents +// context. This function fails if the name doesn't respect the regex +// ^[a-zA-Z0-9]+$ or if the name uses a reserved event context key. 
func (ec *EventContextV1) SetExtension(name string, value interface{}) error { if err := validateExtensionName(name); err != nil { return err } + if _, ok := specV1Attributes[strings.ToLower(name)]; ok { + return fmt.Errorf("bad key %q: CloudEvents spec attribute MUST NOT be overwritten by extension", name) + } + name = strings.ToLower(name) if ec.Extensions == nil { ec.Extensions = make(map[string]interface{}) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go new file mode 100644 index 0000000000..0eec396a1e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/context.go @@ -0,0 +1,48 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + + nethttp "net/http" + "net/url" +) + +type requestKey struct{} + +// RequestData holds the http.Request information subset that can be +// used to retrieve HTTP information for an incoming CloudEvent. +type RequestData struct { + URL *url.URL + Header nethttp.Header + RemoteAddr string + Host string +} + +// WithRequestDataAtContext uses the http.Request to add RequestData +// information to the Context. +func WithRequestDataAtContext(ctx context.Context, r *nethttp.Request) context.Context { + if r == nil { + return ctx + } + + return context.WithValue(ctx, requestKey{}, &RequestData{ + URL: r.URL, + Header: r.Header, + RemoteAddr: r.RemoteAddr, + Host: r.Host, + }) +} + +// RequestDataFromContext retrieves RequestData from the Context. +// If not set nil is returned. 
+func RequestDataFromContext(ctx context.Context) *RequestData { + if req := ctx.Value(requestKey{}); req != nil { + return req.(*RequestData) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go index 55031939c6..5e400905a7 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go @@ -248,7 +248,7 @@ func WithDefaultOptionsHandlerFunc(methods []string, rate int, origins []string, if p == nil { return fmt.Errorf("http OPTIONS handler func can not set nil protocol") } - p.OptionsHandlerFn = p.DeleteHandlerFn + p.OptionsHandlerFn = p.OptionsHandler p.WebhookConfig = &WebhookConfig{ AllowedMethods: methods, AllowedRate: &rate, @@ -277,3 +277,25 @@ func WithIsRetriableFunc(isRetriable IsRetriable) Option { return nil } } + +func WithRateLimiter(rl RateLimiter) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http OPTIONS handler func can not set nil protocol") + } + p.limiter = rl + return nil + } +} + +// WithRequestDataAtContextMiddleware adds to the Context RequestData. +// This enables a user's dispatch handler to inspect HTTP request information by +// retrieving it from the Context. 
+func WithRequestDataAtContextMiddleware() Option { + return WithMiddleware(func(next nethttp.Handler) nethttp.Handler { + return nethttp.HandlerFunc(func(w nethttp.ResponseWriter, r *nethttp.Request) { + ctx := WithRequestDataAtContext(r.Context(), r) + next.ServeHTTP(w, r.WithContext(ctx)) + }) + }) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go index a17028795d..06204b2a1f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go @@ -13,6 +13,7 @@ import ( "io" "net/http" "net/url" + "strconv" "sync" "sync/atomic" "time" @@ -86,6 +87,7 @@ type Protocol struct { server *http.Server handlerRegistered bool middleware []Middleware + limiter RateLimiter isRetriableFunc IsRetriable } @@ -115,6 +117,10 @@ func New(opts ...Option) (*Protocol, error) { p.isRetriableFunc = defaultIsRetriableFunc } + if p.limiter == nil { + p.limiter = noOpLimiter{} + } + return p, nil } @@ -151,7 +157,14 @@ func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ... buf := new(bytes.Buffer) buf.ReadFrom(message.BodyReader) errorStr := buf.String() - err = NewResult(res.StatusCode, "%s", errorStr) + // If the error is not wrapped, then append the original error string. + if og, ok := err.(*Result); ok { + og.Format = og.Format + "%s" + og.Args = append(og.Args, errorStr) + err = og + } else { + err = NewResult(res.StatusCode, "%w: %s", err, errorStr) + } } } } @@ -277,6 +290,20 @@ func (p *Protocol) Respond(ctx context.Context) (binding.Message, protocol.Respo // ServeHTTP implements http.Handler. // Blocks until ResponseFn is invoked. 
func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + // always apply limiter first using req context + ok, reset, err := p.limiter.Allow(req.Context(), req) + if err != nil { + p.incoming <- msgErr{msg: nil, err: fmt.Errorf("unable to acquire rate limit token: %w", err)} + rw.WriteHeader(http.StatusInternalServerError) + return + } + + if !ok { + rw.Header().Add("Retry-After", strconv.Itoa(int(reset))) + http.Error(rw, "limit exceeded", 429) + return + } + // Filter the GET style methods: switch req.Method { case http.MethodOptions: diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go new file mode 100644 index 0000000000..9c4c10a293 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_rate.go @@ -0,0 +1,34 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + +package http + +import ( + "context" + "net/http" +) + +type RateLimiter interface { + // Allow attempts to take one token from the rate limiter for the specified + // request. It returns ok when this operation was successful. In case ok is + // false, reset will indicate the time in seconds when it is safe to perform + // another attempt. An error is returned when this operation failed, e.g. due to + // a backend error. + Allow(ctx context.Context, r *http.Request) (ok bool, reset uint64, err error) + // Close terminates rate limiter and cleans up any data structures or + // connections that may remain open. After a store is stopped, Take() should + // always return zero values. 
+ Close(ctx context.Context) error +} + +type noOpLimiter struct{} + +func (n noOpLimiter) Allow(ctx context.Context, r *http.Request) (bool, uint64, error) { + return true, 0, nil +} + +func (n noOpLimiter) Close(ctx context.Context) error { + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go index fb7bcd27ef..71e7346f30 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go @@ -6,8 +6,11 @@ package http import ( + "bytes" "context" "errors" + "io" + "io/ioutil" "net/http" "net/url" "time" @@ -53,6 +56,24 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam retry := 0 results := make([]protocol.Result, 0) + var ( + body []byte + err error + ) + + if req != nil && req.Body != nil { + defer func() { + if err = req.Body.Close(); err != nil { + cecontext.LoggerFrom(ctx).Warnw("could not close request body", zap.Error(err)) + } + }() + body, err = ioutil.ReadAll(req.Body) + if err != nil { + panic(err) + } + resetBody(req, body) + } + for { msg, result := p.doOnce(req) @@ -90,6 +111,8 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam } DoBackoff: + resetBody(req, body) + // Wait for the correct amount of backoff time. // total tries = retry + 1 @@ -103,3 +126,20 @@ func (p *Protocol) doWithRetry(ctx context.Context, params *cecontext.RetryParam results = append(results, result) } } + +// reset body to allow it to be read multiple times, e.g. 
when retrying http +// requests +func resetBody(req *http.Request, body []byte) { + if req == nil || req.Body == nil { + return + } + + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + + // do not modify existing GetBody function + if req.GetBody == nil { + req.GetBody = func() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(body)), nil + } + } +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/authorizer.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/authorizer.go new file mode 100644 index 0000000000..b3f7e7e4ad --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/authorizer.go @@ -0,0 +1,40 @@ +package tlsconfig + +import ( + "crypto/x509" + + "github.com/spiffe/go-spiffe/v2/spiffeid" +) + +// Authorizer authorizes an X509-SVID given the SPIFFE ID and the chain +// of trust. The certificate chain starts with the X509-SVID certificate back +// to an X.509 root for the trust domain. +type Authorizer func(id spiffeid.ID, verifiedChains [][]*x509.Certificate) error + +// AuthorizeAny allows any SPIFFE ID. +func AuthorizeAny() Authorizer { + return AdaptMatcher(spiffeid.MatchAny()) +} + +// AuthorizeID allows a specific SPIFFE ID. +func AuthorizeID(allowed spiffeid.ID) Authorizer { + return AdaptMatcher(spiffeid.MatchID(allowed)) +} + +// AuthorizeOneOf allows any SPIFFE ID in the given list of IDs. +func AuthorizeOneOf(allowed ...spiffeid.ID) Authorizer { + return AdaptMatcher(spiffeid.MatchOneOf(allowed...)) +} + +// AuthorizeMemberOf allows any SPIFFE ID in the given trust domain. +func AuthorizeMemberOf(allowed spiffeid.TrustDomain) Authorizer { + return AdaptMatcher(spiffeid.MatchMemberOf(allowed)) +} + +// AdaptMatcher adapts any spiffeid.Matcher for use as an Authorizer which +// only authorizes the SPIFFE ID but otherwise ignores the verified chains. 
+func AdaptMatcher(matcher spiffeid.Matcher) Authorizer { + return Authorizer(func(actual spiffeid.ID, verifiedChains [][]*x509.Certificate) error { + return matcher(actual) + }) +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/config.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/config.go new file mode 100644 index 0000000000..53b36ed07c --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/config.go @@ -0,0 +1,245 @@ +package tlsconfig + +import ( + "crypto/tls" + "crypto/x509" + + "github.com/spiffe/go-spiffe/v2/bundle/x509bundle" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" +) + +// TLSClientConfig returns a TLS configuration which verifies and authorizes +// the server X509-SVID. +func TLSClientConfig(bundle x509bundle.Source, authorizer Authorizer, opts ...Option) *tls.Config { + config := new(tls.Config) + HookTLSClientConfig(config, bundle, authorizer, opts...) + return config +} + +// HookTLSClientConfig sets up the TLS configuration to verify and authorize +// the server X509-SVID. If there is an existing callback set for +// VerifyPeerCertificate it will be wrapped by by this package and invoked +// after SPIFFE authentication has completed. +func HookTLSClientConfig(config *tls.Config, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) { + resetAuthFields(config) + config.InsecureSkipVerify = true + config.VerifyPeerCertificate = WrapVerifyPeerCertificate(config.VerifyPeerCertificate, bundle, authorizer, opts...) +} + +// A Option changes the defaults used to by mTLS ClientConfig functions. 
+type Option interface { + apply(*options) +} + +type option func(*options) + +func (fn option) apply(o *options) { fn(o) } + +type options struct { + trace Trace +} + +func newOptions(opts []Option) *options { + out := &options{} + for _, opt := range opts { + opt.apply(out) + } + return out +} + +// WithTrace will use the provided tracing callbacks +// when various TLS config functions gets invoked. +func WithTrace(trace Trace) Option { + return option(func(opts *options) { + opts.trace = trace + }) +} + +// MTLSClientConfig returns a TLS configuration which presents an X509-SVID +// to the server and verifies and authorizes the server X509-SVID. +func MTLSClientConfig(svid x509svid.Source, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) *tls.Config { + config := new(tls.Config) + HookMTLSClientConfig(config, svid, bundle, authorizer, opts...) + return config +} + +// HookMTLSClientConfig sets up the TLS configuration to present an X509-SVID +// to the server and verify and authorize the server X509-SVID. If there is an +// existing callback set for VerifyPeerCertificate it will be wrapped by by +// this package and invoked after SPIFFE authentication has completed. +func HookMTLSClientConfig(config *tls.Config, svid x509svid.Source, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) { + resetAuthFields(config) + config.GetClientCertificate = GetClientCertificate(svid, opts...) + config.InsecureSkipVerify = true + config.VerifyPeerCertificate = WrapVerifyPeerCertificate(config.VerifyPeerCertificate, bundle, authorizer, opts...) +} + +// MTLSWebClientConfig returns a TLS configuration which presents an X509-SVID +// to the server and verifies the server certificate using provided roots (or +// the system roots if nil). +func MTLSWebClientConfig(svid x509svid.Source, roots *x509.CertPool, opts ...Option) *tls.Config { + config := new(tls.Config) + HookMTLSWebClientConfig(config, svid, roots, opts...) 
+ return config +} + +// HookMTLSWebClientConfig sets up the TLS configuration to present an +// X509-SVID to the server and verifies the server certificate using the +// provided roots (or the system roots if nil). +func HookMTLSWebClientConfig(config *tls.Config, svid x509svid.Source, roots *x509.CertPool, opts ...Option) { + resetAuthFields(config) + config.GetClientCertificate = GetClientCertificate(svid, opts...) + config.RootCAs = roots +} + +// TLSServerConfig returns a TLS configuration which presents an X509-SVID +// to the client and does not require or verify client certificates. +func TLSServerConfig(svid x509svid.Source, opts ...Option) *tls.Config { + config := new(tls.Config) + HookTLSServerConfig(config, svid, opts...) + return config +} + +// HookTLSServerConfig sets up the TLS configuration to present an X509-SVID +// to the client and to not require or verify client certificates. +func HookTLSServerConfig(config *tls.Config, svid x509svid.Source, opts ...Option) { + resetAuthFields(config) + config.GetCertificate = GetCertificate(svid, opts...) +} + +// MTLSServerConfig returns a TLS configuration which presents an X509-SVID +// to the client and requires, verifies, and authorizes client X509-SVIDs. +func MTLSServerConfig(svid x509svid.Source, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) *tls.Config { + config := new(tls.Config) + HookMTLSServerConfig(config, svid, bundle, authorizer, opts...) + return config +} + +// HookMTLSServerConfig sets up the TLS configuration to present an X509-SVID +// to the client and require, verify, and authorize the client X509-SVID. If +// there is an existing callback set for VerifyPeerCertificate it will be +// wrapped by by this package and invoked after SPIFFE authentication has +// completed. 
+func HookMTLSServerConfig(config *tls.Config, svid x509svid.Source, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) { + resetAuthFields(config) + config.ClientAuth = tls.RequireAnyClientCert + config.GetCertificate = GetCertificate(svid, opts...) + config.VerifyPeerCertificate = WrapVerifyPeerCertificate(config.VerifyPeerCertificate, bundle, authorizer, opts...) +} + +// MTLSWebServerConfig returns a TLS configuration which presents a web +// server certificate to the client and requires, verifies, and authorizes +// client X509-SVIDs. +func MTLSWebServerConfig(cert *tls.Certificate, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) *tls.Config { + config := new(tls.Config) + HookMTLSWebServerConfig(config, cert, bundle, authorizer, opts...) + return config +} + +// HookMTLSWebServerConfig sets up the TLS configuration to presents a web +// server certificate to the client and require, verify, and authorize client +// X509-SVIDs. If there is an existing callback set for VerifyPeerCertificate +// it will be wrapped by by this package and invoked after SPIFFE +// authentication has completed. +func HookMTLSWebServerConfig(config *tls.Config, cert *tls.Certificate, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) { + resetAuthFields(config) + config.ClientAuth = tls.RequireAnyClientCert + config.Certificates = []tls.Certificate{*cert} + config.VerifyPeerCertificate = WrapVerifyPeerCertificate(config.VerifyPeerCertificate, bundle, authorizer, opts...) +} + +// GetCertificate returns a GetCertificate callback for tls.Config. It uses the +// given X509-SVID getter to obtain a server X509-SVID for the TLS handshake. 
+func GetCertificate(svid x509svid.Source, opts ...Option) func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + opt := newOptions(opts) + return func(*tls.ClientHelloInfo) (*tls.Certificate, error) { + return getTLSCertificate(svid, opt.trace) + } +} + +// GetClientCertificate returns a GetClientCertificate callback for tls.Config. +// It uses the given X509-SVID getter to obtain a client X509-SVID for the TLS +// handshake. +func GetClientCertificate(svid x509svid.Source, opts ...Option) func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + opt := newOptions(opts) + return func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + return getTLSCertificate(svid, opt.trace) + } +} + +// VerifyPeerCertificate returns a VerifyPeerCertificate callback for +// tls.Config. It uses the given bundle source and authorizer to verify and +// authorize X509-SVIDs provided by peers during the TLS handshake. +func VerifyPeerCertificate(bundle x509bundle.Source, authorizer Authorizer, opts ...Option) func([][]byte, [][]*x509.Certificate) error { + return func(raw [][]byte, _ [][]*x509.Certificate) error { + id, certs, err := x509svid.ParseAndVerify(raw, bundle) + if err != nil { + return err + } + + return authorizer(id, certs) + } +} + +// WrapVerifyPeerCertificate wraps a VeriyPeerCertificate callback, performing +// SPIFFE authentication against the peer certificates using the given bundle and +// authorizer. The wrapped callback will be passed the verified chains. +// Note: TLS clients must set `InsecureSkipVerify` when doing SPIFFE authentication to disable hostname verification. +func WrapVerifyPeerCertificate(wrapped func([][]byte, [][]*x509.Certificate) error, bundle x509bundle.Source, authorizer Authorizer, opts ...Option) func([][]byte, [][]*x509.Certificate) error { + if wrapped == nil { + return VerifyPeerCertificate(bundle, authorizer, opts...) 
+ } + + return func(raw [][]byte, _ [][]*x509.Certificate) error { + id, certs, err := x509svid.ParseAndVerify(raw, bundle) + if err != nil { + return err + } + + if err := authorizer(id, certs); err != nil { + return err + } + + return wrapped(raw, certs) + } +} + +func getTLSCertificate(svid x509svid.Source, trace Trace) (*tls.Certificate, error) { + var traceVal interface{} + if trace.GetCertificate != nil { + traceVal = trace.GetCertificate(GetCertificateInfo{}) + } + + s, err := svid.GetX509SVID() + if err != nil { + if trace.GotCertificate != nil { + trace.GotCertificate(GotCertificateInfo{Err: err}, traceVal) + } + return nil, err + } + + cert := &tls.Certificate{ + Certificate: make([][]byte, 0, len(s.Certificates)), + PrivateKey: s.PrivateKey, + } + + for _, svidCert := range s.Certificates { + cert.Certificate = append(cert.Certificate, svidCert.Raw) + } + + if trace.GotCertificate != nil { + trace.GotCertificate(GotCertificateInfo{Cert: cert}, traceVal) + } + + return cert, nil +} + +func resetAuthFields(config *tls.Config) { + config.Certificates = nil + config.ClientAuth = tls.NoClientCert + config.GetCertificate = nil + config.GetClientCertificate = nil + config.InsecureSkipVerify = false + config.NameToCertificate = nil //nolint:staticcheck // setting to nil is OK + config.RootCAs = nil +} diff --git a/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/trace.go b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/trace.go new file mode 100644 index 0000000000..954d3945d3 --- /dev/null +++ b/vendor/github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig/trace.go @@ -0,0 +1,22 @@ +package tlsconfig + +import ( + "crypto/tls" +) + +// GetCertificateInfo is an empty placeholder for future expansion +type GetCertificateInfo struct { +} + +// GotCertificateInfo provides err and TLS certificate info to Trace +type GotCertificateInfo struct { + Cert *tls.Certificate + Err error +} + +// Trace is the interface to define what functions are triggered 
when functions +// in tlsconfig are called +type Trace struct { + GetCertificate func(GetCertificateInfo) interface{} + GotCertificate func(GotCertificateInfo, interface{}) +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/LICENSE b/vendor/github.com/spiffe/spire-api-sdk/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go new file mode 100644 index 0000000000..b148b782ca --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.pb.go @@ -0,0 +1,1473 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/server/entry/v1/entry.proto + +package entryv1 + +import ( + types "github.com/spiffe/spire-api-sdk/proto/spire/api/types" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CountEntriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CountEntriesRequest) Reset() { + *x = CountEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CountEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CountEntriesRequest) ProtoMessage() {} + +func (x *CountEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CountEntriesRequest.ProtoReflect.Descriptor instead. 
+func (*CountEntriesRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{0} +} + +type CountEntriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Count int32 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *CountEntriesResponse) Reset() { + *x = CountEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CountEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CountEntriesResponse) ProtoMessage() {} + +func (x *CountEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CountEntriesResponse.ProtoReflect.Descriptor instead. +func (*CountEntriesResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{1} +} + +func (x *CountEntriesResponse) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +type ListEntriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Filters the entries returned in the response. + Filter *ListEntriesRequest_Filter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // An output mask indicating the entry fields set in the response. + OutputMask *types.EntryMask `protobuf:"bytes,2,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` + // The maximum number of results to return. 
The server may further + // constrain this value, or if zero, choose its own. + PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The next_page_token value returned from a previous request, if any. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListEntriesRequest) Reset() { + *x = ListEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesRequest) ProtoMessage() {} + +func (x *ListEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEntriesRequest.ProtoReflect.Descriptor instead. +func (*ListEntriesRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{2} +} + +func (x *ListEntriesRequest) GetFilter() *ListEntriesRequest_Filter { + if x != nil { + return x.Filter + } + return nil +} + +func (x *ListEntriesRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +func (x *ListEntriesRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListEntriesRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +type ListEntriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of entries. 
+ Entries []*types.Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // The page token for the next request. Empty if there are no more results. + // This field should be checked by clients even when a page_size was not + // requested, since the server may choose its own (see page_size). + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListEntriesResponse) Reset() { + *x = ListEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesResponse) ProtoMessage() {} + +func (x *ListEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEntriesResponse.ProtoReflect.Descriptor instead. +func (*ListEntriesResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{3} +} + +func (x *ListEntriesResponse) GetEntries() []*types.Entry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *ListEntriesResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +type GetEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. ID of the entry to get. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // An output mask indicating the entry fields set in the response. 
+ OutputMask *types.EntryMask `protobuf:"bytes,2,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` +} + +func (x *GetEntryRequest) Reset() { + *x = GetEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetEntryRequest) ProtoMessage() {} + +func (x *GetEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetEntryRequest.ProtoReflect.Descriptor instead. +func (*GetEntryRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{4} +} + +func (x *GetEntryRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *GetEntryRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +type BatchCreateEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The entries to be created. The entry ID field is output only, and will + // be ignored here. + Entries []*types.Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // An output mask indicating the entry fields set in the response. 
+ OutputMask *types.EntryMask `protobuf:"bytes,2,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` +} + +func (x *BatchCreateEntryRequest) Reset() { + *x = BatchCreateEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchCreateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchCreateEntryRequest) ProtoMessage() {} + +func (x *BatchCreateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchCreateEntryRequest.ProtoReflect.Descriptor instead. +func (*BatchCreateEntryRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{5} +} + +func (x *BatchCreateEntryRequest) GetEntries() []*types.Entry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *BatchCreateEntryRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +type BatchCreateEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Result for each entry in the request (order is maintained). 
+ Results []*BatchCreateEntryResponse_Result `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *BatchCreateEntryResponse) Reset() { + *x = BatchCreateEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchCreateEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchCreateEntryResponse) ProtoMessage() {} + +func (x *BatchCreateEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchCreateEntryResponse.ProtoReflect.Descriptor instead. +func (*BatchCreateEntryResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{6} +} + +func (x *BatchCreateEntryResponse) GetResults() []*BatchCreateEntryResponse_Result { + if x != nil { + return x.Results + } + return nil +} + +type BatchUpdateEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The entries to be updated. + Entries []*types.Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + // An input mask indicating what entry fields should be updated. + InputMask *types.EntryMask `protobuf:"bytes,2,opt,name=input_mask,json=inputMask,proto3" json:"input_mask,omitempty"` + // An output mask indicating what entry fields are set in the response. 
+ OutputMask *types.EntryMask `protobuf:"bytes,3,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` +} + +func (x *BatchUpdateEntryRequest) Reset() { + *x = BatchUpdateEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchUpdateEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchUpdateEntryRequest) ProtoMessage() {} + +func (x *BatchUpdateEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchUpdateEntryRequest.ProtoReflect.Descriptor instead. +func (*BatchUpdateEntryRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{7} +} + +func (x *BatchUpdateEntryRequest) GetEntries() []*types.Entry { + if x != nil { + return x.Entries + } + return nil +} + +func (x *BatchUpdateEntryRequest) GetInputMask() *types.EntryMask { + if x != nil { + return x.InputMask + } + return nil +} + +func (x *BatchUpdateEntryRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +type BatchUpdateEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Result for each entry in the request (order is maintained). 
+ Results []*BatchUpdateEntryResponse_Result `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *BatchUpdateEntryResponse) Reset() { + *x = BatchUpdateEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchUpdateEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchUpdateEntryResponse) ProtoMessage() {} + +func (x *BatchUpdateEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchUpdateEntryResponse.ProtoReflect.Descriptor instead. +func (*BatchUpdateEntryResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{8} +} + +func (x *BatchUpdateEntryResponse) GetResults() []*BatchUpdateEntryResponse_Result { + if x != nil { + return x.Results + } + return nil +} + +type BatchDeleteEntryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // IDs of the entries to delete. 
+ Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` +} + +func (x *BatchDeleteEntryRequest) Reset() { + *x = BatchDeleteEntryRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchDeleteEntryRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchDeleteEntryRequest) ProtoMessage() {} + +func (x *BatchDeleteEntryRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchDeleteEntryRequest.ProtoReflect.Descriptor instead. +func (*BatchDeleteEntryRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{9} +} + +func (x *BatchDeleteEntryRequest) GetIds() []string { + if x != nil { + return x.Ids + } + return nil +} + +type BatchDeleteEntryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Result for each entry ID in the request (order is maintained). 
+ Results []*BatchDeleteEntryResponse_Result `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` +} + +func (x *BatchDeleteEntryResponse) Reset() { + *x = BatchDeleteEntryResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchDeleteEntryResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchDeleteEntryResponse) ProtoMessage() {} + +func (x *BatchDeleteEntryResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchDeleteEntryResponse.ProtoReflect.Descriptor instead. +func (*BatchDeleteEntryResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{10} +} + +func (x *BatchDeleteEntryResponse) GetResults() []*BatchDeleteEntryResponse_Result { + if x != nil { + return x.Results + } + return nil +} + +type GetAuthorizedEntriesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An output mask indicating which fields are set in the response. 
+ OutputMask *types.EntryMask `protobuf:"bytes,1,opt,name=output_mask,json=outputMask,proto3" json:"output_mask,omitempty"` +} + +func (x *GetAuthorizedEntriesRequest) Reset() { + *x = GetAuthorizedEntriesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAuthorizedEntriesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAuthorizedEntriesRequest) ProtoMessage() {} + +func (x *GetAuthorizedEntriesRequest) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAuthorizedEntriesRequest.ProtoReflect.Descriptor instead. +func (*GetAuthorizedEntriesRequest) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{11} +} + +func (x *GetAuthorizedEntriesRequest) GetOutputMask() *types.EntryMask { + if x != nil { + return x.OutputMask + } + return nil +} + +type GetAuthorizedEntriesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The authorized entries. 
+ Entries []*types.Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` +} + +func (x *GetAuthorizedEntriesResponse) Reset() { + *x = GetAuthorizedEntriesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetAuthorizedEntriesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetAuthorizedEntriesResponse) ProtoMessage() {} + +func (x *GetAuthorizedEntriesResponse) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetAuthorizedEntriesResponse.ProtoReflect.Descriptor instead. +func (*GetAuthorizedEntriesResponse) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{12} +} + +func (x *GetAuthorizedEntriesResponse) GetEntries() []*types.Entry { + if x != nil { + return x.Entries + } + return nil +} + +type ListEntriesRequest_Filter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BySpiffeId *types.SPIFFEID `protobuf:"bytes,1,opt,name=by_spiffe_id,json=bySpiffeId,proto3" json:"by_spiffe_id,omitempty"` + ByParentId *types.SPIFFEID `protobuf:"bytes,2,opt,name=by_parent_id,json=byParentId,proto3" json:"by_parent_id,omitempty"` + BySelectors *types.SelectorMatch `protobuf:"bytes,3,opt,name=by_selectors,json=bySelectors,proto3" json:"by_selectors,omitempty"` + ByFederatesWith *types.FederatesWithMatch `protobuf:"bytes,4,opt,name=by_federates_with,json=byFederatesWith,proto3" json:"by_federates_with,omitempty"` +} + +func (x *ListEntriesRequest_Filter) Reset() { + *x = 
ListEntriesRequest_Filter{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListEntriesRequest_Filter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListEntriesRequest_Filter) ProtoMessage() {} + +func (x *ListEntriesRequest_Filter) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListEntriesRequest_Filter.ProtoReflect.Descriptor instead. +func (*ListEntriesRequest_Filter) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *ListEntriesRequest_Filter) GetBySpiffeId() *types.SPIFFEID { + if x != nil { + return x.BySpiffeId + } + return nil +} + +func (x *ListEntriesRequest_Filter) GetByParentId() *types.SPIFFEID { + if x != nil { + return x.ByParentId + } + return nil +} + +func (x *ListEntriesRequest_Filter) GetBySelectors() *types.SelectorMatch { + if x != nil { + return x.BySelectors + } + return nil +} + +func (x *ListEntriesRequest_Filter) GetByFederatesWith() *types.FederatesWithMatch { + if x != nil { + return x.ByFederatesWith + } + return nil +} + +type BatchCreateEntryResponse_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status of creating the entry. If status code will be + // ALREADY_EXISTS if a similar entry already exists. An entry is + // similar if it has the same spiffe_id, parent_id, and selectors. 
+ Status *types.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // The entry that was created (.e.g status code is OK) or that already + // exists (i.e. status code is ALREADY_EXISTS). + // + // If the status code is any other value, this field will not be set. + Entry *types.Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *BatchCreateEntryResponse_Result) Reset() { + *x = BatchCreateEntryResponse_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchCreateEntryResponse_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchCreateEntryResponse_Result) ProtoMessage() {} + +func (x *BatchCreateEntryResponse_Result) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchCreateEntryResponse_Result.ProtoReflect.Descriptor instead. +func (*BatchCreateEntryResponse_Result) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{6, 0} +} + +func (x *BatchCreateEntryResponse_Result) GetStatus() *types.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *BatchCreateEntryResponse_Result) GetEntry() *types.Entry { + if x != nil { + return x.Entry + } + return nil +} + +type BatchUpdateEntryResponse_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status of creating the entry. + Status *types.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // The entry that was updated. 
If the status is OK, it will be the + // entry that was updated. If the status is any other value, this field + // will not be set. + Entry *types.Entry `protobuf:"bytes,2,opt,name=entry,proto3" json:"entry,omitempty"` +} + +func (x *BatchUpdateEntryResponse_Result) Reset() { + *x = BatchUpdateEntryResponse_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchUpdateEntryResponse_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchUpdateEntryResponse_Result) ProtoMessage() {} + +func (x *BatchUpdateEntryResponse_Result) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchUpdateEntryResponse_Result.ProtoReflect.Descriptor instead. +func (*BatchUpdateEntryResponse_Result) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{8, 0} +} + +func (x *BatchUpdateEntryResponse_Result) GetStatus() *types.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *BatchUpdateEntryResponse_Result) GetEntry() *types.Entry { + if x != nil { + return x.Entry + } + return nil +} + +type BatchDeleteEntryResponse_Result struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The status of creating the entry. + Status *types.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + // The ID of the entry that was deleted. 
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *BatchDeleteEntryResponse_Result) Reset() { + *x = BatchDeleteEntryResponse_Result{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BatchDeleteEntryResponse_Result) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BatchDeleteEntryResponse_Result) ProtoMessage() {} + +func (x *BatchDeleteEntryResponse_Result) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_server_entry_v1_entry_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BatchDeleteEntryResponse_Result.ProtoReflect.Descriptor instead. +func (*BatchDeleteEntryResponse_Result) Descriptor() ([]byte, []int) { + return file_spire_api_server_entry_v1_entry_proto_rawDescGZIP(), []int{10, 0} +} + +func (x *BatchDeleteEntryResponse_Result) GetStatus() *types.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *BatchDeleteEntryResponse_Result) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +var File_spire_api_server_entry_v1_entry_proto protoreflect.FileDescriptor + +var file_spire_api_server_entry_v1_entry_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x76, 0x31, 0x1a, 0x1b, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, + 
0x70, 0x65, 0x73, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x23, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x77, 0x69, 0x74, 0x68, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x15, 0x0a, 0x13, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2c, 0x0a, 0x14, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xf4, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4c, + 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, + 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x0b, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 
0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, + 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, + 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x1a, 0x96, 0x02, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x3b, 0x0a, 0x0c, 0x62, 0x79, 0x5f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, + 0x44, 0x52, 0x0a, 0x62, 0x79, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, 0x3b, 0x0a, + 0x0c, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x52, 0x0a, + 0x62, 0x79, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x41, 0x0a, 0x0c, 0x62, 0x79, + 0x5f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x52, 0x0b, 0x62, 0x79, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x4f, 0x0a, + 0x11, 0x62, 0x79, 0x5f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 
0x73, 0x5f, 0x77, 0x69, + 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x46, 0x65, 0x64, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x0f, 0x62, + 0x79, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x22, 0x6f, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, + 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x5e, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, + 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x22, + 0x88, 0x01, 0x0a, 0x17, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x07, 0x65, + 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, + 0x70, 0x69, 
0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x3b, 0x0a, + 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xd9, 0x01, 0x0a, 0x18, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x67, 0x0a, + 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0xc3, 0x01, 0x0a, 0x17, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 
0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x30, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x61, + 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x12, + 0x3b, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, 0x61, 0x73, 0x6b, + 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xd9, 0x01, 0x0a, + 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x07, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, 0x70, 0x69, + 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, + 0x67, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 
0x69, 0x72, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x2b, 0x0a, 0x17, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0xbb, 0x01, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x54, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, + 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x49, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x22, 0x5a, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x64, 
0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, + 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4d, 0x61, 0x73, 0x6b, 0x22, + 0x50, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, + 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x30, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, + 0x73, 0x32, 0xb7, 0x06, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x6f, 0x0a, 0x0c, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x0b, + 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 
0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x73, 0x70, 0x69, + 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x69, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x08, 0x47, 0x65, + 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x2a, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, + 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x7b, 0x0a, 0x10, 0x42, 0x61, + 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x32, + 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x10, 0x42, 0x61, 0x74, 0x63, 0x68, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x32, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x55, 0x70, 0x64, + 
0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x33, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x10, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x32, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, + 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, + 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x87, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, + 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x36, 0x2e, 0x73, 0x70, 0x69, + 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, + 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x49, 0x5a, 0x47, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, + 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2f, 0x76, 0x31, 0x3b, 0x65, + 0x6e, 0x74, 0x72, 0x79, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_server_entry_v1_entry_proto_rawDescOnce sync.Once + file_spire_api_server_entry_v1_entry_proto_rawDescData = file_spire_api_server_entry_v1_entry_proto_rawDesc +) + +func file_spire_api_server_entry_v1_entry_proto_rawDescGZIP() []byte { + file_spire_api_server_entry_v1_entry_proto_rawDescOnce.Do(func() { + file_spire_api_server_entry_v1_entry_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_server_entry_v1_entry_proto_rawDescData) + }) + return file_spire_api_server_entry_v1_entry_proto_rawDescData +} + +var file_spire_api_server_entry_v1_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_spire_api_server_entry_v1_entry_proto_goTypes = []interface{}{ + (*CountEntriesRequest)(nil), // 0: spire.api.server.entry.v1.CountEntriesRequest + (*CountEntriesResponse)(nil), // 1: spire.api.server.entry.v1.CountEntriesResponse + (*ListEntriesRequest)(nil), // 2: spire.api.server.entry.v1.ListEntriesRequest + (*ListEntriesResponse)(nil), // 3: spire.api.server.entry.v1.ListEntriesResponse + (*GetEntryRequest)(nil), // 4: spire.api.server.entry.v1.GetEntryRequest + (*BatchCreateEntryRequest)(nil), // 5: spire.api.server.entry.v1.BatchCreateEntryRequest + (*BatchCreateEntryResponse)(nil), // 6: spire.api.server.entry.v1.BatchCreateEntryResponse + (*BatchUpdateEntryRequest)(nil), // 7: spire.api.server.entry.v1.BatchUpdateEntryRequest + (*BatchUpdateEntryResponse)(nil), // 8: 
spire.api.server.entry.v1.BatchUpdateEntryResponse + (*BatchDeleteEntryRequest)(nil), // 9: spire.api.server.entry.v1.BatchDeleteEntryRequest + (*BatchDeleteEntryResponse)(nil), // 10: spire.api.server.entry.v1.BatchDeleteEntryResponse + (*GetAuthorizedEntriesRequest)(nil), // 11: spire.api.server.entry.v1.GetAuthorizedEntriesRequest + (*GetAuthorizedEntriesResponse)(nil), // 12: spire.api.server.entry.v1.GetAuthorizedEntriesResponse + (*ListEntriesRequest_Filter)(nil), // 13: spire.api.server.entry.v1.ListEntriesRequest.Filter + (*BatchCreateEntryResponse_Result)(nil), // 14: spire.api.server.entry.v1.BatchCreateEntryResponse.Result + (*BatchUpdateEntryResponse_Result)(nil), // 15: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result + (*BatchDeleteEntryResponse_Result)(nil), // 16: spire.api.server.entry.v1.BatchDeleteEntryResponse.Result + (*types.EntryMask)(nil), // 17: spire.api.types.EntryMask + (*types.Entry)(nil), // 18: spire.api.types.Entry + (*types.SPIFFEID)(nil), // 19: spire.api.types.SPIFFEID + (*types.SelectorMatch)(nil), // 20: spire.api.types.SelectorMatch + (*types.FederatesWithMatch)(nil), // 21: spire.api.types.FederatesWithMatch + (*types.Status)(nil), // 22: spire.api.types.Status +} +var file_spire_api_server_entry_v1_entry_proto_depIdxs = []int32{ + 13, // 0: spire.api.server.entry.v1.ListEntriesRequest.filter:type_name -> spire.api.server.entry.v1.ListEntriesRequest.Filter + 17, // 1: spire.api.server.entry.v1.ListEntriesRequest.output_mask:type_name -> spire.api.types.EntryMask + 18, // 2: spire.api.server.entry.v1.ListEntriesResponse.entries:type_name -> spire.api.types.Entry + 17, // 3: spire.api.server.entry.v1.GetEntryRequest.output_mask:type_name -> spire.api.types.EntryMask + 18, // 4: spire.api.server.entry.v1.BatchCreateEntryRequest.entries:type_name -> spire.api.types.Entry + 17, // 5: spire.api.server.entry.v1.BatchCreateEntryRequest.output_mask:type_name -> spire.api.types.EntryMask + 14, // 6: 
spire.api.server.entry.v1.BatchCreateEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchCreateEntryResponse.Result + 18, // 7: spire.api.server.entry.v1.BatchUpdateEntryRequest.entries:type_name -> spire.api.types.Entry + 17, // 8: spire.api.server.entry.v1.BatchUpdateEntryRequest.input_mask:type_name -> spire.api.types.EntryMask + 17, // 9: spire.api.server.entry.v1.BatchUpdateEntryRequest.output_mask:type_name -> spire.api.types.EntryMask + 15, // 10: spire.api.server.entry.v1.BatchUpdateEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchUpdateEntryResponse.Result + 16, // 11: spire.api.server.entry.v1.BatchDeleteEntryResponse.results:type_name -> spire.api.server.entry.v1.BatchDeleteEntryResponse.Result + 17, // 12: spire.api.server.entry.v1.GetAuthorizedEntriesRequest.output_mask:type_name -> spire.api.types.EntryMask + 18, // 13: spire.api.server.entry.v1.GetAuthorizedEntriesResponse.entries:type_name -> spire.api.types.Entry + 19, // 14: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_spiffe_id:type_name -> spire.api.types.SPIFFEID + 19, // 15: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_parent_id:type_name -> spire.api.types.SPIFFEID + 20, // 16: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_selectors:type_name -> spire.api.types.SelectorMatch + 21, // 17: spire.api.server.entry.v1.ListEntriesRequest.Filter.by_federates_with:type_name -> spire.api.types.FederatesWithMatch + 22, // 18: spire.api.server.entry.v1.BatchCreateEntryResponse.Result.status:type_name -> spire.api.types.Status + 18, // 19: spire.api.server.entry.v1.BatchCreateEntryResponse.Result.entry:type_name -> spire.api.types.Entry + 22, // 20: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result.status:type_name -> spire.api.types.Status + 18, // 21: spire.api.server.entry.v1.BatchUpdateEntryResponse.Result.entry:type_name -> spire.api.types.Entry + 22, // 22: spire.api.server.entry.v1.BatchDeleteEntryResponse.Result.status:type_name 
-> spire.api.types.Status + 0, // 23: spire.api.server.entry.v1.Entry.CountEntries:input_type -> spire.api.server.entry.v1.CountEntriesRequest + 2, // 24: spire.api.server.entry.v1.Entry.ListEntries:input_type -> spire.api.server.entry.v1.ListEntriesRequest + 4, // 25: spire.api.server.entry.v1.Entry.GetEntry:input_type -> spire.api.server.entry.v1.GetEntryRequest + 5, // 26: spire.api.server.entry.v1.Entry.BatchCreateEntry:input_type -> spire.api.server.entry.v1.BatchCreateEntryRequest + 7, // 27: spire.api.server.entry.v1.Entry.BatchUpdateEntry:input_type -> spire.api.server.entry.v1.BatchUpdateEntryRequest + 9, // 28: spire.api.server.entry.v1.Entry.BatchDeleteEntry:input_type -> spire.api.server.entry.v1.BatchDeleteEntryRequest + 11, // 29: spire.api.server.entry.v1.Entry.GetAuthorizedEntries:input_type -> spire.api.server.entry.v1.GetAuthorizedEntriesRequest + 1, // 30: spire.api.server.entry.v1.Entry.CountEntries:output_type -> spire.api.server.entry.v1.CountEntriesResponse + 3, // 31: spire.api.server.entry.v1.Entry.ListEntries:output_type -> spire.api.server.entry.v1.ListEntriesResponse + 18, // 32: spire.api.server.entry.v1.Entry.GetEntry:output_type -> spire.api.types.Entry + 6, // 33: spire.api.server.entry.v1.Entry.BatchCreateEntry:output_type -> spire.api.server.entry.v1.BatchCreateEntryResponse + 8, // 34: spire.api.server.entry.v1.Entry.BatchUpdateEntry:output_type -> spire.api.server.entry.v1.BatchUpdateEntryResponse + 10, // 35: spire.api.server.entry.v1.Entry.BatchDeleteEntry:output_type -> spire.api.server.entry.v1.BatchDeleteEntryResponse + 12, // 36: spire.api.server.entry.v1.Entry.GetAuthorizedEntries:output_type -> spire.api.server.entry.v1.GetAuthorizedEntriesResponse + 30, // [30:37] is the sub-list for method output_type + 23, // [23:30] is the sub-list for method input_type + 23, // [23:23] is the sub-list for extension type_name + 23, // [23:23] is the sub-list for extension extendee + 0, // [0:23] is the sub-list for field type_name +} 
+ +func init() { file_spire_api_server_entry_v1_entry_proto_init() } +func file_spire_api_server_entry_v1_entry_proto_init() { + if File_spire_api_server_entry_v1_entry_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_server_entry_v1_entry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CountEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CountEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchCreateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[6].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*BatchCreateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchUpdateEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchUpdateEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteEntryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteEntryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAuthorizedEntriesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetAuthorizedEntriesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_spire_api_server_entry_v1_entry_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListEntriesRequest_Filter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchCreateEntryResponse_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchUpdateEntryResponse_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_server_entry_v1_entry_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BatchDeleteEntryResponse_Result); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_server_entry_v1_entry_proto_rawDesc, + NumEnums: 0, + NumMessages: 17, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_spire_api_server_entry_v1_entry_proto_goTypes, + DependencyIndexes: file_spire_api_server_entry_v1_entry_proto_depIdxs, + MessageInfos: file_spire_api_server_entry_v1_entry_proto_msgTypes, + }.Build() + File_spire_api_server_entry_v1_entry_proto = out.File + file_spire_api_server_entry_v1_entry_proto_rawDesc = nil + file_spire_api_server_entry_v1_entry_proto_goTypes = nil + file_spire_api_server_entry_v1_entry_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto 
b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto new file mode 100644 index 0000000000..28fb498bfb --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry.proto @@ -0,0 +1,176 @@ +syntax = "proto3"; +package spire.api.server.entry.v1; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1;entryv1"; + +import "spire/api/types/entry.proto"; +import "spire/api/types/federateswith.proto"; +import "spire/api/types/selector.proto"; +import "spire/api/types/spiffeid.proto"; +import "spire/api/types/status.proto"; + +// Manages registration entries stored by the SPIRE Server. +service Entry { + // Count entries. + // + // The caller must be local or present an admin X509-SVID. + rpc CountEntries(CountEntriesRequest) returns (CountEntriesResponse); + + // Lists entries. + // + // The caller must be local or present an admin X509-SVID. + rpc ListEntries(ListEntriesRequest) returns (ListEntriesResponse); + + // Gets an entry. If the entry does not exist, NOT_FOUND is returned. + // + // The caller must be local or present an admin X509-SVID. + rpc GetEntry(GetEntryRequest) returns (spire.api.types.Entry); + + // Batch creates one or more entries. + // + // The caller must be local or present an admin X509-SVID. + rpc BatchCreateEntry(BatchCreateEntryRequest) returns (BatchCreateEntryResponse); + + // Batch updates one or more entries. + // + // The caller must be local or present an admin X509-SVID. + rpc BatchUpdateEntry(BatchUpdateEntryRequest) returns (BatchUpdateEntryResponse); + + // Batch deletes one or more entries. + // + // The caller must be local or present an admin X509-SVID. + rpc BatchDeleteEntry(BatchDeleteEntryRequest) returns (BatchDeleteEntryResponse); + + // Gets the entries the caller is authorized for. + // + // The caller must present an active agent X509-SVID. See the Agent + // AttestAgent/RenewAgent RPCs. 
+ rpc GetAuthorizedEntries(GetAuthorizedEntriesRequest) returns (GetAuthorizedEntriesResponse); +} + +message CountEntriesRequest { +} + +message CountEntriesResponse { + int32 count = 1; +} + +message ListEntriesRequest { + message Filter { + spire.api.types.SPIFFEID by_spiffe_id = 1; + spire.api.types.SPIFFEID by_parent_id = 2; + spire.api.types.SelectorMatch by_selectors = 3; + spire.api.types.FederatesWithMatch by_federates_with = 4; + } + + // Filters the entries returned in the response. + Filter filter = 1; + + // An output mask indicating the entry fields set in the response. + spire.api.types.EntryMask output_mask = 2; + + // The maximum number of results to return. The server may further + // constrain this value, or if zero, choose its own. + int32 page_size = 3; + + // The next_page_token value returned from a previous request, if any. + string page_token = 4; +} + +message ListEntriesResponse { + // The list of entries. + repeated spire.api.types.Entry entries = 1; + + // The page token for the next request. Empty if there are no more results. + // This field should be checked by clients even when a page_size was not + // requested, since the server may choose its own (see page_size). + string next_page_token = 2; +} + +message GetEntryRequest { + // Required. ID of the entry to get. + string id = 1; + + // An output mask indicating the entry fields set in the response. + spire.api.types.EntryMask output_mask = 2; +} + +message BatchCreateEntryRequest { + // The entries to be created. The entry ID field is output only, and will + // be ignored here. + repeated spire.api.types.Entry entries = 1; + + // An output mask indicating the entry fields set in the response. + spire.api.types.EntryMask output_mask = 2; +} + +message BatchCreateEntryResponse { + message Result { + // The status of creating the entry. If status code will be + // ALREADY_EXISTS if a similar entry already exists. 
An entry is + // similar if it has the same spiffe_id, parent_id, and selectors. + spire.api.types.Status status = 1; + + // The entry that was created (.e.g status code is OK) or that already + // exists (i.e. status code is ALREADY_EXISTS). + // + // If the status code is any other value, this field will not be set. + spire.api.types.Entry entry = 2; + } + + // Result for each entry in the request (order is maintained). + repeated Result results = 1; +} + +message BatchUpdateEntryRequest { + // The entries to be updated. + repeated spire.api.types.Entry entries = 1; + + // An input mask indicating what entry fields should be updated. + spire.api.types.EntryMask input_mask = 2; + + // An output mask indicating what entry fields are set in the response. + spire.api.types.EntryMask output_mask = 3; +} + +message BatchUpdateEntryResponse { + message Result { + // The status of creating the entry. + spire.api.types.Status status = 1; + + // The entry that was updated. If the status is OK, it will be the + // entry that was updated. If the status is any other value, this field + // will not be set. + spire.api.types.Entry entry = 2; + } + + // Result for each entry in the request (order is maintained). + repeated Result results = 1; +} + +message BatchDeleteEntryRequest { + // IDs of the entries to delete. + repeated string ids = 1; +} + +message BatchDeleteEntryResponse { + message Result { + // The status of creating the entry. + spire.api.types.Status status = 1; + + // The ID of the entry that was deleted. + string id = 2; + } + + // Result for each entry ID in the request (order is maintained). + repeated Result results = 1; +} + +message GetAuthorizedEntriesRequest { + // An output mask indicating which fields are set in the response. + spire.api.types.EntryMask output_mask = 1; +} + +message GetAuthorizedEntriesResponse { + // The authorized entries. 
+ repeated spire.api.types.Entry entries = 1; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go new file mode 100644 index 0000000000..d0f6b9d0f6 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1/entry_grpc.pb.go @@ -0,0 +1,358 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package entryv1 + +import ( + context "context" + types "github.com/spiffe/spire-api-sdk/proto/spire/api/types" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion7 + +// EntryClient is the client API for Entry service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type EntryClient interface { + // Count entries. + // + // The caller must be local or present an admin X509-SVID. + CountEntries(ctx context.Context, in *CountEntriesRequest, opts ...grpc.CallOption) (*CountEntriesResponse, error) + // Lists entries. + // + // The caller must be local or present an admin X509-SVID. + ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) + // Gets an entry. If the entry does not exist, NOT_FOUND is returned. + // + // The caller must be local or present an admin X509-SVID. + GetEntry(ctx context.Context, in *GetEntryRequest, opts ...grpc.CallOption) (*types.Entry, error) + // Batch creates one or more entries. + // + // The caller must be local or present an admin X509-SVID. 
+ BatchCreateEntry(ctx context.Context, in *BatchCreateEntryRequest, opts ...grpc.CallOption) (*BatchCreateEntryResponse, error) + // Batch updates one or more entries. + // + // The caller must be local or present an admin X509-SVID. + BatchUpdateEntry(ctx context.Context, in *BatchUpdateEntryRequest, opts ...grpc.CallOption) (*BatchUpdateEntryResponse, error) + // Batch deletes one or more entries. + // + // The caller must be local or present an admin X509-SVID. + BatchDeleteEntry(ctx context.Context, in *BatchDeleteEntryRequest, opts ...grpc.CallOption) (*BatchDeleteEntryResponse, error) + // Gets the entries the caller is authorized for. + // + // The caller must present an active agent X509-SVID. See the Agent + // AttestAgent/RenewAgent RPCs. + GetAuthorizedEntries(ctx context.Context, in *GetAuthorizedEntriesRequest, opts ...grpc.CallOption) (*GetAuthorizedEntriesResponse, error) +} + +type entryClient struct { + cc grpc.ClientConnInterface +} + +func NewEntryClient(cc grpc.ClientConnInterface) EntryClient { + return &entryClient{cc} +} + +func (c *entryClient) CountEntries(ctx context.Context, in *CountEntriesRequest, opts ...grpc.CallOption) (*CountEntriesResponse, error) { + out := new(CountEntriesResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/CountEntries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) ListEntries(ctx context.Context, in *ListEntriesRequest, opts ...grpc.CallOption) (*ListEntriesResponse, error) { + out := new(ListEntriesResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/ListEntries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) GetEntry(ctx context.Context, in *GetEntryRequest, opts ...grpc.CallOption) (*types.Entry, error) { + out := new(types.Entry) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/GetEntry", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) BatchCreateEntry(ctx context.Context, in *BatchCreateEntryRequest, opts ...grpc.CallOption) (*BatchCreateEntryResponse, error) { + out := new(BatchCreateEntryResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/BatchCreateEntry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) BatchUpdateEntry(ctx context.Context, in *BatchUpdateEntryRequest, opts ...grpc.CallOption) (*BatchUpdateEntryResponse, error) { + out := new(BatchUpdateEntryResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/BatchUpdateEntry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) BatchDeleteEntry(ctx context.Context, in *BatchDeleteEntryRequest, opts ...grpc.CallOption) (*BatchDeleteEntryResponse, error) { + out := new(BatchDeleteEntryResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/BatchDeleteEntry", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *entryClient) GetAuthorizedEntries(ctx context.Context, in *GetAuthorizedEntriesRequest, opts ...grpc.CallOption) (*GetAuthorizedEntriesResponse, error) { + out := new(GetAuthorizedEntriesResponse) + err := c.cc.Invoke(ctx, "/spire.api.server.entry.v1.Entry/GetAuthorizedEntries", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// EntryServer is the server API for Entry service. +// All implementations must embed UnimplementedEntryServer +// for forward compatibility +type EntryServer interface { + // Count entries. + // + // The caller must be local or present an admin X509-SVID. + CountEntries(context.Context, *CountEntriesRequest) (*CountEntriesResponse, error) + // Lists entries. + // + // The caller must be local or present an admin X509-SVID. 
+ ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error) + // Gets an entry. If the entry does not exist, NOT_FOUND is returned. + // + // The caller must be local or present an admin X509-SVID. + GetEntry(context.Context, *GetEntryRequest) (*types.Entry, error) + // Batch creates one or more entries. + // + // The caller must be local or present an admin X509-SVID. + BatchCreateEntry(context.Context, *BatchCreateEntryRequest) (*BatchCreateEntryResponse, error) + // Batch updates one or more entries. + // + // The caller must be local or present an admin X509-SVID. + BatchUpdateEntry(context.Context, *BatchUpdateEntryRequest) (*BatchUpdateEntryResponse, error) + // Batch deletes one or more entries. + // + // The caller must be local or present an admin X509-SVID. + BatchDeleteEntry(context.Context, *BatchDeleteEntryRequest) (*BatchDeleteEntryResponse, error) + // Gets the entries the caller is authorized for. + // + // The caller must present an active agent X509-SVID. See the Agent + // AttestAgent/RenewAgent RPCs. + GetAuthorizedEntries(context.Context, *GetAuthorizedEntriesRequest) (*GetAuthorizedEntriesResponse, error) + mustEmbedUnimplementedEntryServer() +} + +// UnimplementedEntryServer must be embedded to have forward compatible implementations. 
+type UnimplementedEntryServer struct { +} + +func (UnimplementedEntryServer) CountEntries(context.Context, *CountEntriesRequest) (*CountEntriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CountEntries not implemented") +} +func (UnimplementedEntryServer) ListEntries(context.Context, *ListEntriesRequest) (*ListEntriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListEntries not implemented") +} +func (UnimplementedEntryServer) GetEntry(context.Context, *GetEntryRequest) (*types.Entry, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetEntry not implemented") +} +func (UnimplementedEntryServer) BatchCreateEntry(context.Context, *BatchCreateEntryRequest) (*BatchCreateEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchCreateEntry not implemented") +} +func (UnimplementedEntryServer) BatchUpdateEntry(context.Context, *BatchUpdateEntryRequest) (*BatchUpdateEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchUpdateEntry not implemented") +} +func (UnimplementedEntryServer) BatchDeleteEntry(context.Context, *BatchDeleteEntryRequest) (*BatchDeleteEntryResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BatchDeleteEntry not implemented") +} +func (UnimplementedEntryServer) GetAuthorizedEntries(context.Context, *GetAuthorizedEntriesRequest) (*GetAuthorizedEntriesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetAuthorizedEntries not implemented") +} +func (UnimplementedEntryServer) mustEmbedUnimplementedEntryServer() {} + +// UnsafeEntryServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to EntryServer will +// result in compilation errors. 
+type UnsafeEntryServer interface { + mustEmbedUnimplementedEntryServer() +} + +func RegisterEntryServer(s grpc.ServiceRegistrar, srv EntryServer) { + s.RegisterService(&_Entry_serviceDesc, srv) +} + +func _Entry_CountEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CountEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).CountEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/CountEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).CountEntries(ctx, req.(*CountEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_ListEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).ListEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/ListEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).ListEntries(ctx, req.(*ListEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_GetEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).GetEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/GetEntry", + } + handler := func(ctx context.Context, req interface{}) 
(interface{}, error) { + return srv.(EntryServer).GetEntry(ctx, req.(*GetEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_BatchCreateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchCreateEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).BatchCreateEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/BatchCreateEntry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).BatchCreateEntry(ctx, req.(*BatchCreateEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_BatchUpdateEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchUpdateEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).BatchUpdateEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/BatchUpdateEntry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).BatchUpdateEntry(ctx, req.(*BatchUpdateEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_BatchDeleteEntry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BatchDeleteEntryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).BatchDeleteEntry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/BatchDeleteEntry", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).BatchDeleteEntry(ctx, req.(*BatchDeleteEntryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Entry_GetAuthorizedEntries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetAuthorizedEntriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EntryServer).GetAuthorizedEntries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/spire.api.server.entry.v1.Entry/GetAuthorizedEntries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EntryServer).GetAuthorizedEntries(ctx, req.(*GetAuthorizedEntriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Entry_serviceDesc = grpc.ServiceDesc{ + ServiceName: "spire.api.server.entry.v1.Entry", + HandlerType: (*EntryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CountEntries", + Handler: _Entry_CountEntries_Handler, + }, + { + MethodName: "ListEntries", + Handler: _Entry_ListEntries_Handler, + }, + { + MethodName: "GetEntry", + Handler: _Entry_GetEntry_Handler, + }, + { + MethodName: "BatchCreateEntry", + Handler: _Entry_BatchCreateEntry_Handler, + }, + { + MethodName: "BatchUpdateEntry", + Handler: _Entry_BatchUpdateEntry_Handler, + }, + { + MethodName: "BatchDeleteEntry", + Handler: _Entry_BatchDeleteEntry_Handler, + }, + { + MethodName: "GetAuthorizedEntries", + Handler: _Entry_GetAuthorizedEntries_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "spire/api/server/entry/v1/entry.proto", +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.pb.go new file mode 100644 index 0000000000..cf7b599810 --- /dev/null +++ 
b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.pb.go @@ -0,0 +1,327 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/agent.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Agent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Output only. SPIFFE ID of the agent. + Id *SPIFFEID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Output only. The method by which the agent attested. + AttestationType string `protobuf:"bytes,2,opt,name=attestation_type,json=attestationType,proto3" json:"attestation_type,omitempty"` + // Output only. The X509-SVID serial number. + X509SvidSerialNumber string `protobuf:"bytes,3,opt,name=x509svid_serial_number,json=x509svidSerialNumber,proto3" json:"x509svid_serial_number,omitempty"` + // Output only. The X509-SVID expiration (seconds since Unix epoch). + X509SvidExpiresAt int64 `protobuf:"varint,4,opt,name=x509svid_expires_at,json=x509svidExpiresAt,proto3" json:"x509svid_expires_at,omitempty"` + // Output only. The selectors attributed to the agent during attestation. + Selectors []*Selector `protobuf:"bytes,5,rep,name=selectors,proto3" json:"selectors,omitempty"` + // Output only. Whether or not the agent is banned. 
+ Banned bool `protobuf:"varint,6,opt,name=banned,proto3" json:"banned,omitempty"` +} + +func (x *Agent) Reset() { + *x = Agent{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_agent_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Agent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Agent) ProtoMessage() {} + +func (x *Agent) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_agent_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Agent.ProtoReflect.Descriptor instead. +func (*Agent) Descriptor() ([]byte, []int) { + return file_spire_api_types_agent_proto_rawDescGZIP(), []int{0} +} + +func (x *Agent) GetId() *SPIFFEID { + if x != nil { + return x.Id + } + return nil +} + +func (x *Agent) GetAttestationType() string { + if x != nil { + return x.AttestationType + } + return "" +} + +func (x *Agent) GetX509SvidSerialNumber() string { + if x != nil { + return x.X509SvidSerialNumber + } + return "" +} + +func (x *Agent) GetX509SvidExpiresAt() int64 { + if x != nil { + return x.X509SvidExpiresAt + } + return 0 +} + +func (x *Agent) GetSelectors() []*Selector { + if x != nil { + return x.Selectors + } + return nil +} + +func (x *Agent) GetBanned() bool { + if x != nil { + return x.Banned + } + return false +} + +type AgentMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // attestation_type field mask + AttestationType bool `protobuf:"varint,2,opt,name=attestation_type,json=attestationType,proto3" json:"attestation_type,omitempty"` + // x509svid_serial_number field mask + X509SvidSerialNumber bool `protobuf:"varint,3,opt,name=x509svid_serial_number,json=x509svidSerialNumber,proto3" 
json:"x509svid_serial_number,omitempty"` + // x509svid_expires_at field mask + X509SvidExpiresAt bool `protobuf:"varint,4,opt,name=x509svid_expires_at,json=x509svidExpiresAt,proto3" json:"x509svid_expires_at,omitempty"` + // selectors field mask + Selectors bool `protobuf:"varint,5,opt,name=selectors,proto3" json:"selectors,omitempty"` + // banned field mask + Banned bool `protobuf:"varint,6,opt,name=banned,proto3" json:"banned,omitempty"` +} + +func (x *AgentMask) Reset() { + *x = AgentMask{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_agent_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AgentMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AgentMask) ProtoMessage() {} + +func (x *AgentMask) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_agent_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AgentMask.ProtoReflect.Descriptor instead. 
+func (*AgentMask) Descriptor() ([]byte, []int) { + return file_spire_api_types_agent_proto_rawDescGZIP(), []int{1} +} + +func (x *AgentMask) GetAttestationType() bool { + if x != nil { + return x.AttestationType + } + return false +} + +func (x *AgentMask) GetX509SvidSerialNumber() bool { + if x != nil { + return x.X509SvidSerialNumber + } + return false +} + +func (x *AgentMask) GetX509SvidExpiresAt() bool { + if x != nil { + return x.X509SvidExpiresAt + } + return false +} + +func (x *AgentMask) GetSelectors() bool { + if x != nil { + return x.Selectors + } + return false +} + +func (x *AgentMask) GetBanned() bool { + if x != nil { + return x.Banned + } + return false +} + +var File_spire_api_types_agent_proto protoreflect.FileDescriptor + +var file_spire_api_types_agent_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x1e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, + 0x02, 0x0a, 0x05, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, + 0x74, 0x74, 
0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, + 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, + 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x11, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x45, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, + 0x06, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x62, + 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x22, 0xd2, 0x01, 0x0a, 0x09, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x4d, + 0x61, 0x73, 0x6b, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, + 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, + 0x0a, 0x16, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, + 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, + 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 
0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x11, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x45, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x41, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x06, 0x62, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_agent_proto_rawDescOnce sync.Once + file_spire_api_types_agent_proto_rawDescData = file_spire_api_types_agent_proto_rawDesc +) + +func file_spire_api_types_agent_proto_rawDescGZIP() []byte { + file_spire_api_types_agent_proto_rawDescOnce.Do(func() { + file_spire_api_types_agent_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_agent_proto_rawDescData) + }) + return file_spire_api_types_agent_proto_rawDescData +} + +var file_spire_api_types_agent_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_spire_api_types_agent_proto_goTypes = []interface{}{ + (*Agent)(nil), // 0: spire.api.types.Agent + (*AgentMask)(nil), // 1: spire.api.types.AgentMask + (*SPIFFEID)(nil), // 2: spire.api.types.SPIFFEID + (*Selector)(nil), // 3: spire.api.types.Selector +} +var file_spire_api_types_agent_proto_depIdxs = []int32{ + 2, // 0: spire.api.types.Agent.id:type_name -> spire.api.types.SPIFFEID + 3, // 1: spire.api.types.Agent.selectors:type_name -> spire.api.types.Selector + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is 
the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_spire_api_types_agent_proto_init() } +func file_spire_api_types_agent_proto_init() { + if File_spire_api_types_agent_proto != nil { + return + } + file_spire_api_types_selector_proto_init() + file_spire_api_types_spiffeid_proto_init() + if !protoimpl.UnsafeEnabled { + file_spire_api_types_agent_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Agent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_agent_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AgentMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_agent_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_agent_proto_goTypes, + DependencyIndexes: file_spire_api_types_agent_proto_depIdxs, + MessageInfos: file_spire_api_types_agent_proto_msgTypes, + }.Build() + File_spire_api_types_agent_proto = out.File + file_spire_api_types_agent_proto_rawDesc = nil + file_spire_api_types_agent_proto_goTypes = nil + file_spire_api_types_agent_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.proto new file mode 100644 index 0000000000..c490bb98a5 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/agent.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; 
+package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +import "spire/api/types/selector.proto"; +import "spire/api/types/spiffeid.proto"; + +message Agent { + // Output only. SPIFFE ID of the agent. + spire.api.types.SPIFFEID id = 1; + + // Output only. The method by which the agent attested. + string attestation_type = 2; + + // Output only. The X509-SVID serial number. + string x509svid_serial_number = 3; + + // Output only. The X509-SVID expiration (seconds since Unix epoch). + int64 x509svid_expires_at = 4; + + // Output only. The selectors attributed to the agent during attestation. + repeated spire.api.types.Selector selectors = 5; + + // Output only. Whether or not the agent is banned. + bool banned = 6; +} + +message AgentMask { + // attestation_type field mask + bool attestation_type = 2; + + // x509svid_serial_number field mask + bool x509svid_serial_number = 3; + + // x509svid_expires_at field mask + bool x509svid_expires_at = 4; + + // selectors field mask + bool selectors = 5; + + // banned field mask + bool banned = 6; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.pb.go new file mode 100644 index 0000000000..4683df1321 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.pb.go @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/attestation.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AttestationData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of attestation data. This is typically the name of the plugin + // that produced that data. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The attestation data payload. + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *AttestationData) Reset() { + *x = AttestationData{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_attestation_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AttestationData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttestationData) ProtoMessage() {} + +func (x *AttestationData) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_attestation_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttestationData.ProtoReflect.Descriptor instead. 
+func (*AttestationData) Descriptor() ([]byte, []int) { + return file_spire_api_types_attestation_proto_rawDescGZIP(), []int{0} +} + +func (x *AttestationData) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *AttestationData) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +var File_spire_api_types_attestation_proto protoreflect.FileDescriptor + +var file_spire_api_types_attestation_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x22, 0x3f, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_attestation_proto_rawDescOnce sync.Once + file_spire_api_types_attestation_proto_rawDescData = file_spire_api_types_attestation_proto_rawDesc +) + +func file_spire_api_types_attestation_proto_rawDescGZIP() []byte { + file_spire_api_types_attestation_proto_rawDescOnce.Do(func() { + file_spire_api_types_attestation_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_spire_api_types_attestation_proto_rawDescData) + }) + return file_spire_api_types_attestation_proto_rawDescData +} + +var file_spire_api_types_attestation_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_attestation_proto_goTypes = []interface{}{ + (*AttestationData)(nil), // 0: spire.api.types.AttestationData +} +var file_spire_api_types_attestation_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_spire_api_types_attestation_proto_init() } +func file_spire_api_types_attestation_proto_init() { + if File_spire_api_types_attestation_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_attestation_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttestationData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_attestation_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_attestation_proto_goTypes, + DependencyIndexes: file_spire_api_types_attestation_proto_depIdxs, + MessageInfos: file_spire_api_types_attestation_proto_msgTypes, + }.Build() + File_spire_api_types_attestation_proto = out.File + file_spire_api_types_attestation_proto_rawDesc = nil + file_spire_api_types_attestation_proto_goTypes = nil + file_spire_api_types_attestation_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.proto 
b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.proto new file mode 100644 index 0000000000..4c2677f62a --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/attestation.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message AttestationData { + // The type of attestation data. This is typically the name of the plugin + // that produced that data. + string type = 1; + + // The attestation data payload. + bytes payload = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.pb.go new file mode 100644 index 0000000000..541638e00a --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.pb.go @@ -0,0 +1,448 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/bundle.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Bundle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the trust domain the bundle belongs to (e.g., "example.org"). + TrustDomain string `protobuf:"bytes,1,opt,name=trust_domain,json=trustDomain,proto3" json:"trust_domain,omitempty"` + // X.509 authorities for authenticating X509-SVIDs. 
+ X509Authorities []*X509Certificate `protobuf:"bytes,2,rep,name=x509_authorities,json=x509Authorities,proto3" json:"x509_authorities,omitempty"` + // JWT authorities for authenticating JWT-SVIDs. + JwtAuthorities []*JWTKey `protobuf:"bytes,3,rep,name=jwt_authorities,json=jwtAuthorities,proto3" json:"jwt_authorities,omitempty"` + // A hint on how often the bundle should be refreshed from the bundle + // provider, in seconds. Can be zero (meaning no hint available). + RefreshHint int64 `protobuf:"varint,4,opt,name=refresh_hint,json=refreshHint,proto3" json:"refresh_hint,omitempty"` + // The sequence number of the bundle. + SequenceNumber uint64 `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` +} + +func (x *Bundle) Reset() { + *x = Bundle{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_bundle_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bundle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bundle) ProtoMessage() {} + +func (x *Bundle) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_bundle_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bundle.ProtoReflect.Descriptor instead. 
+func (*Bundle) Descriptor() ([]byte, []int) { + return file_spire_api_types_bundle_proto_rawDescGZIP(), []int{0} +} + +func (x *Bundle) GetTrustDomain() string { + if x != nil { + return x.TrustDomain + } + return "" +} + +func (x *Bundle) GetX509Authorities() []*X509Certificate { + if x != nil { + return x.X509Authorities + } + return nil +} + +func (x *Bundle) GetJwtAuthorities() []*JWTKey { + if x != nil { + return x.JwtAuthorities + } + return nil +} + +func (x *Bundle) GetRefreshHint() int64 { + if x != nil { + return x.RefreshHint + } + return 0 +} + +func (x *Bundle) GetSequenceNumber() uint64 { + if x != nil { + return x.SequenceNumber + } + return 0 +} + +type X509Certificate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The ASN.1 DER encoded bytes of the X.509 certificate. + Asn1 []byte `protobuf:"bytes,1,opt,name=asn1,proto3" json:"asn1,omitempty"` +} + +func (x *X509Certificate) Reset() { + *x = X509Certificate{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_bundle_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *X509Certificate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509Certificate) ProtoMessage() {} + +func (x *X509Certificate) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_bundle_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509Certificate.ProtoReflect.Descriptor instead. 
+func (*X509Certificate) Descriptor() ([]byte, []int) { + return file_spire_api_types_bundle_proto_rawDescGZIP(), []int{1} +} + +func (x *X509Certificate) GetAsn1() []byte { + if x != nil { + return x.Asn1 + } + return nil +} + +type JWTKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The PKIX encoded public key. + PublicKey []byte `protobuf:"bytes,1,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + // The key identifier. + KeyId string `protobuf:"bytes,2,opt,name=key_id,json=keyId,proto3" json:"key_id,omitempty"` + // When the key expires (seconds since Unix epoch). If zero, the key does + // not expire. + ExpiresAt int64 `protobuf:"varint,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` +} + +func (x *JWTKey) Reset() { + *x = JWTKey{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_bundle_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JWTKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JWTKey) ProtoMessage() {} + +func (x *JWTKey) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_bundle_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JWTKey.ProtoReflect.Descriptor instead. 
+func (*JWTKey) Descriptor() ([]byte, []int) { + return file_spire_api_types_bundle_proto_rawDescGZIP(), []int{2} +} + +func (x *JWTKey) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} + +func (x *JWTKey) GetKeyId() string { + if x != nil { + return x.KeyId + } + return "" +} + +func (x *JWTKey) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +type BundleMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // x509_authorities field mask. + X509Authorities bool `protobuf:"varint,2,opt,name=x509_authorities,json=x509Authorities,proto3" json:"x509_authorities,omitempty"` + // jwt_authorities field mask. + JwtAuthorities bool `protobuf:"varint,3,opt,name=jwt_authorities,json=jwtAuthorities,proto3" json:"jwt_authorities,omitempty"` + // refresh_hint field mask. + RefreshHint bool `protobuf:"varint,4,opt,name=refresh_hint,json=refreshHint,proto3" json:"refresh_hint,omitempty"` + // sequence_number field mask. + SequenceNumber bool `protobuf:"varint,5,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` +} + +func (x *BundleMask) Reset() { + *x = BundleMask{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_bundle_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BundleMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BundleMask) ProtoMessage() {} + +func (x *BundleMask) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_bundle_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BundleMask.ProtoReflect.Descriptor instead. 
+func (*BundleMask) Descriptor() ([]byte, []int) { + return file_spire_api_types_bundle_proto_rawDescGZIP(), []int{3} +} + +func (x *BundleMask) GetX509Authorities() bool { + if x != nil { + return x.X509Authorities + } + return false +} + +func (x *BundleMask) GetJwtAuthorities() bool { + if x != nil { + return x.JwtAuthorities + } + return false +} + +func (x *BundleMask) GetRefreshHint() bool { + if x != nil { + return x.RefreshHint + } + return false +} + +func (x *BundleMask) GetSequenceNumber() bool { + if x != nil { + return x.SequenceNumber + } + return false +} + +var File_spire_api_types_bundle_proto protoreflect.FileDescriptor + +var file_spire_api_types_bundle_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, + 0x86, 0x02, 0x0a, 0x06, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x72, + 0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x4b, 0x0a, + 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x58, 0x35, 0x30, 0x39, 0x43, 0x65, + 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x41, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x0f, 0x6a, 0x77, + 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 
0x65, 0x73, 0x2e, 0x4a, 0x57, 0x54, 0x4b, 0x65, 0x79, 0x52, 0x0e, 0x6a, 0x77, + 0x74, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, + 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x68, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x0b, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x48, 0x69, 0x6e, 0x74, 0x12, + 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, + 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x25, 0x0a, 0x0f, 0x58, 0x35, 0x30, 0x39, + 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x61, + 0x73, 0x6e, 0x31, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x73, 0x6e, 0x31, 0x22, + 0x5d, 0x0a, 0x06, 0x4a, 0x57, 0x54, 0x4b, 0x65, 0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x15, 0x0a, 0x06, 0x6b, 0x65, 0x79, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6b, 0x65, 0x79, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x22, 0xac, + 0x01, 0x0a, 0x0a, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x29, 0x0a, + 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x41, 0x75, 0x74, + 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6a, 0x77, 0x74, 0x5f, + 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0e, 0x6a, 0x77, 0x74, 0x41, 0x75, 0x74, 
0x68, 0x6f, 0x72, 0x69, 0x74, 0x69, 0x65, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x5f, 0x68, 0x69, 0x6e, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, + 0x48, 0x69, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, + 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x73, + 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x42, 0x37, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_bundle_proto_rawDescOnce sync.Once + file_spire_api_types_bundle_proto_rawDescData = file_spire_api_types_bundle_proto_rawDesc +) + +func file_spire_api_types_bundle_proto_rawDescGZIP() []byte { + file_spire_api_types_bundle_proto_rawDescOnce.Do(func() { + file_spire_api_types_bundle_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_bundle_proto_rawDescData) + }) + return file_spire_api_types_bundle_proto_rawDescData +} + +var file_spire_api_types_bundle_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_spire_api_types_bundle_proto_goTypes = []interface{}{ + (*Bundle)(nil), // 0: spire.api.types.Bundle + (*X509Certificate)(nil), // 1: spire.api.types.X509Certificate + (*JWTKey)(nil), // 2: spire.api.types.JWTKey + (*BundleMask)(nil), // 3: spire.api.types.BundleMask +} +var file_spire_api_types_bundle_proto_depIdxs = []int32{ + 1, // 0: spire.api.types.Bundle.x509_authorities:type_name -> spire.api.types.X509Certificate + 2, // 1: spire.api.types.Bundle.jwt_authorities:type_name -> spire.api.types.JWTKey + 2, 
// [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_spire_api_types_bundle_proto_init() } +func file_spire_api_types_bundle_proto_init() { + if File_spire_api_types_bundle_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_bundle_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bundle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_bundle_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509Certificate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_bundle_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JWTKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_bundle_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BundleMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_bundle_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_bundle_proto_goTypes, + DependencyIndexes: file_spire_api_types_bundle_proto_depIdxs, + MessageInfos: file_spire_api_types_bundle_proto_msgTypes, + }.Build() + 
File_spire_api_types_bundle_proto = out.File + file_spire_api_types_bundle_proto_rawDesc = nil + file_spire_api_types_bundle_proto_goTypes = nil + file_spire_api_types_bundle_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.proto new file mode 100644 index 0000000000..435f33958c --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/bundle.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message Bundle { + // The name of the trust domain the bundle belongs to (e.g., "example.org"). + string trust_domain = 1; + + // X.509 authorities for authenticating X509-SVIDs. + repeated X509Certificate x509_authorities = 2; + + // JWT authorities for authenticating JWT-SVIDs. + repeated JWTKey jwt_authorities = 3; + + // A hint on how often the bundle should be refreshed from the bundle + // provider, in seconds. Can be zero (meaning no hint available). + int64 refresh_hint = 4; + + // The sequence number of the bundle. + uint64 sequence_number = 5; +} + +message X509Certificate { + // The ASN.1 DER encoded bytes of the X.509 certificate. + bytes asn1 = 1; +} + +message JWTKey { + // The PKIX encoded public key. + bytes public_key = 1; + + // The key identifier. + string key_id = 2; + + // When the key expires (seconds since Unix epoch). If zero, the key does + // not expire. + int64 expires_at = 3; +} + +message BundleMask { + // x509_authorities field mask. + bool x509_authorities = 2; + + // jwt_authorities field mask. + bool jwt_authorities = 3; + + // refresh_hint field mask. + bool refresh_hint = 4; + + // sequence_number field mask. 
+ bool sequence_number = 5; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.pb.go new file mode 100644 index 0000000000..383024ad00 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.pb.go @@ -0,0 +1,467 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/entry.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Entry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Globally unique ID for the entry. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // The SPIFFE ID of the identity described by this entry. + SpiffeId *SPIFFEID `protobuf:"bytes,2,opt,name=spiffe_id,json=spiffeId,proto3" json:"spiffe_id,omitempty"` + // Who the entry is delegated to. If the entry describes a node, this is + // set to the SPIFFE ID of the SPIRE server of the trust domain (e.g. + // spiffe://example.org/spire/server). Otherwise, it will be set to a node + // SPIFFE ID. + ParentId *SPIFFEID `protobuf:"bytes,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` + // The selectors which identify which entities match this entry. If this is + // an entry for a node, these selectors represent selectors produced by + // node attestation. Otherwise, these selectors represent those produced by + // workload attestation. 
+ Selectors []*Selector `protobuf:"bytes,4,rep,name=selectors,proto3" json:"selectors,omitempty"` + // The time to live for identities issued for this entry (in seconds). + Ttl int32 `protobuf:"varint,5,opt,name=ttl,proto3" json:"ttl,omitempty"` + // The names of trust domains the identity described by this entry + // federates with. + FederatesWith []string `protobuf:"bytes,6,rep,name=federates_with,json=federatesWith,proto3" json:"federates_with,omitempty"` + // Whether or not the identity described by this entry is an administrative + // workload. Administrative workloads are granted additional access to + // various managerial server APIs, such as entry registration. + Admin bool `protobuf:"varint,7,opt,name=admin,proto3" json:"admin,omitempty"` + // Whether or not the identity described by this entry represents a + // downstream SPIRE server. Downstream SPIRE servers have additional access + // to various signing APIs, such as those used to sign X.509 CA + // certificates and publish JWT signing keys. + Downstream bool `protobuf:"varint,8,opt,name=downstream,proto3" json:"downstream,omitempty"` + // When the entry expires (seconds since Unix epoch). + ExpiresAt int64 `protobuf:"varint,9,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + // A list of DNS names associated with the identity described by this entry. 
+ DnsNames []string `protobuf:"bytes,10,rep,name=dns_names,json=dnsNames,proto3" json:"dns_names,omitempty"` + // Revision number is bumped every time the entry is updated + RevisionNumber int64 `protobuf:"varint,11,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + // Determines if the issued identity is exportable to a store + StoreSvid bool `protobuf:"varint,12,opt,name=store_svid,json=storeSvid,proto3" json:"store_svid,omitempty"` +} + +func (x *Entry) Reset() { + *x = Entry{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_entry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Entry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Entry) ProtoMessage() {} + +func (x *Entry) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_entry_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Entry.ProtoReflect.Descriptor instead. 
+func (*Entry) Descriptor() ([]byte, []int) { + return file_spire_api_types_entry_proto_rawDescGZIP(), []int{0} +} + +func (x *Entry) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Entry) GetSpiffeId() *SPIFFEID { + if x != nil { + return x.SpiffeId + } + return nil +} + +func (x *Entry) GetParentId() *SPIFFEID { + if x != nil { + return x.ParentId + } + return nil +} + +func (x *Entry) GetSelectors() []*Selector { + if x != nil { + return x.Selectors + } + return nil +} + +func (x *Entry) GetTtl() int32 { + if x != nil { + return x.Ttl + } + return 0 +} + +func (x *Entry) GetFederatesWith() []string { + if x != nil { + return x.FederatesWith + } + return nil +} + +func (x *Entry) GetAdmin() bool { + if x != nil { + return x.Admin + } + return false +} + +func (x *Entry) GetDownstream() bool { + if x != nil { + return x.Downstream + } + return false +} + +func (x *Entry) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +func (x *Entry) GetDnsNames() []string { + if x != nil { + return x.DnsNames + } + return nil +} + +func (x *Entry) GetRevisionNumber() int64 { + if x != nil { + return x.RevisionNumber + } + return 0 +} + +func (x *Entry) GetStoreSvid() bool { + if x != nil { + return x.StoreSvid + } + return false +} + +// Field mask for Entry fields +type EntryMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // spiffe_id field mask + SpiffeId bool `protobuf:"varint,2,opt,name=spiffe_id,json=spiffeId,proto3" json:"spiffe_id,omitempty"` + // parent_id field mask + ParentId bool `protobuf:"varint,3,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` + // selectors field mask + Selectors bool `protobuf:"varint,4,opt,name=selectors,proto3" json:"selectors,omitempty"` + // ttl field mask + Ttl bool `protobuf:"varint,5,opt,name=ttl,proto3" json:"ttl,omitempty"` + // federates_with field mask + FederatesWith bool 
`protobuf:"varint,6,opt,name=federates_with,json=federatesWith,proto3" json:"federates_with,omitempty"` + // admin field mask + Admin bool `protobuf:"varint,7,opt,name=admin,proto3" json:"admin,omitempty"` + // downstream field mask + Downstream bool `protobuf:"varint,8,opt,name=downstream,proto3" json:"downstream,omitempty"` + // expires_at field mask + ExpiresAt bool `protobuf:"varint,9,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + // dns_names field mask + DnsNames bool `protobuf:"varint,10,opt,name=dns_names,json=dnsNames,proto3" json:"dns_names,omitempty"` + // revision_number field mask + RevisionNumber bool `protobuf:"varint,11,opt,name=revision_number,json=revisionNumber,proto3" json:"revision_number,omitempty"` + // store_svid field mask + StoreSvid bool `protobuf:"varint,12,opt,name=store_svid,json=storeSvid,proto3" json:"store_svid,omitempty"` +} + +func (x *EntryMask) Reset() { + *x = EntryMask{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_entry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EntryMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EntryMask) ProtoMessage() {} + +func (x *EntryMask) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_entry_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EntryMask.ProtoReflect.Descriptor instead. 
+func (*EntryMask) Descriptor() ([]byte, []int) { + return file_spire_api_types_entry_proto_rawDescGZIP(), []int{1} +} + +func (x *EntryMask) GetSpiffeId() bool { + if x != nil { + return x.SpiffeId + } + return false +} + +func (x *EntryMask) GetParentId() bool { + if x != nil { + return x.ParentId + } + return false +} + +func (x *EntryMask) GetSelectors() bool { + if x != nil { + return x.Selectors + } + return false +} + +func (x *EntryMask) GetTtl() bool { + if x != nil { + return x.Ttl + } + return false +} + +func (x *EntryMask) GetFederatesWith() bool { + if x != nil { + return x.FederatesWith + } + return false +} + +func (x *EntryMask) GetAdmin() bool { + if x != nil { + return x.Admin + } + return false +} + +func (x *EntryMask) GetDownstream() bool { + if x != nil { + return x.Downstream + } + return false +} + +func (x *EntryMask) GetExpiresAt() bool { + if x != nil { + return x.ExpiresAt + } + return false +} + +func (x *EntryMask) GetDnsNames() bool { + if x != nil { + return x.DnsNames + } + return false +} + +func (x *EntryMask) GetRevisionNumber() bool { + if x != nil { + return x.RevisionNumber + } + return false +} + +func (x *EntryMask) GetStoreSvid() bool { + if x != nil { + return x.StoreSvid + } + return false +} + +var File_spire_api_types_entry_proto protoreflect.FileDescriptor + +var file_spire_api_types_entry_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x1e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x73, 0x70, 0x69, 0x66, 
0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, + 0x03, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x36, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, + 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, + 0x12, 0x36, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x52, 0x08, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, + 0x74, 0x74, 0x6c, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, + 0x5f, 0x77, 0x69, 0x74, 0x68, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x66, 0x65, 0x64, + 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 
0x73, 0x5f, 0x61, 0x74, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x08, 0x64, 0x6e, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, + 0x76, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x53, 0x76, 0x69, 0x64, 0x22, 0xd6, 0x02, 0x0a, 0x09, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x4d, 0x61, + 0x73, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x12, + 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, + 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x74, 0x74, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x74, 0x74, 0x6c, 0x12, 0x25, 0x0a, 0x0e, + 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x57, + 0x69, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x6f, 0x77, + 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, + 
0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x64, 0x6e, 0x73, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x6e, 0x73, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, + 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x76, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x76, 0x69, 0x64, 0x42, 0x37, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_entry_proto_rawDescOnce sync.Once + file_spire_api_types_entry_proto_rawDescData = file_spire_api_types_entry_proto_rawDesc +) + +func file_spire_api_types_entry_proto_rawDescGZIP() []byte { + file_spire_api_types_entry_proto_rawDescOnce.Do(func() { + file_spire_api_types_entry_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_entry_proto_rawDescData) + }) + return file_spire_api_types_entry_proto_rawDescData +} + +var file_spire_api_types_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_spire_api_types_entry_proto_goTypes = []interface{}{ + (*Entry)(nil), // 0: spire.api.types.Entry + (*EntryMask)(nil), // 1: spire.api.types.EntryMask + (*SPIFFEID)(nil), // 2: 
spire.api.types.SPIFFEID + (*Selector)(nil), // 3: spire.api.types.Selector +} +var file_spire_api_types_entry_proto_depIdxs = []int32{ + 2, // 0: spire.api.types.Entry.spiffe_id:type_name -> spire.api.types.SPIFFEID + 2, // 1: spire.api.types.Entry.parent_id:type_name -> spire.api.types.SPIFFEID + 3, // 2: spire.api.types.Entry.selectors:type_name -> spire.api.types.Selector + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_spire_api_types_entry_proto_init() } +func file_spire_api_types_entry_proto_init() { + if File_spire_api_types_entry_proto != nil { + return + } + file_spire_api_types_selector_proto_init() + file_spire_api_types_spiffeid_proto_init() + if !protoimpl.UnsafeEnabled { + file_spire_api_types_entry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Entry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_entry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EntryMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_entry_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_entry_proto_goTypes, + DependencyIndexes: file_spire_api_types_entry_proto_depIdxs, + MessageInfos: file_spire_api_types_entry_proto_msgTypes, + }.Build() + File_spire_api_types_entry_proto = out.File + file_spire_api_types_entry_proto_rawDesc = 
nil + file_spire_api_types_entry_proto_goTypes = nil + file_spire_api_types_entry_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.proto new file mode 100644 index 0000000000..49fa467172 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/entry.proto @@ -0,0 +1,92 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +import "spire/api/types/selector.proto"; +import "spire/api/types/spiffeid.proto"; + +message Entry { + // Globally unique ID for the entry. + string id = 1; + + // The SPIFFE ID of the identity described by this entry. + spire.api.types.SPIFFEID spiffe_id = 2; + + // Who the entry is delegated to. If the entry describes a node, this is + // set to the SPIFFE ID of the SPIRE server of the trust domain (e.g. + // spiffe://example.org/spire/server). Otherwise, it will be set to a node + // SPIFFE ID. + spire.api.types.SPIFFEID parent_id = 3; + + // The selectors which identify which entities match this entry. If this is + // an entry for a node, these selectors represent selectors produced by + // node attestation. Otherwise, these selectors represent those produced by + // workload attestation. + repeated spire.api.types.Selector selectors = 4; + + // The time to live for identities issued for this entry (in seconds). + int32 ttl = 5; + + // The names of trust domains the identity described by this entry + // federates with. + repeated string federates_with = 6; + + // Whether or not the identity described by this entry is an administrative + // workload. Administrative workloads are granted additional access to + // various managerial server APIs, such as entry registration. + bool admin = 7; + + // Whether or not the identity described by this entry represents a + // downstream SPIRE server. 
Downstream SPIRE servers have additional access + // to various signing APIs, such as those used to sign X.509 CA + // certificates and publish JWT signing keys. + bool downstream = 8; + + // When the entry expires (seconds since Unix epoch). + int64 expires_at = 9; + + // A list of DNS names associated with the identity described by this entry. + repeated string dns_names = 10; + + // Revision number is bumped every time the entry is updated + int64 revision_number = 11; + + // Determines if the issued identity is exportable to a store + bool store_svid = 12; +} + +// Field mask for Entry fields +message EntryMask { + // spiffe_id field mask + bool spiffe_id = 2; + + // parent_id field mask + bool parent_id = 3; + + // selectors field mask + bool selectors = 4; + + // ttl field mask + bool ttl = 5; + + // federates_with field mask + bool federates_with = 6; + + // admin field mask + bool admin = 7; + + // downstream field mask + bool downstream = 8; + + // expires_at field mask + bool expires_at = 9; + + // dns_names field mask + bool dns_names = 10; + + // revision_number field mask + bool revision_number = 11; + + // store_svid field mask + bool store_svid = 12; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.pb.go new file mode 100644 index 0000000000..ad5cfff881 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.pb.go @@ -0,0 +1,271 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/federateswith.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FederatesWithMatch_MatchBehavior int32 + +const ( + // Indicates that the federated trust domains in this match are + // equal to the candidate trust domains, independent of ordering. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_EXACT ["spiffe://td1", "spiffe://td2"] + // Entries that match: + // - 'e2' + FederatesWithMatch_MATCH_EXACT FederatesWithMatch_MatchBehavior = 0 + // Indicates that all candidates which have a non-empty subset + // of the provided set of trust domains will match. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_SUBSET ["spiffe://td1"] + // Entries that match: + // - 'e1' + FederatesWithMatch_MATCH_SUBSET FederatesWithMatch_MatchBehavior = 1 + // Indicate that all candidates which are a superset + // of the provided set of trust domains will match. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_SUPERSET ["spiffe://td1", "spiffe://td2"] + // Entries that match: + // - 'e1' + // - 'e2' + FederatesWithMatch_MATCH_SUPERSET FederatesWithMatch_MatchBehavior = 2 + // Indicates that all candidates which have at least one + // of the provided set of trust domains will match. 
+ // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_ANY ["spiffe://td1"] + // Entries that match: + // - 'e1' + // - 'e2' + // - 'e3' + FederatesWithMatch_MATCH_ANY FederatesWithMatch_MatchBehavior = 3 +) + +// Enum value maps for FederatesWithMatch_MatchBehavior. +var ( + FederatesWithMatch_MatchBehavior_name = map[int32]string{ + 0: "MATCH_EXACT", + 1: "MATCH_SUBSET", + 2: "MATCH_SUPERSET", + 3: "MATCH_ANY", + } + FederatesWithMatch_MatchBehavior_value = map[string]int32{ + "MATCH_EXACT": 0, + "MATCH_SUBSET": 1, + "MATCH_SUPERSET": 2, + "MATCH_ANY": 3, + } +) + +func (x FederatesWithMatch_MatchBehavior) Enum() *FederatesWithMatch_MatchBehavior { + p := new(FederatesWithMatch_MatchBehavior) + *p = x + return p +} + +func (x FederatesWithMatch_MatchBehavior) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FederatesWithMatch_MatchBehavior) Descriptor() protoreflect.EnumDescriptor { + return file_spire_api_types_federateswith_proto_enumTypes[0].Descriptor() +} + +func (FederatesWithMatch_MatchBehavior) Type() protoreflect.EnumType { + return &file_spire_api_types_federateswith_proto_enumTypes[0] +} + +func (x FederatesWithMatch_MatchBehavior) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FederatesWithMatch_MatchBehavior.Descriptor instead. +func (FederatesWithMatch_MatchBehavior) EnumDescriptor() ([]byte, []int) { + return file_spire_api_types_federateswith_proto_rawDescGZIP(), []int{0, 0} +} + +type FederatesWithMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of trust domain names to match on (e.g., "example.org"). 
+ TrustDomains []string `protobuf:"bytes,1,rep,name=trust_domains,json=trustDomains,proto3" json:"trust_domains,omitempty"` + // How to match the trust domains. + Match FederatesWithMatch_MatchBehavior `protobuf:"varint,2,opt,name=match,proto3,enum=spire.api.types.FederatesWithMatch_MatchBehavior" json:"match,omitempty"` +} + +func (x *FederatesWithMatch) Reset() { + *x = FederatesWithMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_federateswith_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FederatesWithMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FederatesWithMatch) ProtoMessage() {} + +func (x *FederatesWithMatch) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_federateswith_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FederatesWithMatch.ProtoReflect.Descriptor instead. 
+func (*FederatesWithMatch) Descriptor() ([]byte, []int) { + return file_spire_api_types_federateswith_proto_rawDescGZIP(), []int{0} +} + +func (x *FederatesWithMatch) GetTrustDomains() []string { + if x != nil { + return x.TrustDomains + } + return nil +} + +func (x *FederatesWithMatch) GetMatch() FederatesWithMatch_MatchBehavior { + if x != nil { + return x.Match + } + return FederatesWithMatch_MATCH_EXACT +} + +var File_spire_api_types_federateswith_proto protoreflect.FileDescriptor + +var file_spire_api_types_federateswith_proto_rawDesc = []byte{ + 0x0a, 0x23, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x77, 0x69, 0x74, 0x68, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0xd9, 0x01, 0x0a, 0x12, 0x46, 0x65, 0x64, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x23, 0x0a, + 0x0d, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x73, 0x12, 0x47, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x31, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x65, 0x73, 0x57, 0x69, 0x74, + 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x65, 0x68, 0x61, + 0x76, 0x69, 0x6f, 0x72, 0x52, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x55, 0x0a, 0x0d, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x0f, 0x0a, 0x0b, + 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x00, 0x12, 0x10, 0x0a, + 0x0c, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x55, 0x42, 0x53, 
0x45, 0x54, 0x10, 0x01, 0x12, + 0x12, 0x0a, 0x0e, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x53, 0x55, 0x50, 0x45, 0x52, 0x53, 0x45, + 0x54, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x41, 0x4e, 0x59, + 0x10, 0x03, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, + 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_federateswith_proto_rawDescOnce sync.Once + file_spire_api_types_federateswith_proto_rawDescData = file_spire_api_types_federateswith_proto_rawDesc +) + +func file_spire_api_types_federateswith_proto_rawDescGZIP() []byte { + file_spire_api_types_federateswith_proto_rawDescOnce.Do(func() { + file_spire_api_types_federateswith_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_federateswith_proto_rawDescData) + }) + return file_spire_api_types_federateswith_proto_rawDescData +} + +var file_spire_api_types_federateswith_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_spire_api_types_federateswith_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_federateswith_proto_goTypes = []interface{}{ + (FederatesWithMatch_MatchBehavior)(0), // 0: spire.api.types.FederatesWithMatch.MatchBehavior + (*FederatesWithMatch)(nil), // 1: spire.api.types.FederatesWithMatch +} +var file_spire_api_types_federateswith_proto_depIdxs = []int32{ + 0, // 0: spire.api.types.FederatesWithMatch.match:type_name -> spire.api.types.FederatesWithMatch.MatchBehavior + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is 
the sub-list for field type_name +} + +func init() { file_spire_api_types_federateswith_proto_init() } +func file_spire_api_types_federateswith_proto_init() { + if File_spire_api_types_federateswith_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_federateswith_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FederatesWithMatch); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_federateswith_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_federateswith_proto_goTypes, + DependencyIndexes: file_spire_api_types_federateswith_proto_depIdxs, + EnumInfos: file_spire_api_types_federateswith_proto_enumTypes, + MessageInfos: file_spire_api_types_federateswith_proto_msgTypes, + }.Build() + File_spire_api_types_federateswith_proto = out.File + file_spire_api_types_federateswith_proto_rawDesc = nil + file_spire_api_types_federateswith_proto_goTypes = nil + file_spire_api_types_federateswith_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.proto new file mode 100644 index 0000000000..a5be5f85b2 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federateswith.proto @@ -0,0 +1,68 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message FederatesWithMatch { + enum MatchBehavior { + // Indicates that the federated trust domains in this match are + // equal to the candidate trust domains, independent of ordering. 
+ // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_EXACT ["spiffe://td1", "spiffe://td2"] + // Entries that match: + // - 'e2' + MATCH_EXACT = 0; + + // Indicates that all candidates which have a non-empty subset + // of the provided set of trust domains will match. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_SUBSET ["spiffe://td1"] + // Entries that match: + // - 'e1' + MATCH_SUBSET = 1; + + // Indicate that all candidates which are a superset + // of the provided set of trust domains will match. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_SUPERSET ["spiffe://td1", "spiffe://td2"] + // Entries that match: + // - 'e1' + // - 'e2' + MATCH_SUPERSET = 2; + + // Indicates that all candidates which have at least one + // of the provided set of trust domains will match. + // Example: + // Given: + // - e1 { FederatesWith: ["spiffe://td1", "spiffe://td2", "spiffe://td3"]} + // - e2 { FederatesWith: ["spiffe://td1", "spiffe://td2"]} + // - e3 { FederatesWith: ["spiffe://td1"]} + // Operation: + // - MATCH_ANY ["spiffe://td1"] + // Entries that match: + // - 'e1' + // - 'e2' + // - 'e3' + MATCH_ANY = 3; + } + + // The set of trust domain names to match on (e.g., "example.org"). + repeated string trust_domains = 1; + + // How to match the trust domains. 
+ MatchBehavior match = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.pb.go new file mode 100644 index 0000000000..ee4b0a57a7 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.pb.go @@ -0,0 +1,464 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/federationrelationship.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type FederationRelationship struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The trust domain name (e.g., "example.org") to federate with. + TrustDomain string `protobuf:"bytes,1,opt,name=trust_domain,json=trustDomain,proto3" json:"trust_domain,omitempty"` + // Required. URL of the SPIFFE bundle endpoint that provides the trust + // bundle to federate with. Must use the HTTPS protocol. + BundleEndpointUrl string `protobuf:"bytes,2,opt,name=bundle_endpoint_url,json=bundleEndpointUrl,proto3" json:"bundle_endpoint_url,omitempty"` + // Required. The endpoint profile type. + // + // Types that are assignable to BundleEndpointProfile: + // *FederationRelationship_HttpsWeb + // *FederationRelationship_HttpsSpiffe + BundleEndpointProfile isFederationRelationship_BundleEndpointProfile `protobuf_oneof:"bundle_endpoint_profile"` + // Optional. The bundle for the trust domain. 
This field can be used to + // create or replace the referenced trust domains' bundle when the + // relationship is created or updated. When the relationship is retrieved, + // it will be set to the referenced trust domain's latest bundle (if + // available). Please note that the `https_spiffe` profile requires an + // existing trust domain bundle in order to function correctly. The + // required bundle must match the trust domain specified in the bundle + // endpoint SPIFFE ID. If the bundle endpoint SPIFFE ID resides in the same + // trust domain that you're trying to federate with, you may optionally + // specify that trust domain bundle here. If the bundle endpoint SPIFFE ID + // _does not_ reside in the same trust domain that you're federating with, + // please ensure that the trust domain bundle for that trust domain has + // been configured separately (e.g. configured via another federation + // relationship or manually set via the Bundle API). + TrustDomainBundle *Bundle `protobuf:"bytes,5,opt,name=trust_domain_bundle,json=trustDomainBundle,proto3" json:"trust_domain_bundle,omitempty"` +} + +func (x *FederationRelationship) Reset() { + *x = FederationRelationship{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FederationRelationship) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FederationRelationship) ProtoMessage() {} + +func (x *FederationRelationship) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FederationRelationship.ProtoReflect.Descriptor instead. 
+func (*FederationRelationship) Descriptor() ([]byte, []int) { + return file_spire_api_types_federationrelationship_proto_rawDescGZIP(), []int{0} +} + +func (x *FederationRelationship) GetTrustDomain() string { + if x != nil { + return x.TrustDomain + } + return "" +} + +func (x *FederationRelationship) GetBundleEndpointUrl() string { + if x != nil { + return x.BundleEndpointUrl + } + return "" +} + +func (m *FederationRelationship) GetBundleEndpointProfile() isFederationRelationship_BundleEndpointProfile { + if m != nil { + return m.BundleEndpointProfile + } + return nil +} + +func (x *FederationRelationship) GetHttpsWeb() *HTTPSWebProfile { + if x, ok := x.GetBundleEndpointProfile().(*FederationRelationship_HttpsWeb); ok { + return x.HttpsWeb + } + return nil +} + +func (x *FederationRelationship) GetHttpsSpiffe() *HTTPSSPIFFEProfile { + if x, ok := x.GetBundleEndpointProfile().(*FederationRelationship_HttpsSpiffe); ok { + return x.HttpsSpiffe + } + return nil +} + +func (x *FederationRelationship) GetTrustDomainBundle() *Bundle { + if x != nil { + return x.TrustDomainBundle + } + return nil +} + +type isFederationRelationship_BundleEndpointProfile interface { + isFederationRelationship_BundleEndpointProfile() +} + +type FederationRelationship_HttpsWeb struct { + // Use Web PKI endpoint profile. + HttpsWeb *HTTPSWebProfile `protobuf:"bytes,3,opt,name=https_web,json=httpsWeb,proto3,oneof"` +} + +type FederationRelationship_HttpsSpiffe struct { + // Use SPIFFE Authentication endpoint profile. + HttpsSpiffe *HTTPSSPIFFEProfile `protobuf:"bytes,4,opt,name=https_spiffe,json=httpsSpiffe,proto3,oneof"` +} + +func (*FederationRelationship_HttpsWeb) isFederationRelationship_BundleEndpointProfile() {} + +func (*FederationRelationship_HttpsSpiffe) isFederationRelationship_BundleEndpointProfile() {} + +type HTTPSSPIFFEProfile struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
Specifies the expected SPIFFE ID of the SPIFFE bundle endpoint + // server. + EndpointSpiffeId string `protobuf:"bytes,1,opt,name=endpoint_spiffe_id,json=endpointSpiffeId,proto3" json:"endpoint_spiffe_id,omitempty"` +} + +func (x *HTTPSSPIFFEProfile) Reset() { + *x = HTTPSSPIFFEProfile{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPSSPIFFEProfile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPSSPIFFEProfile) ProtoMessage() {} + +func (x *HTTPSSPIFFEProfile) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPSSPIFFEProfile.ProtoReflect.Descriptor instead. 
+func (*HTTPSSPIFFEProfile) Descriptor() ([]byte, []int) { + return file_spire_api_types_federationrelationship_proto_rawDescGZIP(), []int{1} +} + +func (x *HTTPSSPIFFEProfile) GetEndpointSpiffeId() string { + if x != nil { + return x.EndpointSpiffeId + } + return "" +} + +type HTTPSWebProfile struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *HTTPSWebProfile) Reset() { + *x = HTTPSWebProfile{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HTTPSWebProfile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HTTPSWebProfile) ProtoMessage() {} + +func (x *HTTPSWebProfile) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HTTPSWebProfile.ProtoReflect.Descriptor instead. +func (*HTTPSWebProfile) Descriptor() ([]byte, []int) { + return file_spire_api_types_federationrelationship_proto_rawDescGZIP(), []int{2} +} + +type FederationRelationshipMask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // bundle_endpoint_url field mask. + BundleEndpointUrl bool `protobuf:"varint,1,opt,name=bundle_endpoint_url,json=bundleEndpointUrl,proto3" json:"bundle_endpoint_url,omitempty"` + // bundle_endpoint_profile field mask. + BundleEndpointProfile bool `protobuf:"varint,2,opt,name=bundle_endpoint_profile,json=bundleEndpointProfile,proto3" json:"bundle_endpoint_profile,omitempty"` + // trust_domain_bundle field mask. 
+ TrustDomainBundle bool `protobuf:"varint,3,opt,name=trust_domain_bundle,json=trustDomainBundle,proto3" json:"trust_domain_bundle,omitempty"` +} + +func (x *FederationRelationshipMask) Reset() { + *x = FederationRelationshipMask{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FederationRelationshipMask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FederationRelationshipMask) ProtoMessage() {} + +func (x *FederationRelationshipMask) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_federationrelationship_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FederationRelationshipMask.ProtoReflect.Descriptor instead. 
+func (*FederationRelationshipMask) Descriptor() ([]byte, []int) { + return file_spire_api_types_federationrelationship_proto_rawDescGZIP(), []int{3} +} + +func (x *FederationRelationshipMask) GetBundleEndpointUrl() bool { + if x != nil { + return x.BundleEndpointUrl + } + return false +} + +func (x *FederationRelationshipMask) GetBundleEndpointProfile() bool { + if x != nil { + return x.BundleEndpointProfile + } + return false +} + +func (x *FederationRelationshipMask) GetTrustDomainBundle() bool { + if x != nil { + return x.TrustDomainBundle + } + return false +} + +var File_spire_api_types_federationrelationship_proto protoreflect.FileDescriptor + +var file_spire_api_types_federationrelationship_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x66, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x72, 0x65, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x68, 0x69, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, + 0x1c, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x02, + 0x0a, 0x16, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6c, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x68, 0x69, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x72, 0x6c, 0x12, 0x3f, 0x0a, 0x09, 
0x68, + 0x74, 0x74, 0x70, 0x73, 0x5f, 0x77, 0x65, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x48, 0x54, 0x54, 0x50, 0x53, 0x57, 0x65, 0x62, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x48, 0x00, 0x52, 0x08, 0x68, 0x74, 0x74, 0x70, 0x73, 0x57, 0x65, 0x62, 0x12, 0x48, 0x0a, 0x0c, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x48, 0x54, 0x54, 0x50, 0x53, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x12, 0x47, 0x0a, 0x13, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x11, 0x74, 0x72, + 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x42, + 0x19, 0x0a, 0x17, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x42, 0x0a, 0x12, 0x48, 0x54, + 0x54, 0x50, 0x53, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x12, 0x2c, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x69, + 0x66, 0x66, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x53, 0x70, 0x69, 0x66, 0x66, 0x65, 0x49, 0x64, 0x22, 0x11, + 0x0a, 0x0f, 0x48, 0x54, 0x54, 0x50, 0x53, 0x57, 0x65, 0x62, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x22, 0xb4, 0x01, 0x0a, 
0x1a, 0x46, 0x65, 0x64, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x68, 0x69, 0x70, 0x4d, 0x61, 0x73, 0x6b, + 0x12, 0x2e, 0x0a, 0x13, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x62, + 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x72, 0x6c, + 0x12, 0x36, 0x0a, 0x17, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x15, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x72, 0x75, 0x73, + 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x42, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_federationrelationship_proto_rawDescOnce sync.Once + file_spire_api_types_federationrelationship_proto_rawDescData = file_spire_api_types_federationrelationship_proto_rawDesc +) + +func file_spire_api_types_federationrelationship_proto_rawDescGZIP() []byte { + file_spire_api_types_federationrelationship_proto_rawDescOnce.Do(func() { + file_spire_api_types_federationrelationship_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_spire_api_types_federationrelationship_proto_rawDescData) + }) + return file_spire_api_types_federationrelationship_proto_rawDescData +} + +var file_spire_api_types_federationrelationship_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_spire_api_types_federationrelationship_proto_goTypes = []interface{}{ + (*FederationRelationship)(nil), // 0: spire.api.types.FederationRelationship + (*HTTPSSPIFFEProfile)(nil), // 1: spire.api.types.HTTPSSPIFFEProfile + (*HTTPSWebProfile)(nil), // 2: spire.api.types.HTTPSWebProfile + (*FederationRelationshipMask)(nil), // 3: spire.api.types.FederationRelationshipMask + (*Bundle)(nil), // 4: spire.api.types.Bundle +} +var file_spire_api_types_federationrelationship_proto_depIdxs = []int32{ + 2, // 0: spire.api.types.FederationRelationship.https_web:type_name -> spire.api.types.HTTPSWebProfile + 1, // 1: spire.api.types.FederationRelationship.https_spiffe:type_name -> spire.api.types.HTTPSSPIFFEProfile + 4, // 2: spire.api.types.FederationRelationship.trust_domain_bundle:type_name -> spire.api.types.Bundle + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_spire_api_types_federationrelationship_proto_init() } +func file_spire_api_types_federationrelationship_proto_init() { + if File_spire_api_types_federationrelationship_proto != nil { + return + } + file_spire_api_types_bundle_proto_init() + if !protoimpl.UnsafeEnabled { + file_spire_api_types_federationrelationship_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FederationRelationship); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_spire_api_types_federationrelationship_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPSSPIFFEProfile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_federationrelationship_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HTTPSWebProfile); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_federationrelationship_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FederationRelationshipMask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_spire_api_types_federationrelationship_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*FederationRelationship_HttpsWeb)(nil), + (*FederationRelationship_HttpsSpiffe)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_federationrelationship_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_federationrelationship_proto_goTypes, + DependencyIndexes: file_spire_api_types_federationrelationship_proto_depIdxs, + MessageInfos: file_spire_api_types_federationrelationship_proto_msgTypes, + }.Build() + File_spire_api_types_federationrelationship_proto = out.File + file_spire_api_types_federationrelationship_proto_rawDesc = nil + file_spire_api_types_federationrelationship_proto_goTypes = nil + file_spire_api_types_federationrelationship_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.proto 
b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.proto new file mode 100644 index 0000000000..7801e22e86 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/federationrelationship.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +import "spire/api/types/bundle.proto"; + +message FederationRelationship { + // Required. The trust domain name (e.g., "example.org") to federate with. + string trust_domain = 1; + + // Required. URL of the SPIFFE bundle endpoint that provides the trust + // bundle to federate with. Must use the HTTPS protocol. + string bundle_endpoint_url = 2; + + // Required. The endpoint profile type. + oneof bundle_endpoint_profile { + // Use Web PKI endpoint profile. + HTTPSWebProfile https_web = 3; + + // Use SPIFFE Authentication endpoint profile. + HTTPSSPIFFEProfile https_spiffe = 4; + } + + // Optional. The bundle for the trust domain. This field can be used to + // create or replace the referenced trust domains' bundle when the + // relationship is created or updated. When the relationship is retrieved, + // it will be set to the referenced trust domain's latest bundle (if + // available). Please note that the `https_spiffe` profile requires an + // existing trust domain bundle in order to function correctly. The + // required bundle must match the trust domain specified in the bundle + // endpoint SPIFFE ID. If the bundle endpoint SPIFFE ID resides in the same + // trust domain that you're trying to federate with, you may optionally + // specify that trust domain bundle here. If the bundle endpoint SPIFFE ID + // _does not_ reside in the same trust domain that you're federating with, + // please ensure that the trust domain bundle for that trust domain has + // been configured separately (e.g. configured via another federation + // relationship or manually set via the Bundle API). 
+ spire.api.types.Bundle trust_domain_bundle = 5; +} + +message HTTPSSPIFFEProfile { + // Required. Specifies the expected SPIFFE ID of the SPIFFE bundle endpoint + // server. + string endpoint_spiffe_id = 1; +} + +message HTTPSWebProfile { +} + +message FederationRelationshipMask { + // bundle_endpoint_url field mask. + bool bundle_endpoint_url = 1; + + // bundle_endpoint_profile field mask. + bool bundle_endpoint_profile = 2; + + // trust_domain_bundle field mask. + bool trust_domain_bundle = 3; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.pb.go new file mode 100644 index 0000000000..48392aa4d2 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.pb.go @@ -0,0 +1,158 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/jointoken.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type JoinToken struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value of the token. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // The token expiration (seconds since Unix epoch). 
+ ExpiresAt int64 `protobuf:"varint,2,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` +} + +func (x *JoinToken) Reset() { + *x = JoinToken{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_jointoken_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JoinToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JoinToken) ProtoMessage() {} + +func (x *JoinToken) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_jointoken_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JoinToken.ProtoReflect.Descriptor instead. +func (*JoinToken) Descriptor() ([]byte, []int) { + return file_spire_api_types_jointoken_proto_rawDescGZIP(), []int{0} +} + +func (x *JoinToken) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *JoinToken) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +var File_spire_api_types_jointoken_proto protoreflect.FileDescriptor + +var file_spire_api_types_jointoken_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x6a, 0x6f, 0x69, 0x6e, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x22, 0x40, 0x0a, 0x09, 0x4a, 0x6f, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, + 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 
+ 0x65, 0x73, 0x41, 0x74, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, + 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_jointoken_proto_rawDescOnce sync.Once + file_spire_api_types_jointoken_proto_rawDescData = file_spire_api_types_jointoken_proto_rawDesc +) + +func file_spire_api_types_jointoken_proto_rawDescGZIP() []byte { + file_spire_api_types_jointoken_proto_rawDescOnce.Do(func() { + file_spire_api_types_jointoken_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_jointoken_proto_rawDescData) + }) + return file_spire_api_types_jointoken_proto_rawDescData +} + +var file_spire_api_types_jointoken_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_jointoken_proto_goTypes = []interface{}{ + (*JoinToken)(nil), // 0: spire.api.types.JoinToken +} +var file_spire_api_types_jointoken_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_spire_api_types_jointoken_proto_init() } +func file_spire_api_types_jointoken_proto_init() { + if File_spire_api_types_jointoken_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_jointoken_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JoinToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: 
protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_jointoken_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_jointoken_proto_goTypes, + DependencyIndexes: file_spire_api_types_jointoken_proto_depIdxs, + MessageInfos: file_spire_api_types_jointoken_proto_msgTypes, + }.Build() + File_spire_api_types_jointoken_proto = out.File + file_spire_api_types_jointoken_proto_rawDesc = nil + file_spire_api_types_jointoken_proto_goTypes = nil + file_spire_api_types_jointoken_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.proto new file mode 100644 index 0000000000..f12a9a30ad --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jointoken.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message JoinToken { + // The value of the token. + string value = 1; + + // The token expiration (seconds since Unix epoch). + int64 expires_at = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.pb.go new file mode 100644 index 0000000000..ec9f7cbfa8 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.pb.go @@ -0,0 +1,187 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/jwtsvid.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// JWT SPIFFE Verifiable Identity Document. It contains the raw JWT token +// as well as a few denormalized fields for convenience. +type JWTSVID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The serialized JWT token. + Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"` + // The SPIFFE ID of the JWT-SVID. + Id *SPIFFEID `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // Expiration timestamp (seconds since Unix epoch). + ExpiresAt int64 `protobuf:"varint,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + // Issuance timestamp (seconds since Unix epoch). + IssuedAt int64 `protobuf:"varint,4,opt,name=issued_at,json=issuedAt,proto3" json:"issued_at,omitempty"` +} + +func (x *JWTSVID) Reset() { + *x = JWTSVID{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_jwtsvid_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JWTSVID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JWTSVID) ProtoMessage() {} + +func (x *JWTSVID) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_jwtsvid_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JWTSVID.ProtoReflect.Descriptor instead. 
+func (*JWTSVID) Descriptor() ([]byte, []int) { + return file_spire_api_types_jwtsvid_proto_rawDescGZIP(), []int{0} +} + +func (x *JWTSVID) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *JWTSVID) GetId() *SPIFFEID { + if x != nil { + return x.Id + } + return nil +} + +func (x *JWTSVID) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +func (x *JWTSVID) GetIssuedAt() int64 { + if x != nil { + return x.IssuedAt + } + return 0 +} + +var File_spire_api_types_jwtsvid_proto protoreflect.FileDescriptor + +var file_spire_api_types_jwtsvid_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x6a, 0x77, 0x74, 0x73, 0x76, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x1a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x86, 0x01, 0x0a, 0x07, 0x4a, 0x57, 0x54, 0x53, 0x56, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x1b, 0x0a, 0x09, + 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x08, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x41, 0x74, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 
0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_jwtsvid_proto_rawDescOnce sync.Once + file_spire_api_types_jwtsvid_proto_rawDescData = file_spire_api_types_jwtsvid_proto_rawDesc +) + +func file_spire_api_types_jwtsvid_proto_rawDescGZIP() []byte { + file_spire_api_types_jwtsvid_proto_rawDescOnce.Do(func() { + file_spire_api_types_jwtsvid_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_jwtsvid_proto_rawDescData) + }) + return file_spire_api_types_jwtsvid_proto_rawDescData +} + +var file_spire_api_types_jwtsvid_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_jwtsvid_proto_goTypes = []interface{}{ + (*JWTSVID)(nil), // 0: spire.api.types.JWTSVID + (*SPIFFEID)(nil), // 1: spire.api.types.SPIFFEID +} +var file_spire_api_types_jwtsvid_proto_depIdxs = []int32{ + 1, // 0: spire.api.types.JWTSVID.id:type_name -> spire.api.types.SPIFFEID + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_spire_api_types_jwtsvid_proto_init() } +func file_spire_api_types_jwtsvid_proto_init() { + if File_spire_api_types_jwtsvid_proto != nil { + return + } + file_spire_api_types_spiffeid_proto_init() + if !protoimpl.UnsafeEnabled { + file_spire_api_types_jwtsvid_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JWTSVID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_jwtsvid_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_jwtsvid_proto_goTypes, + DependencyIndexes: file_spire_api_types_jwtsvid_proto_depIdxs, + MessageInfos: file_spire_api_types_jwtsvid_proto_msgTypes, + }.Build() + File_spire_api_types_jwtsvid_proto = out.File + file_spire_api_types_jwtsvid_proto_rawDesc = nil + file_spire_api_types_jwtsvid_proto_goTypes = nil + file_spire_api_types_jwtsvid_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.proto new file mode 100644 index 0000000000..51f840a435 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/jwtsvid.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +import "spire/api/types/spiffeid.proto"; + +// JWT SPIFFE Verifiable Identity Document. It contains the raw JWT token +// as well as a few denormalized fields for convenience. +message JWTSVID { + // The serialized JWT token. + string token = 1; + + // The SPIFFE ID of the JWT-SVID. + spire.api.types.SPIFFEID id = 2; + + // Expiration timestamp (seconds since Unix epoch). + int64 expires_at = 3; + + // Issuance timestamp (seconds since Unix epoch). + int64 issued_at = 4; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.pb.go new file mode 100644 index 0000000000..49ed9323d1 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.pb.go @@ -0,0 +1,346 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/selector.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SelectorMatch_MatchBehavior int32 + +const ( + // Indicates that the selectors in this match are equal to the + // candidate selectors, independent of ordering. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_EXACT ["a:1", "b:2"] + // Entries that match: + // - 'e2' + SelectorMatch_MATCH_EXACT SelectorMatch_MatchBehavior = 0 + // Indicates that all candidates which have a non-empty subset + // of the provided set of selectors will match. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_SUBSET ["a:1"] + // Entries that match: + // - 'e1' + SelectorMatch_MATCH_SUBSET SelectorMatch_MatchBehavior = 1 + // Indicates that all candidates which are a superset + // of the provided selectors will match. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_SUPERSET ["a:1", "b:2"] + // Entries that match: + // - 'e1' + // - 'e2' + SelectorMatch_MATCH_SUPERSET SelectorMatch_MatchBehavior = 2 + // Indicates that all candidates which have at least one + // of the provided set of selectors will match. 
+ // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_ANY ["a:1"] + // Entries that match: + // - 'e1' + // - 'e2' + // - 'e3' + SelectorMatch_MATCH_ANY SelectorMatch_MatchBehavior = 3 +) + +// Enum value maps for SelectorMatch_MatchBehavior. +var ( + SelectorMatch_MatchBehavior_name = map[int32]string{ + 0: "MATCH_EXACT", + 1: "MATCH_SUBSET", + 2: "MATCH_SUPERSET", + 3: "MATCH_ANY", + } + SelectorMatch_MatchBehavior_value = map[string]int32{ + "MATCH_EXACT": 0, + "MATCH_SUBSET": 1, + "MATCH_SUPERSET": 2, + "MATCH_ANY": 3, + } +) + +func (x SelectorMatch_MatchBehavior) Enum() *SelectorMatch_MatchBehavior { + p := new(SelectorMatch_MatchBehavior) + *p = x + return p +} + +func (x SelectorMatch_MatchBehavior) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SelectorMatch_MatchBehavior) Descriptor() protoreflect.EnumDescriptor { + return file_spire_api_types_selector_proto_enumTypes[0].Descriptor() +} + +func (SelectorMatch_MatchBehavior) Type() protoreflect.EnumType { + return &file_spire_api_types_selector_proto_enumTypes[0] +} + +func (x SelectorMatch_MatchBehavior) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SelectorMatch_MatchBehavior.Descriptor instead. +func (SelectorMatch_MatchBehavior) EnumDescriptor() ([]byte, []int) { + return file_spire_api_types_selector_proto_rawDescGZIP(), []int{1, 0} +} + +type Selector struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The type of the selector. This is typically the name of the plugin that + // produces the selector. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // The value of the selector. 
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Selector) Reset() { + *x = Selector{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_selector_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Selector) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Selector) ProtoMessage() {} + +func (x *Selector) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_selector_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Selector.ProtoReflect.Descriptor instead. +func (*Selector) Descriptor() ([]byte, []int) { + return file_spire_api_types_selector_proto_rawDescGZIP(), []int{0} +} + +func (x *Selector) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Selector) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +type SelectorMatch struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The set of selectors to match on. + Selectors []*Selector `protobuf:"bytes,1,rep,name=selectors,proto3" json:"selectors,omitempty"` + // How to match the selectors. 
+ Match SelectorMatch_MatchBehavior `protobuf:"varint,2,opt,name=match,proto3,enum=spire.api.types.SelectorMatch_MatchBehavior" json:"match,omitempty"` +} + +func (x *SelectorMatch) Reset() { + *x = SelectorMatch{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_selector_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelectorMatch) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelectorMatch) ProtoMessage() {} + +func (x *SelectorMatch) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_selector_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelectorMatch.ProtoReflect.Descriptor instead. +func (*SelectorMatch) Descriptor() ([]byte, []int) { + return file_spire_api_types_selector_proto_rawDescGZIP(), []int{1} +} + +func (x *SelectorMatch) GetSelectors() []*Selector { + if x != nil { + return x.Selectors + } + return nil +} + +func (x *SelectorMatch) GetMatch() SelectorMatch_MatchBehavior { + if x != nil { + return x.Match + } + return SelectorMatch_MATCH_EXACT +} + +var File_spire_api_types_selector_proto protoreflect.FileDescriptor + +var file_spire_api_types_selector_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x22, 0x34, 0x0a, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x01, 0x0a, 0x0d, 0x53, 0x65, 0x6c, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x37, 0x0a, 0x09, 0x73, 0x65, 0x6c, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, + 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x73, 0x12, 0x42, 0x0a, 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x2c, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, + 0x05, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x55, 0x0a, 0x0d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x41, 0x54, 0x43, 0x48, + 0x5f, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x41, 0x54, 0x43, + 0x48, 0x5f, 0x53, 0x55, 0x42, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4d, 0x41, + 0x54, 0x43, 0x48, 0x5f, 0x53, 0x55, 0x50, 0x45, 0x52, 0x53, 0x45, 0x54, 0x10, 0x02, 0x12, 0x0d, + 0x0a, 0x09, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0x03, 0x42, 0x37, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, + 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_selector_proto_rawDescOnce sync.Once + 
file_spire_api_types_selector_proto_rawDescData = file_spire_api_types_selector_proto_rawDesc +) + +func file_spire_api_types_selector_proto_rawDescGZIP() []byte { + file_spire_api_types_selector_proto_rawDescOnce.Do(func() { + file_spire_api_types_selector_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_selector_proto_rawDescData) + }) + return file_spire_api_types_selector_proto_rawDescData +} + +var file_spire_api_types_selector_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_spire_api_types_selector_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_spire_api_types_selector_proto_goTypes = []interface{}{ + (SelectorMatch_MatchBehavior)(0), // 0: spire.api.types.SelectorMatch.MatchBehavior + (*Selector)(nil), // 1: spire.api.types.Selector + (*SelectorMatch)(nil), // 2: spire.api.types.SelectorMatch +} +var file_spire_api_types_selector_proto_depIdxs = []int32{ + 1, // 0: spire.api.types.SelectorMatch.selectors:type_name -> spire.api.types.Selector + 0, // 1: spire.api.types.SelectorMatch.match:type_name -> spire.api.types.SelectorMatch.MatchBehavior + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_spire_api_types_selector_proto_init() } +func file_spire_api_types_selector_proto_init() { + if File_spire_api_types_selector_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_selector_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Selector); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_selector_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelectorMatch); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_selector_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_selector_proto_goTypes, + DependencyIndexes: file_spire_api_types_selector_proto_depIdxs, + EnumInfos: file_spire_api_types_selector_proto_enumTypes, + MessageInfos: file_spire_api_types_selector_proto_msgTypes, + }.Build() + File_spire_api_types_selector_proto = out.File + file_spire_api_types_selector_proto_rawDesc = nil + file_spire_api_types_selector_proto_goTypes = nil + file_spire_api_types_selector_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.proto new file mode 100644 index 0000000000..522017587f --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/selector.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message Selector { + // The type of the selector. This is typically the name of the plugin that + // produces the selector. + string type = 1; + + // The value of the selector. + string value = 2; +} + +message SelectorMatch { + enum MatchBehavior { + // Indicates that the selectors in this match are equal to the + // candidate selectors, independent of ordering. 
+ // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_EXACT ["a:1", "b:2"] + // Entries that match: + // - 'e2' + MATCH_EXACT = 0; + + // Indicates that all candidates which have a non-empty subset + // of the provided set of selectors will match. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_SUBSET ["a:1"] + // Entries that match: + // - 'e1' + MATCH_SUBSET = 1; + + // Indicates that all candidates which are a superset + // of the provided selectors will match. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_SUPERSET ["a:1", "b:2"] + // Entries that match: + // - 'e1' + // - 'e2' + MATCH_SUPERSET = 2; + + // Indicates that all candidates which have at least one + // of the provided set of selectors will match. + // Example: + // Given: + // - 'e1 { Selectors: ["a:1", "b:2", "c:3"]}' + // - 'e2 { Selectors: ["a:1", "b:2"]}' + // - 'e3 { Selectors: ["a:1"]}' + // Operation: + // - MATCH_ANY ["a:1"] + // Entries that match: + // - 'e1' + // - 'e2' + // - 'e3' + MATCH_ANY = 3; + + } + + // The set of selectors to match on. + repeated Selector selectors = 1; + + // How to match the selectors. + MatchBehavior match = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.pb.go new file mode 100644 index 0000000000..9fd4b6aa03 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.pb.go @@ -0,0 +1,162 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/spiffeid.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// A SPIFFE ID, consisting of the trust domain name and a path portions of +// the SPIFFE ID URI. +type SPIFFEID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Trust domain portion the SPIFFE ID (e.g. "example.org") + TrustDomain string `protobuf:"bytes,1,opt,name=trust_domain,json=trustDomain,proto3" json:"trust_domain,omitempty"` + // The path component of the SPIFFE ID (e.g. "/foo/bar/baz"). The path + // SHOULD have a leading slash. Consumers MUST normalize the path before + // making any sort of comparison between IDs. + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` +} + +func (x *SPIFFEID) Reset() { + *x = SPIFFEID{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_spiffeid_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SPIFFEID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SPIFFEID) ProtoMessage() {} + +func (x *SPIFFEID) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_spiffeid_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SPIFFEID.ProtoReflect.Descriptor instead. 
+func (*SPIFFEID) Descriptor() ([]byte, []int) { + return file_spire_api_types_spiffeid_proto_rawDescGZIP(), []int{0} +} + +func (x *SPIFFEID) GetTrustDomain() string { + if x != nil { + return x.TrustDomain + } + return "" +} + +func (x *SPIFFEID) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +var File_spire_api_types_spiffeid_proto protoreflect.FileDescriptor + +var file_spire_api_types_spiffeid_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x22, 0x41, 0x0a, 0x08, 0x53, 0x50, 0x49, 0x46, 0x46, 0x45, 0x49, 0x44, 0x12, 0x21, 0x0a, + 0x0c, 0x74, 0x72, 0x75, 0x73, 0x74, 0x5f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x72, 0x75, 0x73, 0x74, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x70, 0x61, 0x74, 0x68, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2d, + 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x70, + 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_spiffeid_proto_rawDescOnce sync.Once + file_spire_api_types_spiffeid_proto_rawDescData = file_spire_api_types_spiffeid_proto_rawDesc +) + +func file_spire_api_types_spiffeid_proto_rawDescGZIP() []byte { + file_spire_api_types_spiffeid_proto_rawDescOnce.Do(func() { + file_spire_api_types_spiffeid_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_spiffeid_proto_rawDescData) + }) + 
return file_spire_api_types_spiffeid_proto_rawDescData +} + +var file_spire_api_types_spiffeid_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_spiffeid_proto_goTypes = []interface{}{ + (*SPIFFEID)(nil), // 0: spire.api.types.SPIFFEID +} +var file_spire_api_types_spiffeid_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_spire_api_types_spiffeid_proto_init() } +func file_spire_api_types_spiffeid_proto_init() { + if File_spire_api_types_spiffeid_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_spiffeid_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SPIFFEID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_spiffeid_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_spiffeid_proto_goTypes, + DependencyIndexes: file_spire_api_types_spiffeid_proto_depIdxs, + MessageInfos: file_spire_api_types_spiffeid_proto_msgTypes, + }.Build() + File_spire_api_types_spiffeid_proto = out.File + file_spire_api_types_spiffeid_proto_rawDesc = nil + file_spire_api_types_spiffeid_proto_goTypes = nil + file_spire_api_types_spiffeid_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.proto new file mode 100644 index 0000000000..37ad35b273 --- /dev/null +++ 
b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/spiffeid.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +// A SPIFFE ID, consisting of the trust domain name and a path portions of +// the SPIFFE ID URI. +message SPIFFEID { + // Trust domain portion the SPIFFE ID (e.g. "example.org") + string trust_domain = 1; + + // The path component of the SPIFFE ID (e.g. "/foo/bar/baz"). The path + // SHOULD have a leading slash. Consumers MUST normalize the path before + // making any sort of comparison between IDs. + string path = 2; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.pb.go new file mode 100644 index 0000000000..210d8056c2 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.pb.go @@ -0,0 +1,294 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/status.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type PermissionDeniedDetails_Reason int32 + +const ( + // Reason unknown. + PermissionDeniedDetails_UNKNOWN PermissionDeniedDetails_Reason = 0 + // Agent identity has expired. + PermissionDeniedDetails_AGENT_EXPIRED PermissionDeniedDetails_Reason = 1 + // Identity is not an attested agent. + PermissionDeniedDetails_AGENT_NOT_ATTESTED PermissionDeniedDetails_Reason = 2 + // Identity is not the active agent identity. 
+ PermissionDeniedDetails_AGENT_NOT_ACTIVE PermissionDeniedDetails_Reason = 3 + // Agent has been banned. + PermissionDeniedDetails_AGENT_BANNED PermissionDeniedDetails_Reason = 4 +) + +// Enum value maps for PermissionDeniedDetails_Reason. +var ( + PermissionDeniedDetails_Reason_name = map[int32]string{ + 0: "UNKNOWN", + 1: "AGENT_EXPIRED", + 2: "AGENT_NOT_ATTESTED", + 3: "AGENT_NOT_ACTIVE", + 4: "AGENT_BANNED", + } + PermissionDeniedDetails_Reason_value = map[string]int32{ + "UNKNOWN": 0, + "AGENT_EXPIRED": 1, + "AGENT_NOT_ATTESTED": 2, + "AGENT_NOT_ACTIVE": 3, + "AGENT_BANNED": 4, + } +) + +func (x PermissionDeniedDetails_Reason) Enum() *PermissionDeniedDetails_Reason { + p := new(PermissionDeniedDetails_Reason) + *p = x + return p +} + +func (x PermissionDeniedDetails_Reason) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PermissionDeniedDetails_Reason) Descriptor() protoreflect.EnumDescriptor { + return file_spire_api_types_status_proto_enumTypes[0].Descriptor() +} + +func (PermissionDeniedDetails_Reason) Type() protoreflect.EnumType { + return &file_spire_api_types_status_proto_enumTypes[0] +} + +func (x PermissionDeniedDetails_Reason) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PermissionDeniedDetails_Reason.Descriptor instead. +func (PermissionDeniedDetails_Reason) EnumDescriptor() ([]byte, []int) { + return file_spire_api_types_status_proto_rawDescGZIP(), []int{1, 0} +} + +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A status code, which should be an enum value of google.rpc.Code. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message. 
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_status_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_status_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_spire_api_types_status_proto_rawDescGZIP(), []int{0} +} + +func (x *Status) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type PermissionDeniedDetails struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reason PermissionDeniedDetails_Reason `protobuf:"varint,1,opt,name=reason,proto3,enum=spire.api.types.PermissionDeniedDetails_Reason" json:"reason,omitempty"` +} + +func (x *PermissionDeniedDetails) Reset() { + *x = PermissionDeniedDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_status_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PermissionDeniedDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PermissionDeniedDetails) ProtoMessage() {} + +func (x *PermissionDeniedDetails) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_status_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PermissionDeniedDetails.ProtoReflect.Descriptor instead. +func (*PermissionDeniedDetails) Descriptor() ([]byte, []int) { + return file_spire_api_types_status_proto_rawDescGZIP(), []int{1} +} + +func (x *PermissionDeniedDetails) GetReason() PermissionDeniedDetails_Reason { + if x != nil { + return x.Reason + } + return PermissionDeniedDetails_UNKNOWN +} + +var File_spire_api_types_status_proto protoreflect.FileDescriptor + +var file_spire_api_types_status_proto_rawDesc = []byte{ + 0x0a, 0x1c, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, + 0x36, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xcc, 0x01, 0x0a, 0x17, 0x50, 0x65, 0x72, 0x6d, + 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x12, 0x47, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x44, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x52, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x68, 0x0a, 0x06, + 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 
0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x58, 0x50, + 0x49, 0x52, 0x45, 0x44, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, + 0x4e, 0x4f, 0x54, 0x5f, 0x41, 0x54, 0x54, 0x45, 0x53, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x14, + 0x0a, 0x10, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x41, 0x43, 0x54, 0x49, + 0x56, 0x45, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x42, 0x41, + 0x4e, 0x4e, 0x45, 0x44, 0x10, 0x04, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, + 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_status_proto_rawDescOnce sync.Once + file_spire_api_types_status_proto_rawDescData = file_spire_api_types_status_proto_rawDesc +) + +func file_spire_api_types_status_proto_rawDescGZIP() []byte { + file_spire_api_types_status_proto_rawDescOnce.Do(func() { + file_spire_api_types_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_status_proto_rawDescData) + }) + return file_spire_api_types_status_proto_rawDescData +} + +var file_spire_api_types_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_spire_api_types_status_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_spire_api_types_status_proto_goTypes = []interface{}{ + (PermissionDeniedDetails_Reason)(0), // 0: spire.api.types.PermissionDeniedDetails.Reason + (*Status)(nil), // 1: spire.api.types.Status + (*PermissionDeniedDetails)(nil), // 2: spire.api.types.PermissionDeniedDetails +} +var file_spire_api_types_status_proto_depIdxs = []int32{ + 0, // 0: 
spire.api.types.PermissionDeniedDetails.reason:type_name -> spire.api.types.PermissionDeniedDetails.Reason + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_spire_api_types_status_proto_init() } +func file_spire_api_types_status_proto_init() { + if File_spire_api_types_status_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_spire_api_types_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_spire_api_types_status_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PermissionDeniedDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_spire_api_types_status_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_status_proto_goTypes, + DependencyIndexes: file_spire_api_types_status_proto_depIdxs, + EnumInfos: file_spire_api_types_status_proto_enumTypes, + MessageInfos: file_spire_api_types_status_proto_msgTypes, + }.Build() + File_spire_api_types_status_proto = out.File + file_spire_api_types_status_proto_rawDesc = nil + file_spire_api_types_status_proto_goTypes = nil + file_spire_api_types_status_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.proto new file 
mode 100644 index 0000000000..498bd10bb3 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/status.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +message Status { + // A status code, which should be an enum value of google.rpc.Code. + int32 code = 1; + + // A developer-facing error message. + string message = 2; +} + +message PermissionDeniedDetails { + enum Reason { + // Reason unknown. + UNKNOWN = 0; + // Agent identity has expired. + AGENT_EXPIRED = 1; + // Identity is not an attested agent. + AGENT_NOT_ATTESTED = 2; + // Identity is not the active agent identity. + AGENT_NOT_ACTIVE = 3; + // Agent has been banned. + AGENT_BANNED = 4; + } + Reason reason = 1; +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.pb.go b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.pb.go new file mode 100644 index 0000000000..450465e7ae --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.pb.go @@ -0,0 +1,178 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.14.0 +// source: spire/api/types/x509svid.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// X.509 SPIFFE Verifiable Identity Document. It contains the raw X.509 +// certificate data as well as a few denormalized fields for convenience. 
+type X509SVID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Certificate and intermediates required to form a chain of trust back to + // the X.509 authorities of the trust domain (ASN.1 DER encoded). + CertChain [][]byte `protobuf:"bytes,1,rep,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"` + // SPIFFE ID of the SVID. + Id *SPIFFEID `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + // Expiration timestamp (seconds since Unix epoch). + ExpiresAt int64 `protobuf:"varint,3,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` +} + +func (x *X509SVID) Reset() { + *x = X509SVID{} + if protoimpl.UnsafeEnabled { + mi := &file_spire_api_types_x509svid_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *X509SVID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*X509SVID) ProtoMessage() {} + +func (x *X509SVID) ProtoReflect() protoreflect.Message { + mi := &file_spire_api_types_x509svid_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use X509SVID.ProtoReflect.Descriptor instead. 
+func (*X509SVID) Descriptor() ([]byte, []int) { + return file_spire_api_types_x509svid_proto_rawDescGZIP(), []int{0} +} + +func (x *X509SVID) GetCertChain() [][]byte { + if x != nil { + return x.CertChain + } + return nil +} + +func (x *X509SVID) GetId() *SPIFFEID { + if x != nil { + return x.Id + } + return nil +} + +func (x *X509SVID) GetExpiresAt() int64 { + if x != nil { + return x.ExpiresAt + } + return 0 +} + +var File_spire_api_types_x509svid_proto protoreflect.FileDescriptor + +var file_spire_api_types_x509svid_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x78, 0x35, 0x30, 0x39, 0x73, 0x76, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x1a, 0x1e, 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x69, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x73, 0x0a, 0x08, 0x58, 0x35, 0x30, 0x39, 0x53, 0x56, 0x49, 0x44, 0x12, 0x1d, 0x0a, + 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x09, 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, 0x29, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x70, 0x69, 0x72, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x50, 0x49, 0x46, 0x46, + 0x45, 0x49, 0x44, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x70, 0x69, 0x66, 0x66, 0x65, 0x2f, 0x73, 0x70, 0x69, 0x72, + 0x65, 0x2d, 0x61, 0x70, 0x69, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x2f, + 0x73, 0x70, 0x69, 0x72, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_spire_api_types_x509svid_proto_rawDescOnce sync.Once + file_spire_api_types_x509svid_proto_rawDescData = file_spire_api_types_x509svid_proto_rawDesc +) + +func file_spire_api_types_x509svid_proto_rawDescGZIP() []byte { + file_spire_api_types_x509svid_proto_rawDescOnce.Do(func() { + file_spire_api_types_x509svid_proto_rawDescData = protoimpl.X.CompressGZIP(file_spire_api_types_x509svid_proto_rawDescData) + }) + return file_spire_api_types_x509svid_proto_rawDescData +} + +var file_spire_api_types_x509svid_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_spire_api_types_x509svid_proto_goTypes = []interface{}{ + (*X509SVID)(nil), // 0: spire.api.types.X509SVID + (*SPIFFEID)(nil), // 1: spire.api.types.SPIFFEID +} +var file_spire_api_types_x509svid_proto_depIdxs = []int32{ + 1, // 0: spire.api.types.X509SVID.id:type_name -> spire.api.types.SPIFFEID + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_spire_api_types_x509svid_proto_init() } +func file_spire_api_types_x509svid_proto_init() { + if File_spire_api_types_x509svid_proto != nil { + return + } + file_spire_api_types_spiffeid_proto_init() + if !protoimpl.UnsafeEnabled { + file_spire_api_types_x509svid_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*X509SVID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_spire_api_types_x509svid_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_spire_api_types_x509svid_proto_goTypes, + DependencyIndexes: file_spire_api_types_x509svid_proto_depIdxs, + MessageInfos: file_spire_api_types_x509svid_proto_msgTypes, + }.Build() + File_spire_api_types_x509svid_proto = out.File + file_spire_api_types_x509svid_proto_rawDesc = nil + file_spire_api_types_x509svid_proto_goTypes = nil + file_spire_api_types_x509svid_proto_depIdxs = nil +} diff --git a/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.proto b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.proto new file mode 100644 index 0000000000..e4b1466ff2 --- /dev/null +++ b/vendor/github.com/spiffe/spire-api-sdk/proto/spire/api/types/x509svid.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; +package spire.api.types; +option go_package = "github.com/spiffe/spire-api-sdk/proto/spire/api/types"; + +import "spire/api/types/spiffeid.proto"; + +// X.509 SPIFFE Verifiable Identity Document. It contains the raw X.509 +// certificate data as well as a few denormalized fields for convenience. +message X509SVID { + // Certificate and intermediates required to form a chain of trust back to + // the X.509 authorities of the trust domain (ASN.1 DER encoded). + repeated bytes cert_chain = 1; + + // SPIFFE ID of the SVID. + spire.api.types.SPIFFEID id = 2; + + // Expiration timestamp (seconds since Unix epoch). + int64 expires_at = 3; +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go index 943406b2da..aa7656a1fb 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go @@ -39,6 +39,8 @@ const ( DefaultManagedByLabelValue = "tekton-pipelines" // DefaultCloudEventSinkValue is the default value for cloud event sinks. 
DefaultCloudEventSinkValue = "" + // DefaultMaxMatrixCombinationsCount is used when no max matrix combinations count is specified. + DefaultMaxMatrixCombinationsCount = 256 defaultTimeoutMinutesKey = "default-timeout-minutes" defaultServiceAccountKey = "default-service-account" @@ -88,16 +90,18 @@ func (cfg *Defaults) Equals(other *Defaults) bool { other.DefaultPodTemplate.Equals(cfg.DefaultPodTemplate) && other.DefaultAAPodTemplate.Equals(cfg.DefaultAAPodTemplate) && other.DefaultCloudEventsSink == cfg.DefaultCloudEventsSink && - other.DefaultTaskRunWorkspaceBinding == cfg.DefaultTaskRunWorkspaceBinding + other.DefaultTaskRunWorkspaceBinding == cfg.DefaultTaskRunWorkspaceBinding && + other.DefaultMaxMatrixCombinationsCount == cfg.DefaultMaxMatrixCombinationsCount } // NewDefaultsFromMap returns a Config given a map corresponding to a ConfigMap func NewDefaultsFromMap(cfgMap map[string]string) (*Defaults, error) { tc := Defaults{ - DefaultTimeoutMinutes: DefaultTimeoutMinutes, - DefaultServiceAccount: DefaultServiceAccountValue, - DefaultManagedByLabelValue: DefaultManagedByLabelValue, - DefaultCloudEventsSink: DefaultCloudEventSinkValue, + DefaultTimeoutMinutes: DefaultTimeoutMinutes, + DefaultServiceAccount: DefaultServiceAccountValue, + DefaultManagedByLabelValue: DefaultManagedByLabelValue, + DefaultCloudEventsSink: DefaultCloudEventSinkValue, + DefaultMaxMatrixCombinationsCount: DefaultMaxMatrixCombinationsCount, } if defaultTimeoutMin, ok := cfgMap[defaultTimeoutMinutesKey]; ok { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go index 17ef479ae6..c294daa608 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go @@ -17,6 +17,7 @@ limitations under the License. 
package config import ( + "context" "fmt" "os" "strconv" @@ -45,6 +46,8 @@ const ( DefaultDisableCredsInit = false // DefaultRunningInEnvWithInjectedSidecars is the default value for "running-in-environment-with-injected-sidecars". DefaultRunningInEnvWithInjectedSidecars = true + // DefaultAwaitSidecarReadiness is the default value for "await-sidecar-readiness". + DefaultAwaitSidecarReadiness = true // DefaultRequireGitSSHSecretKnownHosts is the default value for "require-git-ssh-secret-known-hosts". DefaultRequireGitSSHSecretKnownHosts = false // DefaultEnableTektonOciBundles is the default value for "enable-tekton-oci-bundles". @@ -57,16 +60,20 @@ const ( DefaultSendCloudEventsForRuns = false // DefaultEmbeddedStatus is the default value for "embedded-status". DefaultEmbeddedStatus = FullEmbeddedStatus + // DefaultEnableSpire is the default value for "enable-spire". + DefaultEnableSpire = false disableAffinityAssistantKey = "disable-affinity-assistant" disableCredsInitKey = "disable-creds-init" runningInEnvWithInjectedSidecarsKey = "running-in-environment-with-injected-sidecars" + awaitSidecarReadinessKey = "await-sidecar-readiness" requireGitSSHSecretKnownHostsKey = "require-git-ssh-secret-known-hosts" // nolint: gosec enableTektonOCIBundles = "enable-tekton-oci-bundles" enableCustomTasks = "enable-custom-tasks" enableAPIFields = "enable-api-fields" sendCloudEventsForRuns = "send-cloudevents-for-runs" embeddedStatus = "embedded-status" + enableSpire = "enable-spire" ) // FeatureFlags holds the features configurations @@ -81,7 +88,9 @@ type FeatureFlags struct { ScopeWhenExpressionsToTask bool EnableAPIFields string SendCloudEventsForRuns bool + AwaitSidecarReadiness bool EmbeddedStatus string + EnableSpire bool } // GetFeatureFlagsConfigName returns the name of the configmap containing all @@ -118,6 +127,9 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setFeature(runningInEnvWithInjectedSidecarsKey, 
DefaultRunningInEnvWithInjectedSidecars, &tc.RunningInEnvWithInjectedSidecars); err != nil { return nil, err } + if err := setFeature(awaitSidecarReadinessKey, DefaultAwaitSidecarReadiness, &tc.AwaitSidecarReadiness); err != nil { + return nil, err + } if err := setFeature(requireGitSSHSecretKnownHostsKey, DefaultRequireGitSSHSecretKnownHosts, &tc.RequireGitSSHSecretKnownHosts); err != nil { return nil, err } @@ -140,6 +152,7 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if tc.EnableAPIFields == AlphaAPIFields { tc.EnableTektonOCIBundles = true tc.EnableCustomTasks = true + tc.EnableSpire = true } else { if err := setFeature(enableTektonOCIBundles, DefaultEnableTektonOciBundles, &tc.EnableTektonOCIBundles); err != nil { return nil, err @@ -147,6 +160,9 @@ func NewFeatureFlagsFromMap(cfgMap map[string]string) (*FeatureFlags, error) { if err := setFeature(enableCustomTasks, DefaultEnableCustomTasks, &tc.EnableCustomTasks); err != nil { return nil, err } + if err := setFeature(enableSpire, DefaultEnableSpire, &tc.EnableSpire); err != nil { + return nil, err + } } return &tc, nil } @@ -187,3 +203,17 @@ func setEmbeddedStatus(cfgMap map[string]string, defaultValue string, feature *s func NewFeatureFlagsFromConfigMap(config *corev1.ConfigMap) (*FeatureFlags, error) { return NewFeatureFlagsFromMap(config.Data) } + +// EnableAlphaAPIFields enables alpha feature in an existing context (for use in testing) +func EnableAlphaAPIFields(ctx context.Context) context.Context { + featureFlags, _ := NewFeatureFlagsFromMap(map[string]string{ + "enable-api-fields": "alpha", + }) + cfg := &Config{ + Defaults: &Defaults{ + DefaultTimeoutMinutes: 60, + }, + FeatureFlags: featureFlags, + } + return ToContext(ctx, cfg) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/options.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/options.go index 2e75adca4c..6c15c86f36 100644 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/options.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/options.go @@ -16,8 +16,13 @@ limitations under the License. package pipeline +import ( + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" +) + // Options holds options passed to the Tekton Pipeline controllers // typically via command-line flags. type Options struct { - Images Images + Images Images + SpireConfig spireconfig.SpireConfig } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go index 50159ef447..a95b23e72a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go @@ -40,12 +40,6 @@ const ( // PipelineTaskLabelKey is used as the label identifier for a PipelineTask PipelineTaskLabelKey = GroupName + "/pipelineTask" - // ConditionCheckKey is used as the label identifier for a ConditionCheck - ConditionCheckKey = GroupName + "/conditionCheck" - - // ConditionNameKey is used as the label identifier for a Condition - ConditionNameKey = GroupName + "/conditionName" - // RunKey is used as the label identifier for a Run RunKey = GroupName + "/run" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/container_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/container_types.go new file mode 100644 index 0000000000..f6e75960ff --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/container_types.go @@ -0,0 +1,541 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Step runs a subcomponent of a Task +type Step struct { + + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. 
+ // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. 
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Script is the contents of an executable file to execute. + // + // If Script is not empty, the Step cannot have an Command and the Args will be passed to the Script. + // +optional + Script string `json:"script,omitempty"` + + // Timeout is the time after which the step times out. 
Defaults to never. + // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration + // +optional + Timeout *metav1.Duration `json:"timeout,omitempty"` + + // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" + // for this field to be supported. + // + // Workspaces is a list of workspaces from the Task that this Step wants + // exclusive access to. Adding a workspace to this list means that any + // other Step or Sidecar that does not also request this Workspace will + // not have access to it. + // +optional + // +listType=atomic + Workspaces []WorkspaceUsage `json:"workspaces,omitempty"` + + // OnError defines the exiting behavior of a container on error + // can be set to [ continue | stopAndFail ] + // stopAndFail indicates exit the taskRun if the container exits with non-zero exit code + // continue indicates continue executing the rest of the steps irrespective of the container exit code + OnError string `json:"onError,omitempty"` + // Stores configuration for the stdout stream of the step. + // +optional + StdoutConfig *StepOutputConfig `json:"stdoutConfig,omitempty"` + // Stores configuration for the stderr stream of the step. + // +optional + StderrConfig *StepOutputConfig `json:"stderrConfig,omitempty"` +} + +// StepOutputConfig stores configuration for a step output stream. +type StepOutputConfig struct { + // Path to duplicate stdout stream to on container's local filesystem. 
+ // +optional + Path string `json:"path,omitempty"` +} + +// ToK8sContainer converts the Step to a Kubernetes Container struct +func (s *Step) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Name: s.Name, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + } +} + +// SetContainerFields sets the fields of the Step to the values of the corresponding fields in the Container +func (s *Step) SetContainerFields(c corev1.Container) { + s.Name = c.Name + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext +} + +// StepTemplate is a template for a Step +type StepTemplate struct { + + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". 
Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. 
+ // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
+ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` +} + +// SetContainerFields sets the fields of the Step to the values of the corresponding fields in the Container +func (s *StepTemplate) SetContainerFields(c corev1.Container) { + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext +} + +// ToK8sContainer converts the StepTemplate to a Kubernetes Container struct +func (s *StepTemplate) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + } +} + +// Sidecar has nearly the same data structure as Step but does not have the ability to timeout. +type Sidecar struct { + + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. 
+ // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced + // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will + // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless + // of whether the variable exists or not. Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + // +listType=atomic + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of ports to expose from the container. 
Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + Ports []corev1.ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + // +listType=atomic + EnvFrom []corev1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + // +listType=atomic + Env []corev1.EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. 
+ // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + // +listType=atomic + VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + // +listType=atomic + VolumeDevices []corev1.VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + LivenessProbe *corev1.Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + ReadinessProbe *corev1.Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + StartupProbe *corev1.Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + Lifecycle *corev1.Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy corev1.TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. 
+ // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext defines the security options the container should be run with. + // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. 
+ // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` + + // Script is the contents of an executable file to execute. + // + // If Script is not empty, the Step cannot have an Command or Args. + // +optional + Script string `json:"script,omitempty"` + + // This is an alpha field. You must set the "enable-api-fields" feature flag to "alpha" + // for this field to be supported. + // + // Workspaces is a list of workspaces from the Task that this Sidecar wants + // exclusive access to. Adding a workspace to this list means that any + // other Step or Sidecar that does not also request this Workspace will + // not have access to it. + // +optional + // +listType=atomic + Workspaces []WorkspaceUsage `json:"workspaces,omitempty"` +} + +// ToK8sContainer converts the Sidecar to a Kubernetes Container struct +func (s *Sidecar) ToK8sContainer() *corev1.Container { + return &corev1.Container{ + Name: s.Name, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + Ports: s.Ports, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + LivenessProbe: s.LivenessProbe, + ReadinessProbe: s.ReadinessProbe, + StartupProbe: s.StartupProbe, + Lifecycle: s.Lifecycle, + TerminationMessagePath: s.TerminationMessagePath, + TerminationMessagePolicy: s.TerminationMessagePolicy, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + Stdin: s.Stdin, + StdinOnce: s.StdinOnce, + TTY: s.TTY, + } +} + +// SetContainerFields sets the fields of the Sidecar to the values of the corresponding fields in the Container +func (s *Sidecar) SetContainerFields(c corev1.Container) { + s.Name = c.Name + s.Image = c.Image + s.Command = c.Command + s.Args = c.Args + s.WorkingDir = c.WorkingDir + s.Ports = c.Ports + s.EnvFrom = c.EnvFrom + s.Env = c.Env + s.Resources = c.Resources + s.VolumeMounts = c.VolumeMounts + s.VolumeDevices = c.VolumeDevices + 
s.LivenessProbe = c.LivenessProbe + s.ReadinessProbe = c.ReadinessProbe + s.StartupProbe = c.StartupProbe + s.Lifecycle = c.Lifecycle + s.TerminationMessagePath = c.TerminationMessagePath + s.TerminationMessagePolicy = c.TerminationMessagePolicy + s.ImagePullPolicy = c.ImagePullPolicy + s.SecurityContext = c.SecurityContext + s.Stdin = c.Stdin + s.StdinOnce = c.StdinOnce + s.TTY = c.TTY +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/doc.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/doc.go new file mode 100644 index 0000000000..d279002e61 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1 contains API Schema definitions for the pipeline v1 API group +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/tektoncd/pipeline/pkg/apis/pipeline +// +k8s:defaulter-gen=TypeMeta +// +groupName=tekton.dev +package v1 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/merge.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/merge.go new file mode 100644 index 0000000000..b7995ae90a --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/merge.go @@ -0,0 +1,116 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/strategicpatch" +) + +// mergeData is used to store the intermediate data needed to merge an object +// with a template. It's provided to avoid repeatedly re-serializing the template. +// +k8s:openapi-gen=false +type mergeData struct { + emptyJSON []byte + templateJSON []byte + patchSchema strategicpatch.PatchMetaFromStruct +} + +// MergeStepsWithStepTemplate takes a possibly nil container template and a +// list of steps, merging each of the steps with the container template, if +// it's not nil, and returning the resulting list. 
+func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, error) { + if template == nil { + return steps, nil + } + + md, err := getMergeData(template.ToK8sContainer(), &corev1.Container{}) + if err != nil { + return nil, err + } + + for i, s := range steps { + merged := corev1.Container{} + err := mergeObjWithTemplateBytes(md, s.ToK8sContainer(), &merged) + if err != nil { + return nil, err + } + + // If the container's args is nil, reset it to empty instead + if merged.Args == nil && s.Args != nil { + merged.Args = []string{} + } + + // Pass through original step Script, for later conversion. + newStep := Step{Script: s.Script, OnError: s.OnError, Timeout: s.Timeout, StdoutConfig: s.StdoutConfig, StderrConfig: s.StderrConfig} + newStep.SetContainerFields(merged) + steps[i] = newStep + } + return steps, nil +} + +// getMergeData serializes the template and empty object to get the intermediate results necessary for +// merging an object of the same type with this template. +// This function is provided to avoid repeatedly serializing an identical template. +func getMergeData(template, empty interface{}) (*mergeData, error) { + // We need JSON bytes to generate a patch to merge the object + // onto the template, so marshal the template. + templateJSON, err := json.Marshal(template) + if err != nil { + return nil, err + } + // We need to do a three-way merge to actually merge the template and + // object, so we need an empty object as the "original" + emptyJSON, err := json.Marshal(empty) + if err != nil { + return nil, err + } + // Get the patch meta, which is needed for generating and applying the merge patch. + patchSchema, err := strategicpatch.NewPatchMetaFromStruct(template) + if err != nil { + return nil, err + } + return &mergeData{templateJSON: templateJSON, emptyJSON: emptyJSON, patchSchema: patchSchema}, nil +} + +// mergeObjWithTemplateBytes merges obj with md's template JSON and updates out to reflect the merged result. 
+// out is a pointer to the zero value of obj's type. +// This function is provided to avoid repeatedly serializing an identical template. +func mergeObjWithTemplateBytes(md *mergeData, obj, out interface{}) error { + // Marshal the object to JSON + objAsJSON, err := json.Marshal(obj) + if err != nil { + return err + } + // Create a merge patch, with the empty JSON as the original, the object JSON as the modified, and the template + // JSON as the current - this lets us do a deep merge of the template and object, with awareness of + // the "patchMerge" tags. + patch, err := strategicpatch.CreateThreeWayMergePatch(md.emptyJSON, objAsJSON, md.templateJSON, md.patchSchema, true) + if err != nil { + return err + } + + // Actually apply the merge patch to the template JSON. + mergedAsJSON, err := strategicpatch.StrategicMergePatchUsingLookupPatchMeta(md.templateJSON, patch, md.patchSchema) + if err != nil { + return err + } + // Unmarshal the merged JSON to a pointer, and return it. + return json.Unmarshal(mergedAsJSON, out) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go new file mode 100644 index 0000000000..15ba0a4f8c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/openapi_generated.go @@ -0,0 +1,1791 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by openapi-gen. DO NOT EDIT. + +// This file was autogenerated by openapi-gen. Do not edit it manually! + +package v1 + +import ( + common "k8s.io/kube-openapi/pkg/common" + spec "k8s.io/kube-openapi/pkg/validation/spec" +) + +func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { + return map[string]common.OpenAPIDefinition{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.AffinityAssistantTemplate": schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template": schema_pkg_apis_pipeline_pod_Template(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString": schema_pkg_apis_pipeline_v1_ArrayOrString(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Param": schema_pkg_apis_pipeline_v1_Param(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec": schema_pkg_apis_pipeline_v1_ParamSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PipelineWorkspaceDeclaration": schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec": schema_pkg_apis_pipeline_v1_PropertySpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam": schema_pkg_apis_pipeline_v1_ResolverParam(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverRef": schema_pkg_apis_pipeline_v1_ResolverRef(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar": schema_pkg_apis_pipeline_v1_Sidecar(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step": schema_pkg_apis_pipeline_v1_Step(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig": schema_pkg_apis_pipeline_v1_StepOutputConfig(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate": 
schema_pkg_apis_pipeline_v1_StepTemplate(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task": schema_pkg_apis_pipeline_v1_Task(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskList": schema_pkg_apis_pipeline_v1_TaskList(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult": schema_pkg_apis_pipeline_v1_TaskResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskRunResult": schema_pkg_apis_pipeline_v1_TaskRunResult(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec": schema_pkg_apis_pipeline_v1_TaskSpec(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceBinding": schema_pkg_apis_pipeline_v1_WorkspaceBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration": schema_pkg_apis_pipeline_v1_WorkspaceDeclaration(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspacePipelineTaskBinding": schema_pkg_apis_pipeline_v1_WorkspacePipelineTaskBinding(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage": schema_pkg_apis_pipeline_v1_WorkspaceUsage(ref), + } +} + +func schema_pkg_apis_pipeline_pod_AffinityAssistantTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AffinityAssistantTemplate holds pod specific configuration and is a subset of the generic pod Template", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "tolerations": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's tolerations.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, + "imagePullSecrets": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.Toleration"}, + } +} + +func schema_pkg_apis_pipeline_pod_Template(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Template holds pod specific configuration", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "tolerations": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's tolerations.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's scheduling constraints", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), + }, + }, + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of volumes that can be mounted by containers belonging to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Volume"), + }, + }, + }, + }, + }, + "runtimeClassName": { + SchemaProps: spec.SchemaProps{ + Description: "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", + Type: []string{"string"}, + Format: "", + }, + }, + "automountServiceAccountToken": { + SchemaProps: spec.SchemaProps{ + Description: "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "dnsPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.", + Type: []string{"string"}, + Format: "", + }, + }, + "dnsConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + Ref: ref("k8s.io/api/core/v1.PodDNSConfig"), + }, + }, + "enableServiceLinks": { + SchemaProps: spec.SchemaProps{ + Description: "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. 
Optional: Defaults to true.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + Type: []string{"string"}, + Format: "", + }, + }, + "schedulerName": { + SchemaProps: spec.SchemaProps{ + Description: "SchedulerName specifies the scheduler to be used to dispatch the Pod", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullSecrets": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + }, + }, + "hostAliases": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. 
This is only valid for non-hostNetwork pods.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.HostAlias"), + }, + }, + }, + }, + }, + "hostNetwork": { + SchemaProps: spec.SchemaProps{ + Description: "HostNetwork specifies whether the pod may use the node network namespace", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.HostAlias", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodDNSConfig", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration", "k8s.io/api/core/v1.Volume"}, + } +} + +func schema_pkg_apis_pipeline_v1_ArrayOrString(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ArrayOrString is a type that can hold a single string or string array. Used in JSON unmarshalling so that a single JSON field can accept either an individual string or an array of strings. 
consideration the object case after the community reaches an agreement on it.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "stringVal": { + SchemaProps: spec.SchemaProps{ + Description: "Represents the stored type of ArrayOrString.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "arrayVal": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "objectVal": { + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"type", "stringVal", "arrayVal", "objectVal"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_Param(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Param declares an ArrayOrString to use for the parameter called name.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"), + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"}, + } +} + +func 
schema_pkg_apis_pipeline_v1_ParamSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ParamSpec defines arbitrary parameters needed beyond typed inputs (such as resources). Parameter values are provided by users as inputs on a TaskRun or PipelineRun.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name declares the name by which a parameter is referenced.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the parameter. The possible types are currently \"string\", \"array\" and \"object\", and \"string\" is the default.", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a user-facing description of the parameter that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, + "properties": { + SchemaProps: spec.SchemaProps{ + Description: "Properties is the JSON Schema properties to support key-value pairs parameter.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"), + }, + }, + }, + }, + }, + "default": { + SchemaProps: spec.SchemaProps{ + Description: "Default is the value a parameter takes if no input value is supplied. 
If default is set, a Task may be executed without a supplied value for the parameter.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"}, + } +} + +func schema_pkg_apis_pipeline_v1_PipelineWorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of a workspace to be provided by a PipelineRun.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.", + Type: []string{"string"}, + Format: "", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Optional marks a Workspace as not being required in PipelineRuns. 
By default this field is false and so declared workspaces are required.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_PropertySpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PropertySpec defines the struct for object keys", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_ResolverParam(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolverParam is a single parameter passed to a resolver.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the parameter that will be passed to the resolver.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the string value of the parameter that will be passed to the resolver.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_ResolverRef(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo. 
This feature is in alpha and these fields are only available when the alpha feature gate is enabled.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "resolver": { + SchemaProps: spec.SchemaProps{ + Description: "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", + Type: []string{"string"}, + Format: "", + }, + }, + "resource": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Resource contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ResolverParam"}, + } +} + +func schema_pkg_apis_pipeline_v1_Sidecar(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. 
More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "ports": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-map-keys": []interface{}{ + "containerPort", + "protocol", + }, + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ContainerPort"), + }, + }, + }, + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. 
The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "livenessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "readinessProbe": { + SchemaProps: spec.SchemaProps{ + Description: "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "startupProbe": { + SchemaProps: spec.SchemaProps{ + Description: "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. 
This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + Ref: ref("k8s.io/api/core/v1.Probe"), + }, + }, + "lifecycle": { + SchemaProps: spec.SchemaProps{ + Description: "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + Ref: ref("k8s.io/api/core/v1.Lifecycle"), + }, + }, + "terminationMessagePath": { + SchemaProps: spec.SchemaProps{ + Description: "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "terminationMessagePolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "stdin": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "stdinOnce": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tty": { + SchemaProps: spec.SchemaProps{ + Description: "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. 
Default is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.", + Type: []string{"string"}, + Format: "", + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"), + }, + }, + }, + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_pipeline_v1_Step(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Step runs a subcomponent of a Task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the container specified as a DNS_LABEL. 
Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.", + Type: []string{"string"}, + Format: "", + }, + }, + "timeout": { + SchemaProps: spec.SchemaProps{ + Description: "Timeout is the time after which the step times out. Defaults to never. 
Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Duration"), + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage"), + }, + }, + }, + }, + }, + "onError": { + SchemaProps: spec.SchemaProps{ + Description: "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ] stopAndFail indicates exit the taskRun if the container exits with non-zero exit code continue indicates continue executing the rest of the steps irrespective of the container exit code", + Type: []string{"string"}, + Format: "", + }, + }, + "stdoutConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Stores configuration for the stdout stream of the step.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig"), + }, + }, + "stderrConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Stores configuration for the stderr stream of the step.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepOutputConfig", 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceUsage", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + +func schema_pkg_apis_pipeline_v1_StepOutputConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StepOutputConfig stores configuration for a step output stream.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path to duplicate stdout stream to on container's local filesystem.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_StepTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StepTemplate is a template for a Step", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "image": { + SchemaProps: spec.SchemaProps{ + Description: "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + Type: []string{"string"}, + Format: "", + }, + }, + "command": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "args": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "workingDir": { + SchemaProps: spec.SchemaProps{ + Description: "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
Cannot be updated.", + Type: []string{"string"}, + Format: "", + }, + }, + "envFrom": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvFromSource"), + }, + }, + }, + }, + }, + "env": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "List of environment variables to set in the container. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.EnvVar"), + }, + }, + }, + }, + }, + "resources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "volumeMounts": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Pod volumes to mount into the container's filesystem. Cannot be updated.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeMount"), + }, + }, + }, + }, + }, + "volumeDevices": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "volumeDevices is the list of block devices to be used by the container.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.VolumeDevice"), + }, + }, + }, + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Description: "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. 
More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + Ref: ref("k8s.io/api/core/v1.SecurityContext"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount"}, + } +} + +func schema_pkg_apis_pipeline_v1_Task(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec holds the desired state of the Task from the client", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskList contains a list of Task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Task", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskResult used to describe the results of a task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name the given name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the result. 
The possible type is currently \"string\" and will support \"array\" in following work.", + Type: []string{"string"}, + Format: "", + }, + }, + "properties": { + SchemaProps: spec.SchemaProps{ + Description: "Properties is the JSON Schema properties to support key-value pairs results.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"), + }, + }, + }, + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a human-readable description of the result", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.PropertySpec"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskRunResult(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskRunResult used to describe the results of a task", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name the given name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the result. 
The possible type is currently \"string\" and will support \"array\" in following work.", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value the given value of the result", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"), + }, + }, + }, + Required: []string{"name", "value"}, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ArrayOrString"}, + } +} + +func schema_pkg_apis_pipeline_v1_TaskSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TaskSpec defines the desired state of Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "params": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Params is a list of input parameters required to run the task. 
Params must be supplied as inputs in TaskRuns unless they declare a default value.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec"), + }, + }, + }, + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is a user-facing description of the task that may be used to populate a UI.", + Type: []string{"string"}, + Format: "", + }, + }, + "steps": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step"), + }, + }, + }, + }, + }, + "volumes": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Volumes is a collection of volumes that are available to mount into the steps of the build.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Volume"), + }, + }, + }, + }, + }, + "stepTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate"), + }, + }, + "sidecars": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": 
"atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar"), + }, + }, + }, + }, + }, + "workspaces": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Workspaces are the volumes that this Task requires.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration"), + }, + }, + }, + }, + }, + "results": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-list-type": "atomic", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "Results are values that this Task can output", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult"), + }, + }, + }, + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.ParamSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Sidecar", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.Step", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.StepTemplate", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.TaskResult", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1.WorkspaceDeclaration", "k8s.io/api/core/v1.Volume"}, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspaceBinding(ref common.ReferenceCallback) 
common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspaceBinding maps a Task's declared workspace to a Volume.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the workspace populated by the volume.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).", + Type: []string{"string"}, + Format: "", + }, + }, + "volumeClaimTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "VolumeClaimTemplate is a template for a claim that will be created in the same namespace. The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun.", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaim"), + }, + }, + "persistentVolumeClaim": { + SchemaProps: spec.SchemaProps{ + Description: "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.", + Ref: ref("k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource"), + }, + }, + "emptyDir": { + SchemaProps: spec.SchemaProps{ + Description: "EmptyDir represents a temporary directory that shares a Task's lifetime. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.", + Ref: ref("k8s.io/api/core/v1.EmptyDirVolumeSource"), + }, + }, + "configMap": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigMap represents a configMap that should populate this workspace.", + Ref: ref("k8s.io/api/core/v1.ConfigMapVolumeSource"), + }, + }, + "secret": { + SchemaProps: spec.SchemaProps{ + Description: "Secret represents a secret that should populate this workspace.", + Ref: ref("k8s.io/api/core/v1.SecretVolumeSource"), + }, + }, + }, + Required: []string{"name"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"}, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspaceDeclaration(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspaceDeclaration is a declaration of a volume that a Task requires.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name by which you can bind the volume at runtime.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "description": { + SchemaProps: spec.SchemaProps{ + Description: "Description is an optional human readable description of this volume.", + Type: []string{"string"}, + Format: "", + }, + }, + "mountPath": { + SchemaProps: spec.SchemaProps{ + Description: "MountPath overrides the directory that the volume will be made available at.", + Type: []string{"string"}, + Format: "", + }, + }, + "readOnly": { + SchemaProps: spec.SchemaProps{ + Description: "ReadOnly dictates whether a mounted volume is writable. 
By default this field is false and so mounted volumes are writable.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "optional": { + SchemaProps: spec.SchemaProps{ + Description: "Optional marks a Workspace as not being required in TaskRuns. By default this field is false and so declared workspaces are required.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspacePipelineTaskBinding(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be mapped to a task's declared workspace.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the workspace as declared by the task", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "workspace": { + SchemaProps: spec.SchemaProps{ + Description: "Workspace is the name of the workspace declared by the pipeline", + Type: []string{"string"}, + Format: "", + }, + }, + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath is optionally a directory on the volume which should be used for this binding (i.e. 
the volume will be mounted at this sub directory).", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name"}, + }, + }, + } +} + +func schema_pkg_apis_pipeline_v1_WorkspaceUsage(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access to a Workspace defined in a Task.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the workspace this Step or Sidecar wants access to.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "mountPath": { + SchemaProps: spec.SchemaProps{ + Description: "MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, overriding any MountPath specified in the Task's WorkspaceDeclaration.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"name", "mountPath"}, + }, + }, + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go new file mode 100644 index 0000000000..c3fc297628 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/param_types.go @@ -0,0 +1,290 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + "strings" + + resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" + "github.com/tektoncd/pipeline/pkg/substitution" +) + +// exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else +// i.e. `$(result.resultname)` is a match, but `foo $(result.resultname)` is not. +const exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$` + +var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat) + +// ParamsPrefix is the prefix used in $(...) expressions referring to parameters +const ParamsPrefix = "params" + +// ParamSpec defines arbitrary parameters needed beyond typed inputs (such as +// resources). Parameter values are provided by users as inputs on a TaskRun +// or PipelineRun. +type ParamSpec struct { + // Name declares the name by which a parameter is referenced. + Name string `json:"name"` + // Type is the user-specified type of the parameter. The possible types + // are currently "string", "array" and "object", and "string" is the default. + // +optional + Type ParamType `json:"type,omitempty"` + // Description is a user-facing description of the parameter that may be + // used to populate a UI. + // +optional + Description string `json:"description,omitempty"` + // Properties is the JSON Schema properties to support key-value pairs parameter. + // +optional + Properties map[string]PropertySpec `json:"properties,omitempty"` + // Default is the value a parameter takes if no input value is supplied. If + // default is set, a Task may be executed without a supplied value for the + // parameter. 
+ // +optional + Default *ArrayOrString `json:"default,omitempty"` +} + +// PropertySpec defines the struct for object keys +type PropertySpec struct { + Type ParamType `json:"type,omitempty"` +} + +// SetDefaults set the default type +func (pp *ParamSpec) SetDefaults(context.Context) { + if pp == nil { + return + } + + // Propagate inferred type to the parent ParamSpec's type, and default type to the PropertySpec's type + // The sequence to look at is type in ParamSpec -> properties -> type in default -> array/string/object value in default + // If neither `properties` or `default` section is provided, ParamTypeString will be the default type. + switch { + case pp.Type != "": + // If param type is provided by the author, do nothing but just set default type for PropertySpec in case `properties` section is provided. + pp.setDefaultsForProperties() + case pp.Properties != nil: + pp.Type = ParamTypeObject + // Also set default type for PropertySpec + pp.setDefaultsForProperties() + case pp.Default == nil: + // ParamTypeString is the default value (when no type can be inferred from the default value) + pp.Type = ParamTypeString + case pp.Default.Type != "": + pp.Type = pp.Default.Type + case pp.Default.ArrayVal != nil: + pp.Type = ParamTypeArray + case pp.Default.ObjectVal != nil: + pp.Type = ParamTypeObject + default: + pp.Type = ParamTypeString + } +} + +// setDefaultsForProperties sets default type for PropertySpec (string) if it's not specified +func (pp *ParamSpec) setDefaultsForProperties() { + for key, propertySpec := range pp.Properties { + if propertySpec.Type == "" { + pp.Properties[key] = PropertySpec{Type: ParamTypeString} + } + } +} + +// ResourceParam declares a string value to use for the parameter called Name, and is used in +// the specific context of PipelineResources. +type ResourceParam = resource.ResourceParam + +// Param declares an ArrayOrString to use for the parameter called name. 
+type Param struct { + Name string `json:"name"` + Value ArrayOrString `json:"value"` +} + +// ParamType indicates the type of an input parameter; +// Used to distinguish between a single string and an array of strings. +type ParamType string + +// Valid ParamTypes: +const ( + ParamTypeString ParamType = "string" + ParamTypeArray ParamType = "array" + ParamTypeObject ParamType = "object" +) + +// AllParamTypes can be used for ParamType validation. +var AllParamTypes = []ParamType{ParamTypeString, ParamTypeArray, ParamTypeObject} + +// ArrayOrString is modeled after IntOrString in kubernetes/apimachinery: + +// ArrayOrString is a type that can hold a single string or string array. +// Used in JSON unmarshalling so that a single JSON field can accept +// either an individual string or an array of strings. +// TODO (@chuangw6): This struct will be renamed or be embedded in a new struct to take into +// consideration the object case after the community reaches an agreement on it. +type ArrayOrString struct { + Type ParamType `json:"type"` // Represents the stored type of ArrayOrString. + StringVal string `json:"stringVal"` + // +listType=atomic + ArrayVal []string `json:"arrayVal"` + ObjectVal map[string]string `json:"objectVal"` +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (arrayOrString *ArrayOrString) UnmarshalJSON(value []byte) error { + // ArrayOrString is used for Results Value as well, the results can be any kind of + // data so we need to check if it is empty. + if len(value) == 0 { + arrayOrString.Type = ParamTypeString + return nil + } + if value[0] == '[' { + // We're trying to Unmarshal to []string, but for cases like []int or other types + // of nested array which we don't support yet, we should continue and Unmarshal + // it to String. If the Type being set doesn't match what it actually should be, + // it will be captured by validation in reconciler. 
+ // if failed to unmarshal to array, we will convert the value to string and marshal it to string + var a []string + if err := json.Unmarshal(value, &a); err == nil { + arrayOrString.Type = ParamTypeArray + arrayOrString.ArrayVal = a + return nil + } + } + if value[0] == '{' { + // if failed to unmarshal to map, we will convert the value to string and marshal it to string + var m map[string]string + if err := json.Unmarshal(value, &m); err == nil { + arrayOrString.Type = ParamTypeObject + arrayOrString.ObjectVal = m + return nil + } + } + + // By default we unmarshal to string + arrayOrString.Type = ParamTypeString + if err := json.Unmarshal(value, &arrayOrString.StringVal); err == nil { + return nil + } + arrayOrString.StringVal = string(value) + + return nil +} + +// MarshalJSON implements the json.Marshaller interface. +func (arrayOrString ArrayOrString) MarshalJSON() ([]byte, error) { + switch arrayOrString.Type { + case ParamTypeString: + return json.Marshal(arrayOrString.StringVal) + case ParamTypeArray: + return json.Marshal(arrayOrString.ArrayVal) + case ParamTypeObject: + return json.Marshal(arrayOrString.ObjectVal) + default: + return []byte{}, fmt.Errorf("impossible ArrayOrString.Type: %q", arrayOrString.Type) + } +} + +// ApplyReplacements applyes replacements for ArrayOrString type +func (arrayOrString *ArrayOrString) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { + switch arrayOrString.Type { + case ParamTypeArray: + var newArrayVal []string + for _, v := range arrayOrString.ArrayVal { + newArrayVal = append(newArrayVal, substitution.ApplyArrayReplacements(v, stringReplacements, arrayReplacements)...) 
+ } + arrayOrString.ArrayVal = newArrayVal + case ParamTypeObject: + newObjectVal := map[string]string{} + for k, v := range arrayOrString.ObjectVal { + newObjectVal[k] = substitution.ApplyReplacements(v, stringReplacements) + } + arrayOrString.ObjectVal = newObjectVal + default: + arrayOrString.applyOrCorrect(stringReplacements, arrayReplacements, objectReplacements) + } +} + +// applyOrCorrect deals with string param whose value can be string literal or a reference to a string/array/object param/result. +// If the value of arrayOrString is a reference to array or object, the type will be corrected from string to array/object. +func (arrayOrString *ArrayOrString) applyOrCorrect(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { + stringVal := arrayOrString.StringVal + + // if the stringVal is a string literal or a string that mixed with var references + // just do the normal string replacement + if !exactVariableSubstitutionRegex.MatchString(stringVal) { + arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) + return + } + + // trim the head "$(" and the tail ")" or "[*])" + // i.e. 
get "params.name" from "$(params.name)" or "$(params.name[*])"
+	trimedStringVal := StripStarVarSubExpression(stringVal)
+
+	// if the stringVal is a reference to a string param
+	if _, ok := stringReplacements[trimedStringVal]; ok {
+		arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements)
+	}
+
+	// if the stringVal is a reference to an array param, we need to change the type other than apply replacement
+	if _, ok := arrayReplacements[trimedStringVal]; ok {
+		arrayOrString.StringVal = ""
+		arrayOrString.ArrayVal = substitution.ApplyArrayReplacements(stringVal, stringReplacements, arrayReplacements)
+		arrayOrString.Type = ParamTypeArray
+	}
+
+	// if the stringVal is a reference to an object param, we need to change the type other than apply replacement
+	if _, ok := objectReplacements[trimedStringVal]; ok {
+		arrayOrString.StringVal = ""
+		arrayOrString.ObjectVal = objectReplacements[trimedStringVal]
+		arrayOrString.Type = ParamTypeObject
+	}
+}
+
+// StripStarVarSubExpression strips "$(target[*])" to get "target"
+func StripStarVarSubExpression(s string) string {
+	return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(s, "$("), ")"), "[*]")
+}
+
+// NewArrayOrString creates an ArrayOrString of type ParamTypeString or ParamTypeArray, based on
+// how many inputs are given (>1 input will create an array, not string).
+func NewArrayOrString(value string, values ...string) *ArrayOrString { + if len(values) > 0 { + return &ArrayOrString{ + Type: ParamTypeArray, + ArrayVal: append([]string{value}, values...), + } + } + return &ArrayOrString{ + Type: ParamTypeString, + StringVal: value, + } +} + +// NewObject creates an ArrayOrString of type ParamTypeObject using the provided key-value pairs +func NewObject(pairs map[string]string) *ArrayOrString { + return &ArrayOrString{ + Type: ParamTypeObject, + ObjectVal: pairs, + } +} + +// ArrayReference returns the name of the parameter from array parameter reference +// returns arrayParam from $(params.arrayParam[*]) +func ArrayReference(a string) string { + return strings.TrimSuffix(strings.TrimPrefix(a, "$("+ParamsPrefix+"."), "[*])") +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/register.go new file mode 100644 index 0000000000..0c8cf3337d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: pipeline.GroupName, Version: "v1"} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + // AddToScheme adds Build types to the scheme. + AddToScheme = schemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Task{}, + &TaskList{}, + ) // TODO(#4983): v1 types go here + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go new file mode 100644 index 0000000000..bb547b2a0f --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/resolver_types.go @@ -0,0 +1,48 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// ResolverName is the name of a resolver from which a resource can be +// requested. +type ResolverName string + +// ResolverRef can be used to refer to a Pipeline or Task in a remote +// location like a git repo. This feature is in alpha and these fields +// are only available when the alpha feature gate is enabled. +type ResolverRef struct { + // Resolver is the name of the resolver that should perform + // resolution of the referenced Tekton resource, such as "git". + // +optional + Resolver ResolverName `json:"resolver,omitempty"` + // Resource contains the parameters used to identify the + // referenced Tekton resource. Example entries might include + // "repo" or "path" but the set of params ultimately depends on + // the chosen resolver. + // +optional + // +listType=atomic + Resource []ResolverParam `json:"resource,omitempty"` +} + +// ResolverParam is a single parameter passed to a resolver. +type ResolverParam struct { + // Name is the name of the parameter that will be passed to the + // resolver. + Name string `json:"name"` + // Value is the string value of the parameter that will be + // passed to the resolver. 
+ Value string `json:"value"` +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_interface.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_defaults.go similarity index 64% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_interface.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_defaults.go index 70e14bb774..9a5020ba12 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_defaults.go @@ -1,12 +1,9 @@ /* -Copyright 2019 The Tekton Authors - +Copyright 2022 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -14,13 +11,14 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import "context" -// PipelineObject is implemented by Pipeline and ClusterPipeline -type PipelineObject interface { - PipelineMetadata() metav1.ObjectMeta - PipelineSpec() PipelineSpec - Copy() PipelineObject +// SetDefaults set the default type for TaskResult +func (tr *TaskResult) SetDefaults(context.Context) { + if tr != nil && tr.Type == "" { + // ResultsTypeString is the default value + tr.Type = ResultsTypeString + } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_types.go new file mode 100644 index 0000000000..daf9abf260 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_types.go @@ -0,0 +1,73 @@ +/* +Copyright 2022 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "strings" + +// TaskResult used to describe the results of a task +type TaskResult struct { + // Name the given name + Name string `json:"name"` + + // Type is the user-specified type of the result. The possible type + // is currently "string" and will support "array" in following work. + // +optional + Type ResultsType `json:"type,omitempty"` + + // Properties is the JSON Schema properties to support key-value pairs results. 
+ // +optional + Properties map[string]PropertySpec `json:"properties,omitempty"` + + // Description is a human-readable description of the result + // +optional + Description string `json:"description,omitempty"` +} + +// TaskRunResult used to describe the results of a task +type TaskRunResult struct { + // Name the given name + Name string `json:"name"` + + // Type is the user-specified type of the result. The possible type + // is currently "string" and will support "array" in following work. + // +optional + Type ResultsType `json:"type,omitempty"` + + // Value the given value of the result + Value ArrayOrString `json:"value"` +} + +// ResultsType indicates the type of a result; +// Used to distinguish between a single string and an array of strings. +// Note that there is ResultType used to find out whether a +// PipelineResourceResult is from a task result or not, which is different from +// this ResultsType. +// TODO(#4723): add "array" and "object" support +// TODO(#4723): align ResultsType and ParamType in ArrayOrString +type ResultsType string + +// Valid ResultsType: +const ( + ResultsTypeString ResultsType = "string" + ResultsTypeArray ResultsType = "array" + ResultsTypeObject ResultsType = "object" +) + +// AllResultsTypes can be used for ResultsTypes validation. +var AllResultsTypes = []ResultsType{ResultsTypeString, ResultsTypeArray, ResultsTypeObject} + +// ResultsArrayReference returns the reference of the result. e.g. 
results.resultname from $(results.resultname[*])
+func ResultsArrayReference(a string) string {
+	return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(a, "$("), ")"), "[*]")
+}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go
new file mode 100644
index 0000000000..de17c84718
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/result_validation.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2022 The Tekton Authors
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+
+	"github.com/tektoncd/pipeline/pkg/apis/config"
+	"github.com/tektoncd/pipeline/pkg/apis/version"
+	"knative.dev/pkg/apis"
+)
+
+// ResultNameFormat Constant used to define the regex Result.Name should follow
+const ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$`
+
+var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat)
+
+// Validate implements apis.Validatable
+func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) {
+	if !resultNameFormatRegex.MatchString(tr.Name) {
+		return apis.ErrInvalidKeyName(tr.Name, "name", fmt.Sprintf("Name must consist of alphanumeric characters, '-', '_', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my-name', or 'my_name', regex used for validation is '%s')", ResultNameFormat))
+	}
+	// Array and Object are alpha features
+	if tr.Type == ResultsTypeArray || tr.Type == ResultsTypeObject {
+		return errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.AlphaAPIFields))
+	}
+
+	// Resources created before the result. Type was introduced may not have Type set
+	// and should be considered valid
+	if tr.Type == "" {
+		return nil
+	}
+
+	// By default the result type is string
+	if tr.Type != ResultsTypeString {
+		return apis.ErrInvalidValue(tr.Type, "type", "type must be string")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json
new file mode 100644
index 0000000000..87ba8b238f
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/swagger.json
@@ -0,0 +1,956 @@
+{
+  "swagger": "2.0",
+  "info": {
+    "description": "Tekton Pipeline",
+    "title": "Tekton",
+    "version": "v0.17.2"
+  },
+  "paths": {},
+  "definitions": {
+    "pod.AffinityAssistantTemplate": {
+      "description": "AffinityAssistantTemplate holds pod specific configuration and is a subset of the generic pod Template",
+      "type": "object",
+      "properties": {
+        "imagePullSecrets": {
+          "description": "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified",
+          "type": "array",
+          "items": {
+            "default": {},
+            "$ref": "#/definitions/v1.LocalObjectReference"
+          },
+          "x-kubernetes-list-type": "atomic"
+        },
+        "nodeSelector": {
+          "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Toleration" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "pod.Template": { + "description": "Template holds pod specific configuration", + "type": "object", + "properties": { + "affinity": { + "description": "If specified, the pod's scheduling constraints", + "$ref": "#/definitions/v1.Affinity" + }, + "automountServiceAccountToken": { + "description": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted.", + "type": "boolean" + }, + "dnsConfig": { + "description": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + "$ref": "#/definitions/v1.PodDNSConfig" + }, + "dnsPolicy": { + "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.", + "type": "string" + }, + "enableServiceLinks": { + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + "type": "boolean" + }, + "hostAliases": { + "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. 
This is only valid for non-hostNetwork pods.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.HostAlias" + }, + "x-kubernetes-list-type": "atomic" + }, + "hostNetwork": { + "description": "HostNetwork specifies whether the pod may use the node network namespace", + "type": "boolean" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.LocalObjectReference" + }, + "x-kubernetes-list-type": "atomic" + }, + "nodeSelector": { + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "priorityClassName": { + "description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + "type": "string" + }, + "runtimeClassName": { + "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. 
More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", + "type": "string" + }, + "schedulerName": { + "description": "SchedulerName specifies the scheduler to be used to dispatch the Pod", + "type": "string" + }, + "securityContext": { + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + "$ref": "#/definitions/v1.PodSecurityContext" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Toleration" + }, + "x-kubernetes-list-type": "atomic" + }, + "volumes": { + "description": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Volume" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + } + } + }, + "v1.ArrayOrString": { + "description": "ArrayOrString is a type that can hold a single string or string array. Used in JSON unmarshalling so that a single JSON field can accept either an individual string or an array of strings. 
consideration the object case after the community reaches an agreement on it.", + "type": "object", + "required": [ + "type", + "stringVal", + "arrayVal", + "objectVal" + ], + "properties": { + "arrayVal": { + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "objectVal": { + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, + "stringVal": { + "description": "Represents the stored type of ArrayOrString.", + "type": "string", + "default": "" + }, + "type": { + "type": "string", + "default": "" + } + } + }, + "v1.Param": { + "description": "Param declares an ArrayOrString to use for the parameter called name.", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "type": "string", + "default": "" + }, + "value": { + "default": {}, + "$ref": "#/definitions/v1.ArrayOrString" + } + } + }, + "v1.ParamSpec": { + "description": "ParamSpec defines arbitrary parameters needed beyond typed inputs (such as resources). Parameter values are provided by users as inputs on a TaskRun or PipelineRun.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "default": { + "description": "Default is the value a parameter takes if no input value is supplied. 
If default is set, a Task may be executed without a supplied value for the parameter.", + "$ref": "#/definitions/v1.ArrayOrString" + }, + "description": { + "description": "Description is a user-facing description of the parameter that may be used to populate a UI.", + "type": "string" + }, + "name": { + "description": "Name declares the name by which a parameter is referenced.", + "type": "string", + "default": "" + }, + "properties": { + "description": "Properties is the JSON Schema properties to support key-value pairs parameter.", + "type": "object", + "additionalProperties": { + "default": {}, + "$ref": "#/definitions/v1.PropertySpec" + } + }, + "type": { + "description": "Type is the user-specified type of the parameter. The possible types are currently \"string\", \"array\" and \"object\", and \"string\" is the default.", + "type": "string" + } + } + }, + "v1.PipelineWorkspaceDeclaration": { + "description": "WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun is expected to populate with a workspace binding. Deprecated: use PipelineWorkspaceDeclaration type instead", + "type": "object", + "required": [ + "name" + ], + "properties": { + "description": { + "description": "Description is a human readable string describing how the workspace will be used in the Pipeline. It can be useful to include a bit of detail about which tasks are intended to have access to the data on the workspace.", + "type": "string" + }, + "name": { + "description": "Name is the name of a workspace to be provided by a PipelineRun.", + "type": "string", + "default": "" + }, + "optional": { + "description": "Optional marks a Workspace as not being required in PipelineRuns. 
By default this field is false and so declared workspaces are required.", + "type": "boolean" + } + } + }, + "v1.PropertySpec": { + "description": "PropertySpec defines the struct for object keys", + "type": "object", + "properties": { + "type": { + "type": "string" + } + } + }, + "v1.ResolverParam": { + "description": "ResolverParam is a single parameter passed to a resolver.", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "description": "Name is the name of the parameter that will be passed to the resolver.", + "type": "string", + "default": "" + }, + "value": { + "description": "Value is the string value of the parameter that will be passed to the resolver.", + "type": "string", + "default": "" + } + } + }, + "v1.ResolverRef": { + "description": "ResolverRef can be used to refer to a Pipeline or Task in a remote location like a git repo. This feature is in alpha and these fields are only available when the alpha feature gate is enabled.", + "type": "object", + "properties": { + "resolver": { + "description": "Resolver is the name of the resolver that should perform resolution of the referenced Tekton resource, such as \"git\".", + "type": "string" + }, + "resource": { + "description": "Resource contains the parameters used to identify the referenced Tekton resource. Example entries might include \"repo\" or \"path\" but the set of params ultimately depends on the chosen resolver.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ResolverParam" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.Sidecar": { + "description": "Sidecar has nearly the same data structure as Step but does not have the ability to timeout.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "args": { + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvVar" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. 
When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvFromSource" + }, + "x-kubernetes-list-type": "atomic" + }, + "image": { + "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "lifecycle": { + "description": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "$ref": "#/definitions/v1.Lifecycle" + }, + "livenessProbe": { + "description": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "name": { + "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "type": "string", + "default": "" + }, + "ports": { + "description": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. 
Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ContainerPort" + }, + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge" + }, + "readinessProbe": { + "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "script": { + "description": "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command or Args.", + "type": "string" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/v1.SecurityContext" + }, + "startupProbe": { + "description": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. 
This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/v1.Probe" + }, + "stdin": { + "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "type": "boolean" + }, + "stdinOnce": { + "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "type": "boolean" + }, + "terminationMessagePath": { + "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "type": "string" + }, + "terminationMessagePolicy": { + "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. 
FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "type": "string" + }, + "tty": { + "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + "type": "boolean" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeDevice" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeMount" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + }, + "workspaces": { + "description": "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Sidecar wants exclusive access to. 
Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceUsage" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.Step": { + "description": "Step runs a subcomponent of a Task", + "type": "object", + "required": [ + "name" + ], + "properties": { + "args": { + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. 
More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvVar" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvFromSource" + }, + "x-kubernetes-list-type": "atomic" + }, + "image": { + "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "name": { + "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). 
Cannot be updated.", + "type": "string", + "default": "" + }, + "onError": { + "description": "OnError defines the exiting behavior of a container on error can be set to [ continue | stopAndFail ] stopAndFail indicates exit the taskRun if the container exits with non-zero exit code continue indicates continue executing the rest of the steps irrespective of the container exit code", + "type": "string" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "script": { + "description": "Script is the contents of an executable file to execute.\n\nIf Script is not empty, the Step cannot have an Command and the Args will be passed to the Script.", + "type": "string" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/v1.SecurityContext" + }, + "stderrConfig": { + "description": "Stores configuration for the stderr stream of the step.", + "$ref": "#/definitions/v1.StepOutputConfig" + }, + "stdoutConfig": { + "description": "Stores configuration for the stdout stream of the step.", + "$ref": "#/definitions/v1.StepOutputConfig" + }, + "timeout": { + "description": "Timeout is the time after which the step times out. Defaults to never. 
Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration", + "$ref": "#/definitions/v1.Duration" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeDevice" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeMount" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + }, + "workspaces": { + "description": "This is an alpha field. You must set the \"enable-api-fields\" feature flag to \"alpha\" for this field to be supported.\n\nWorkspaces is a list of workspaces from the Task that this Step wants exclusive access to. 
Adding a workspace to this list means that any other Step or Sidecar that does not also request this Workspace will not have access to it.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceUsage" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.StepOutputConfig": { + "description": "StepOutputConfig stores configuration for a step output stream.", + "type": "object", + "properties": { + "path": { + "description": "Path to duplicate stdout stream to on container's local filesystem.", + "type": "string" + } + } + }, + "v1.StepTemplate": { + "description": "StepTemplate is a template for a Step", + "type": "object", + "properties": { + "args": { + "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string", + "default": "" + }, + "x-kubernetes-list-type": "atomic" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvVar" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.EnvFromSource" + }, + "x-kubernetes-list-type": "atomic" + }, + "image": { + "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "default": {}, + "$ref": "#/definitions/v1.ResourceRequirements" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/v1.SecurityContext" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeDevice" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.VolumeMount" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + } + } + }, + "v1.Task": { + "description": "Task represents a collection of sequential steps that are run as part of a Pipeline using a set of inputs and producing a set of outputs. Tasks execute when TaskRuns are created that provide the input parameters and resources and output resources the Task requires.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ObjectMeta" + }, + "spec": { + "description": "Spec holds the desired state of the Task from the client", + "default": {}, + "$ref": "#/definitions/v1.TaskSpec" + } + } + }, + "v1.TaskList": { + "description": "TaskList contains a list of Task", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Task" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "default": {}, + "$ref": "#/definitions/v1.ListMeta" + } + } + }, + "v1.TaskResult": { + "description": "TaskResult used to describe the results of a task", + "type": "object", + "required": [ + "name" + ], + "properties": { + "description": { + "description": "Description is a human-readable description of the result", + "type": "string" + }, + "name": { + "description": "Name the given name", + "type": "string", + "default": "" + }, + "properties": { + "description": "Properties is the JSON Schema properties to support key-value pairs results.", + "type": "object", + "additionalProperties": { + "default": {}, + "$ref": "#/definitions/v1.PropertySpec" + } + }, + "type": { + "description": "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", + "type": "string" + } + } + }, + "v1.TaskRunResult": { + "description": "TaskRunResult used to describe the results of a task", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "description": "Name the given name", + "type": "string", + "default": "" + }, + "type": { + "description": "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", + "type": "string" + }, + "value": { + "description": "Value the given value of the result", + "default": {}, + "$ref": "#/definitions/v1.ArrayOrString" + } + } + }, + "v1.TaskSpec": { + "description": "TaskSpec defines the desired state of Task.", + "type": "object", + "properties": { + "description": { + "description": "Description is a user-facing description of the task that may be used to populate a UI.", + "type": "string" + }, + "params": { + "description": "Params is a list of input parameters required to run the task. 
Params must be supplied as inputs in TaskRuns unless they declare a default value.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.ParamSpec" + }, + "x-kubernetes-list-type": "atomic" + }, + "results": { + "description": "Results are values that this Task can output", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.TaskResult" + }, + "x-kubernetes-list-type": "atomic" + }, + "sidecars": { + "description": "Sidecars are run alongside the Task's step containers. They begin before the steps start and end after the steps complete.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Sidecar" + }, + "x-kubernetes-list-type": "atomic" + }, + "stepTemplate": { + "description": "StepTemplate can be used as the basis for all step containers within the Task, so that the steps inherit settings on the base container.", + "$ref": "#/definitions/v1.StepTemplate" + }, + "steps": { + "description": "Steps are the steps of the build; each step is run sequentially with the source mounted into /workspace.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Step" + }, + "x-kubernetes-list-type": "atomic" + }, + "volumes": { + "description": "Volumes is a collection of volumes that are available to mount into the steps of the build.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.Volume" + }, + "x-kubernetes-list-type": "atomic" + }, + "workspaces": { + "description": "Workspaces are the volumes that this Task requires.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/v1.WorkspaceDeclaration" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "v1.WorkspaceBinding": { + "description": "WorkspaceBinding maps a Task's declared workspace to a Volume.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "configMap": { + "description": "ConfigMap represents a configMap that should populate this 
workspace.", + "$ref": "#/definitions/v1.ConfigMapVolumeSource" + }, + "emptyDir": { + "description": "EmptyDir represents a temporary directory that shares a Task's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.", + "$ref": "#/definitions/v1.EmptyDirVolumeSource" + }, + "name": { + "description": "Name is the name of the workspace populated by the volume.", + "type": "string", + "default": "" + }, + "persistentVolumeClaim": { + "description": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used.", + "$ref": "#/definitions/v1.PersistentVolumeClaimVolumeSource" + }, + "secret": { + "description": "Secret represents a secret that should populate this workspace.", + "$ref": "#/definitions/v1.SecretVolumeSource" + }, + "subPath": { + "description": "SubPath is optionally a directory on the volume which should be used for this binding (i.e. the volume will be mounted at this sub directory).", + "type": "string" + }, + "volumeClaimTemplate": { + "description": "VolumeClaimTemplate is a template for a claim that will be created in the same namespace. 
The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun.", + "$ref": "#/definitions/v1.PersistentVolumeClaim" + } + } + }, + "v1.WorkspaceDeclaration": { + "description": "WorkspaceDeclaration is a declaration of a volume that a Task requires.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "description": { + "description": "Description is an optional human readable description of this volume.", + "type": "string" + }, + "mountPath": { + "description": "MountPath overrides the directory that the volume will be made available at.", + "type": "string" + }, + "name": { + "description": "Name is the name by which you can bind the volume at runtime.", + "type": "string", + "default": "" + }, + "optional": { + "description": "Optional marks a Workspace as not being required in TaskRuns. By default this field is false and so declared workspaces are required.", + "type": "boolean" + }, + "readOnly": { + "description": "ReadOnly dictates whether a mounted volume is writable. By default this field is false and so mounted volumes are writable.", + "type": "boolean" + } + } + }, + "v1.WorkspacePipelineTaskBinding": { + "description": "WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be mapped to a task's declared workspace.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name is the name of the workspace as declared by the task", + "type": "string", + "default": "" + }, + "subPath": { + "description": "SubPath is optionally a directory on the volume which should be used for this binding (i.e. 
the volume will be mounted at this sub directory).", + "type": "string" + }, + "workspace": { + "description": "Workspace is the name of the workspace declared by the pipeline", + "type": "string" + } + } + }, + "v1.WorkspaceUsage": { + "description": "WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access to a Workspace defined in a Task.", + "type": "object", + "required": [ + "name", + "mountPath" + ], + "properties": { + "mountPath": { + "description": "MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, overriding any MountPath specified in the Task's WorkspaceDeclaration.", + "type": "string", + "default": "" + }, + "name": { + "description": "Name is the name of the workspace this Step or Sidecar wants access to.", + "type": "string", + "default": "" + } + } + } + } +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_defaults.go similarity index 76% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_defaults.go index d61bc7b7cf..77a38425f2 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_defaults.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2022 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1alpha1 +package v1 import ( "context" @@ -34,14 +34,7 @@ func (ts *TaskSpec) SetDefaults(ctx context.Context) { for i := range ts.Params { ts.Params[i].SetDefaults(ctx) } - if ts.Inputs != nil { - ts.Inputs.SetDefaults(ctx) - } -} - -// SetDefaults implements apis.Defaultable -func (inputs *Inputs) SetDefaults(ctx context.Context) { - for i := range inputs.Params { - inputs.Params[i].SetDefaults(ctx) + for i := range ts.Results { + ts.Results[i].SetDefaults(ctx) } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go new file mode 100644 index 0000000000..4283e8119c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_types.go @@ -0,0 +1,105 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "knative.dev/pkg/kmeta" +) + +// +genclient +// +genclient:noStatus +// +genreconciler:krshapedlogic=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Task represents a collection of sequential steps that are run as part of a +// Pipeline using a set of inputs and producing a set of outputs. 
Tasks execute +// when TaskRuns are created that provide the input parameters and resources and +// output resources the Task requires. +// +// +k8s:openapi-gen=true +type Task struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata"` + + // Spec holds the desired state of the Task from the client + // +optional + Spec TaskSpec `json:"spec"` +} + +var _ kmeta.OwnerRefable = (*Task)(nil) + +// GetGroupVersionKind implements kmeta.OwnerRefable. +func (*Task) GetGroupVersionKind() schema.GroupVersionKind { + return SchemeGroupVersion.WithKind(pipeline.TaskControllerName) +} + +// TaskSpec defines the desired state of Task. +type TaskSpec struct { + + // Params is a list of input parameters required to run the task. Params + // must be supplied as inputs in TaskRuns unless they declare a default + // value. + // +optional + // +listType=atomic + Params []ParamSpec `json:"params,omitempty"` + + // Description is a user-facing description of the task that may be + // used to populate a UI. + // +optional + Description string `json:"description,omitempty"` + + // Steps are the steps of the build; each step is run sequentially with the + // source mounted into /workspace. + // +listType=atomic + Steps []Step `json:"steps,omitempty"` + + // Volumes is a collection of volumes that are available to mount into the + // steps of the build. + // +listType=atomic + Volumes []corev1.Volume `json:"volumes,omitempty"` + + // StepTemplate can be used as the basis for all step containers within the + // Task, so that the steps inherit settings on the base container. + StepTemplate *StepTemplate `json:"stepTemplate,omitempty"` + + // Sidecars are run alongside the Task's step containers. They begin before + // the steps start and end after the steps complete. + // +listType=atomic + Sidecars []Sidecar `json:"sidecars,omitempty"` + + // Workspaces are the volumes that this Task requires. 
+ // +listType=atomic + Workspaces []WorkspaceDeclaration `json:"workspaces,omitempty"` + + // Results are values that this Task can output + // +listType=atomic + Results []TaskResult `json:"results,omitempty"` +} + +// TaskList contains a list of Task +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TaskList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []Task `json:"items"` +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go new file mode 100644 index 0000000000..10d4470047 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go @@ -0,0 +1,600 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "context" + "fmt" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" + "github.com/tektoncd/pipeline/pkg/list" + "github.com/tektoncd/pipeline/pkg/substitution" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "knative.dev/pkg/apis" +) + +const ( + // stringAndArrayVariableNameFormat is the regex to validate if string/array variable name format follows the following rules. + // - Must only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.) + // - Must begin with a letter or an underscore (_) + stringAndArrayVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9.-]*$" + + // objectVariableNameFormat is the regex used to validate object name and key names format + // The difference with the array or string name format is that object variable names shouldn't contain dots. + objectVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9-]*$" +) + +var _ apis.Validatable = (*Task)(nil) +var stringAndArrayVariableNameFormatRegex = regexp.MustCompile(stringAndArrayVariableNameFormat) +var objectVariableNameFormatRegex = regexp.MustCompile(objectVariableNameFormat) + +// Validate implements apis.Validatable +func (t *Task) Validate(ctx context.Context) *apis.FieldError { + errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") + if apis.IsInDelete(ctx) { + return nil + } + return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) +} + +// Validate implements apis.Validatable +func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { + if len(ts.Steps) == 0 { + errs = errs.Also(apis.ErrMissingField("steps")) + } + + if config.IsSubstituted(ctx) { + // Validate the task's workspaces only.
+ errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) + errs = errs.Also(validateWorkspaceUsages(ctx, ts)) + + return errs + } + + errs = errs.Also(ValidateVolumes(ts.Volumes).ViaField("volumes")) + errs = errs.Also(validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate).ViaField("workspaces")) + errs = errs.Also(validateWorkspaceUsages(ctx, ts)) + mergedSteps, err := MergeStepsWithStepTemplate(ts.StepTemplate, ts.Steps) + if err != nil { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("error merging step template and steps: %s", err), + Paths: []string{"stepTemplate"}, + Details: err.Error(), + }) + } + + errs = errs.Also(validateSteps(ctx, mergedSteps).ViaField("steps")) + errs = errs.Also(ValidateParameterTypes(ctx, ts.Params).ViaField("params")) + errs = errs.Also(ValidateParameterVariables(ctx, ts.Steps, ts.Params)) + errs = errs.Also(validateTaskContextVariables(ctx, ts.Steps)) + errs = errs.Also(validateResults(ctx, ts.Results).ViaField("results")) + return errs +} + +func validateResults(ctx context.Context, results []TaskResult) (errs *apis.FieldError) { + for index, result := range results { + errs = errs.Also(result.Validate(ctx).ViaIndex(index)) + } + return errs +} + +// a mount path which conflicts with any other declared workspaces, with the explicitly +// declared volume mounts, or with the stepTemplate. The names must also be unique. 
+func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *StepTemplate) (errs *apis.FieldError) { + mountPaths := sets.NewString() + for _, step := range steps { + for _, vm := range step.VolumeMounts { + mountPaths.Insert(filepath.Clean(vm.MountPath)) + } + } + if stepTemplate != nil { + for _, vm := range stepTemplate.VolumeMounts { + mountPaths.Insert(filepath.Clean(vm.MountPath)) + } + } + + wsNames := sets.NewString() + for idx, w := range workspaces { + // Workspace names must be unique + if wsNames.Has(w.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace name %q must be unique", w.Name), "name").ViaIndex(idx)) + } else { + wsNames.Insert(w.Name) + } + // Workspaces must not try to use mount paths that are already used + mountPath := filepath.Clean(w.GetMountPath()) + if _, ok := mountPaths[mountPath]; ok { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("workspace mount path %q must be unique", mountPath), "mountpath").ViaIndex(idx)) + } + mountPaths[mountPath] = struct{}{} + } + return errs +} + +// validateWorkspaceUsages checks that all WorkspaceUsage objects in Steps +// refer to workspaces that are defined in the Task. +// +// This is an alpha feature and will fail validation if it's used by a step +// or sidecar when the enable-api-fields feature gate is anything but "alpha". 
+func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.FieldError) { + workspaces := ts.Workspaces + steps := ts.Steps + sidecars := ts.Sidecars + + wsNames := sets.NewString() + for _, w := range workspaces { + wsNames.Insert(w.Name) + } + + for stepIdx, step := range steps { + if len(step.Workspaces) != 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) + } + for workspaceIdx, w := range step.Workspaces { + if !wsNames.Has(w.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("undefined workspace %q", w.Name), "name").ViaIndex(workspaceIdx).ViaField("workspaces").ViaIndex(stepIdx).ViaField("steps")) + } + } + } + + for sidecarIdx, sidecar := range sidecars { + if len(sidecar.Workspaces) != 0 { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) + } + for workspaceIdx, w := range sidecar.Workspaces { + if !wsNames.Has(w.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("undefined workspace %q", w.Name), "name").ViaIndex(workspaceIdx).ViaField("workspaces").ViaIndex(sidecarIdx).ViaField("sidecars")) + } + } + } + + return errs +} + +// ValidateVolumes validates a slice of volumes to make sure there are no duplicate names +func ValidateVolumes(volumes []corev1.Volume) (errs *apis.FieldError) { + // Task must not have duplicate volume names. + vols := sets.NewString() + for idx, v := range volumes { + if vols.Has(v.Name) { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("multiple volumes with same name %q", v.Name), "name").ViaIndex(idx)) + } else { + vols.Insert(v.Name) + } + } + return errs +} + +func validateSteps(ctx context.Context, steps []Step) (errs *apis.FieldError) { + // Task must not have duplicate step names.
+ names := sets.NewString() + for idx, s := range steps { + errs = errs.Also(validateStep(ctx, s, names).ViaIndex(idx)) + } + return errs +} + +func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.FieldError) { + if s.Image == "" { + errs = errs.Also(apis.ErrMissingField("Image")) + } + + if s.Script != "" { + if len(s.Command) > 0 { + errs = errs.Also(&apis.FieldError{ + Message: "script cannot be used with command", + Paths: []string{"script"}, + }) + } + } + + if s.Name != "" { + if names.Has(s.Name) { + errs = errs.Also(apis.ErrInvalidValue(s.Name, "name")) + } + if e := validation.IsDNS1123Label(s.Name); len(e) > 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("invalid value %q", s.Name), + Paths: []string{"name"}, + Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + }) + } + names.Insert(s.Name) + } + + if s.Timeout != nil { + if s.Timeout.Duration < time.Duration(0) { + return apis.ErrInvalidValue(s.Timeout.Duration, "negative timeout") + } + } + + for j, vm := range s.VolumeMounts { + if strings.HasPrefix(vm.MountPath, "/tekton/") && + !strings.HasPrefix(vm.MountPath, "/tekton/home") { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf("volumeMount cannot be mounted under /tekton/ (volumeMount %q mounted at %q)", vm.Name, vm.MountPath), "mountPath").ViaFieldIndex("volumeMounts", j)) + } + if strings.HasPrefix(vm.Name, "tekton-internal-") { + errs = errs.Also(apis.ErrGeneric(fmt.Sprintf(`volumeMount name %q cannot start with "tekton-internal-"`, vm.Name), "name").ViaFieldIndex("volumeMounts", j)) + } + } + + if s.OnError != "" { + if s.OnError != "continue" && s.OnError != "stopAndFail" { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("invalid value: %v", s.OnError), + Paths: []string{"onError"}, + Details: "Task step onError must be either continue or stopAndFail", + }) + } + } + + if s.Script != "" { + 
cleaned := strings.TrimSpace(s.Script) + if strings.HasPrefix(cleaned, "#!win") { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script")) + } + } + // StdoutConfig is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + if s.StdoutConfig != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step stdout stream support", config.AlphaAPIFields).ViaField("stdoutconfig")) + } + // StderrConfig is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + if s.StderrConfig != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step stderr stream support", config.AlphaAPIFields).ViaField("stderrconfig")) + } + return errs +} + +// ValidateParameterTypes validates all the types within a slice of ParamSpecs +func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis.FieldError) { + for _, p := range params { + if p.Type == ParamTypeObject { + // Object type parameter is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + } + errs = errs.Also(p.ValidateType()) + } + return errs +} + +// ValidateType checks that the type of a ParamSpec is allowed and its default value matches that type +func (p ParamSpec) ValidateType() *apis.FieldError { + // Ensure param has a valid type. + validType := false + for _, allowedType := range AllParamTypes { + if p.Type == allowedType { + validType = true + } + } + if !validType { + return apis.ErrInvalidValue(p.Type, fmt.Sprintf("%s.type", p.Name)) + } + + // If a default value is provided, ensure its type matches param's declared type. 
+ if (p.Default != nil) && (p.Default.Type != p.Type) { + return &apis.FieldError{ + Message: fmt.Sprintf( + "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), + Paths: []string{ + fmt.Sprintf("%s.type", p.Name), + fmt.Sprintf("%s.default.type", p.Name), + }, + } + } + + // Check object type and its PropertySpec type + return p.ValidateObjectType() +} + +// ValidateObjectType checks that object type parameter does not miss the +// definition of `properties` section and the type of a PropertySpec is allowed. +// (Currently, only string is allowed) +func (p ParamSpec) ValidateObjectType() *apis.FieldError { + if p.Type == ParamTypeObject && p.Properties == nil { + return apis.ErrMissingField(fmt.Sprintf("%s.properties", p.Name)) + } + + invalidKeys := []string{} + for key, propertySpec := range p.Properties { + if propertySpec.Type != ParamTypeString { + invalidKeys = append(invalidKeys, key) + } + } + + if len(invalidKeys) != 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("The value type specified for these keys %v is invalid", invalidKeys), + Paths: []string{fmt.Sprintf("%s.properties", p.Name)}, + } + } + + return nil +} + +// ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps +func ValidateParameterVariables(ctx context.Context, steps []Step, params []ParamSpec) *apis.FieldError { + allParameterNames := sets.NewString() + stringParameterNames := sets.NewString() + arrayParameterNames := sets.NewString() + objectParamSpecs := []ParamSpec{} + var errs *apis.FieldError + for _, p := range params { + // validate no duplicate names + if allParameterNames.Has(p.Name) { + errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) + } + allParameterNames.Insert(p.Name) + + switch p.Type { + case ParamTypeArray: + arrayParameterNames.Insert(p.Name) + case ParamTypeObject: + objectParamSpecs = append(objectParamSpecs, p) + default: + 
stringParameterNames.Insert(p.Name) + } + } + + errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) + errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) + errs = errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) + errs = errs.Also(validateObjectDefault(objectParamSpecs)) + return errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) +} + +func validateTaskContextVariables(ctx context.Context, steps []Step) *apis.FieldError { + taskRunContextNames := sets.NewString().Insert( + "name", + "namespace", + "uid", + ) + taskContextNames := sets.NewString().Insert( + "name", + "retry-count", + ) + errs := validateVariables(ctx, steps, "context\\.taskRun", taskRunContextNames) + return errs.Also(validateVariables(ctx, steps, "context\\.task", taskContextNames)) +} + +// validateObjectUsage validates the usage of individual attributes of an object param and the usage of the entire object +func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) (errs *apis.FieldError) { + objectParameterNames := sets.NewString() + for _, p := range params { + // collect all names of object type params + objectParameterNames.Insert(p.Name) + + // collect all keys for this object param + objectKeys := sets.NewString() + for key := range p.Properties { + objectKeys.Insert(key) + } + + // check if the object's key names are referenced correctly i.e. param.objectParam.key1 + errs = errs.Also(validateVariables(ctx, steps, fmt.Sprintf("params\\.%s", p.Name), objectKeys)) + } + + return errs.Also(validateObjectUsageAsWhole(steps, "params", objectParameterNames)) +} + +// validateObjectDefault validates the keys of all the object params within a +// slice of ParamSpecs are provided in default iff the default section is provided. 
+func validateObjectDefault(objectParams []ParamSpec) (errs *apis.FieldError) { + for _, p := range objectParams { + errs = errs.Also(ValidateObjectKeys(p.Properties, p.Default).ViaField(p.Name)) + } + return errs +} + +// ValidateObjectKeys validates if object keys defined in properties are all provided in its value provider iff the provider is not nil. +func ValidateObjectKeys(properties map[string]PropertySpec, propertiesProvider *ArrayOrString) (errs *apis.FieldError) { + if propertiesProvider == nil || propertiesProvider.ObjectVal == nil { + return nil + } + + neededKeys := []string{} + providedKeys := []string{} + + // collect all needed keys + for key := range properties { + neededKeys = append(neededKeys, key) + } + + // collect all provided keys + for key := range propertiesProvider.ObjectVal { + providedKeys = append(providedKeys, key) + } + + missings := list.DiffLeft(neededKeys, providedKeys) + if len(missings) != 0 { + return &apis.FieldError{ + Message: fmt.Sprintf("Required key(s) %s are missing in the value provider.", missings), + Paths: []string{"properties", "default"}, + } + } + + return nil +} + +// validateObjectUsageAsWhole makes sure the object params are not used as whole when providing values for strings +// i.e. 
param.objectParam, param.objectParam[*] +func validateObjectUsageAsWhole(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + for idx, step := range steps { + errs = errs.Also(validateStepObjectUsageAsWhole(step, prefix, vars)).ViaFieldIndex("steps", idx) + } + return errs +} + +func validateStepObjectUsageAsWhole(step Step, prefix string, vars sets.String) *apis.FieldError { + errs := validateTaskNoObjectReferenced(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskNoObjectReferenced(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskNoObjectReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(validateTaskNoObjectReferenced(step.Script, prefix, vars).ViaField("script")) + for i, cmd := range step.Command { + errs = errs.Also(validateTaskNoObjectReferenced(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskNoObjectReferenced(arg, prefix, vars).ViaFieldIndex("args", i)) + + } + for _, env := range step.Env { + errs = errs.Also(validateTaskNoObjectReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskNoObjectReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoObjectReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoObjectReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + } + return errs +} + +func validateArrayUsage(steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + for idx, step := range steps { + errs = errs.Also(validateStepArrayUsage(step, prefix, vars)).ViaFieldIndex("steps", idx) + } + return errs +} + +func validateStepArrayUsage(step Step, prefix string, vars sets.String) *apis.FieldError { + errs := 
validateTaskNoArrayReferenced(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskNoArrayReferenced(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskNoArrayReferenced(step.WorkingDir, prefix, vars).ViaField("workingDir")) + errs = errs.Also(validateTaskNoArrayReferenced(step.Script, prefix, vars).ViaField("script")) + for i, cmd := range step.Command { + errs = errs.Also(validateTaskArraysIsolated(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskArraysIsolated(arg, prefix, vars).ViaFieldIndex("args", i)) + + } + for _, env := range step.Env { + errs = errs.Also(validateTaskNoArrayReferenced(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskNoArrayReferenced(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoArrayReferenced(v.MountPath, prefix, vars).ViaField("mountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskNoArrayReferenced(v.SubPath, prefix, vars).ViaField("subPath").ViaFieldIndex("volumeMount", i)) + } + return errs +} + +func validateVariables(ctx context.Context, steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { + // We've checked param name format. 
Now, we want to check if param names are referenced correctly in each step + for idx, step := range steps { + errs = errs.Also(validateStepVariables(ctx, step, prefix, vars).ViaFieldIndex("steps", idx)) + } + return errs +} + +// validateNameFormat validates that the name format of all param types follows the rules +func validateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSpec) (errs *apis.FieldError) { + // checking string or array name format + // ---- + invalidStringAndArrayNames := []string{} + // Converting to sorted list here rather than just looping map keys + // because we want the order of items in vars to be deterministic for purpose of unit testing + for _, name := range stringAndArrayParams.List() { + if !stringAndArrayVariableNameFormatRegex.MatchString(name) { + invalidStringAndArrayNames = append(invalidStringAndArrayNames, name) + } + } + + if len(invalidStringAndArrayNames) != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("The format of following array and string variable names is invalid: %s", invalidStringAndArrayNames), + Paths: []string{"params"}, + Details: "String/Array Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)\nMust begin with a letter or an underscore (_)", + }) + } + + // checking object name and key name format + // ----- + invalidObjectNames := map[string][]string{} + for _, obj := range objectParams { + // check object param name + if !objectVariableNameFormatRegex.MatchString(obj.Name) { + invalidObjectNames[obj.Name] = []string{} + } + + // check key names + for k := range obj.Properties { + if !objectVariableNameFormatRegex.MatchString(k) { + invalidObjectNames[obj.Name] = append(invalidObjectNames[obj.Name], k) + } + } + } + + if len(invalidObjectNames) != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("Object param name and key name format is invalid: %s", invalidObjectNames), + Paths: []string{"params"}, + Details: "Object 
Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_) \nMust begin with a letter or an underscore (_)", + }) + } + + return errs +} + +func validateStepVariables(ctx context.Context, step Step, prefix string, vars sets.String) *apis.FieldError { + errs := validateTaskVariable(step.Name, prefix, vars).ViaField("name") + errs = errs.Also(validateTaskVariable(step.Image, prefix, vars).ViaField("image")) + errs = errs.Also(validateTaskVariable(step.WorkingDir, prefix, vars).ViaField("workingDir")) + if !(config.FromContextOrDefaults(ctx).FeatureFlags.EnableAPIFields == "alpha" && prefix == "params") { + errs = errs.Also(validateTaskVariable(step.Script, prefix, vars).ViaField("script")) + } + for i, cmd := range step.Command { + errs = errs.Also(validateTaskVariable(cmd, prefix, vars).ViaFieldIndex("command", i)) + } + for i, arg := range step.Args { + errs = errs.Also(validateTaskVariable(arg, prefix, vars).ViaFieldIndex("args", i)) + } + for _, env := range step.Env { + errs = errs.Also(validateTaskVariable(env.Value, prefix, vars).ViaFieldKey("env", env.Name)) + } + for i, v := range step.VolumeMounts { + errs = errs.Also(validateTaskVariable(v.Name, prefix, vars).ViaField("name").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskVariable(v.MountPath, prefix, vars).ViaField("MountPath").ViaFieldIndex("volumeMount", i)) + errs = errs.Also(validateTaskVariable(v.SubPath, prefix, vars).ViaField("SubPath").ViaFieldIndex("volumeMount", i)) + } + return errs +} + +func validateTaskVariable(value, prefix string, vars sets.String) *apis.FieldError { + return substitution.ValidateVariableP(value, prefix, vars) +} + +func validateTaskNoObjectReferenced(value, prefix string, objectNames sets.String) *apis.FieldError { + return substitution.ValidateEntireVariableProhibitedP(value, prefix, objectNames) +} + +func validateTaskNoArrayReferenced(value, prefix string, arrayNames sets.String) *apis.FieldError { + return 
substitution.ValidateVariableProhibitedP(value, prefix, arrayNames) +} + +func validateTaskArraysIsolated(value, prefix string, arrayNames sets.String) *apis.FieldError { + return substitution.ValidateVariableIsolatedP(value, prefix, arrayNames) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go new file mode 100644 index 0000000000..da89660624 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_types.go @@ -0,0 +1,124 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "path/filepath" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + corev1 "k8s.io/api/core/v1" +) + +// WorkspaceDeclaration is a declaration of a volume that a Task requires. +type WorkspaceDeclaration struct { + // Name is the name by which you can bind the volume at runtime. + Name string `json:"name"` + // Description is an optional human readable description of this volume. + // +optional + Description string `json:"description,omitempty"` + // MountPath overrides the directory that the volume will be made available at. + // +optional + MountPath string `json:"mountPath,omitempty"` + // ReadOnly dictates whether a mounted volume is writable. By default this + // field is false and so mounted volumes are writable. 
+ ReadOnly bool `json:"readOnly,omitempty"` + // Optional marks a Workspace as not being required in TaskRuns. By default + // this field is false and so declared workspaces are required. + Optional bool `json:"optional,omitempty"` +} + +// GetMountPath returns the mountPath for w which is the MountPath if provided or the +// default if not. +func (w *WorkspaceDeclaration) GetMountPath() string { + if w.MountPath != "" { + return w.MountPath + } + return filepath.Join(pipeline.WorkspaceDir, w.Name) +} + +// WorkspaceBinding maps a Task's declared workspace to a Volume. +type WorkspaceBinding struct { + // Name is the name of the workspace populated by the volume. + Name string `json:"name"` + // SubPath is optionally a directory on the volume which should be used + // for this binding (i.e. the volume will be mounted at this sub directory). + // +optional + SubPath string `json:"subPath,omitempty"` + // VolumeClaimTemplate is a template for a claim that will be created in the same namespace. + // The PipelineRun controller is responsible for creating a unique claim for each instance of PipelineRun. + // +optional + VolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. Either this OR EmptyDir can be used. + // +optional + PersistentVolumeClaim *corev1.PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty"` + // EmptyDir represents a temporary directory that shares a Task's lifetime. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // Either this OR PersistentVolumeClaim can be used. + // +optional + EmptyDir *corev1.EmptyDirVolumeSource `json:"emptyDir,omitempty"` + // ConfigMap represents a configMap that should populate this workspace. 
+ // +optional + ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty"` + // Secret represents a secret that should populate this workspace. + // +optional + Secret *corev1.SecretVolumeSource `json:"secret,omitempty"` +} + +// WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun +// is expected to populate with a workspace binding. +// Deprecated: use PipelineWorkspaceDeclaration type instead +type WorkspacePipelineDeclaration = PipelineWorkspaceDeclaration + +// PipelineWorkspaceDeclaration creates a named slot in a Pipeline that a PipelineRun +// is expected to populate with a workspace binding. +type PipelineWorkspaceDeclaration struct { + // Name is the name of a workspace to be provided by a PipelineRun. + Name string `json:"name"` + // Description is a human readable string describing how the workspace will be + // used in the Pipeline. It can be useful to include a bit of detail about which + // tasks are intended to have access to the data on the workspace. + // +optional + Description string `json:"description,omitempty"` + // Optional marks a Workspace as not being required in PipelineRuns. By default + // this field is false and so declared workspaces are required. + Optional bool `json:"optional,omitempty"` +} + +// WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be +// mapped to a task's declared workspace. +type WorkspacePipelineTaskBinding struct { + // Name is the name of the workspace as declared by the task + Name string `json:"name"` + // Workspace is the name of the workspace declared by the pipeline + // +optional + Workspace string `json:"workspace,omitempty"` + // SubPath is optionally a directory on the volume which should be used + // for this binding (i.e. the volume will be mounted at this sub directory). 
+ // +optional + SubPath string `json:"subPath,omitempty"` +} + +// WorkspaceUsage is used by a Step or Sidecar to declare that it wants isolated access +// to a Workspace defined in a Task. +type WorkspaceUsage struct { + // Name is the name of the workspace this Step or Sidecar wants access to. + Name string `json:"name"` + // MountPath is the path that the workspace should be mounted to inside the Step or Sidecar, + // overriding any MountPath specified in the Task's WorkspaceDeclaration. + MountPath string `json:"mountPath"` +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go new file mode 100644 index 0000000000..be852bb46d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/workspace_validation.go @@ -0,0 +1,92 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/equality" + "knative.dev/pkg/apis" +) + +// allVolumeSourceFields is a list of all the volume source field paths that a +// WorkspaceBinding may include. +var allVolumeSourceFields = []string{ + "persistentvolumeclaim", + "volumeclaimtemplate", + "emptydir", + "configmap", + "secret", +} + +// Validate looks at the Volume provided in wb and makes sure that it is valid. 
+// This means that only one VolumeSource can be specified, and also that the +// supported VolumeSource is itself valid. +func (b *WorkspaceBinding) Validate(context.Context) *apis.FieldError { + if equality.Semantic.DeepEqual(b, &WorkspaceBinding{}) || b == nil { + return apis.ErrMissingField(apis.CurrentField) + } + + numSources := b.numSources() + + if numSources > 1 { + return apis.ErrMultipleOneOf(allVolumeSourceFields...) + } + + if numSources == 0 { + return apis.ErrMissingOneOf(allVolumeSourceFields...) + } + + // For a PersistentVolumeClaim to work, you must at least provide the name of the PVC to use. + if b.PersistentVolumeClaim != nil && b.PersistentVolumeClaim.ClaimName == "" { + return apis.ErrMissingField("persistentvolumeclaim.claimname") + } + + // For a ConfigMap to work, you must provide the name of the ConfigMap to use. + if b.ConfigMap != nil && b.ConfigMap.LocalObjectReference.Name == "" { + return apis.ErrMissingField("configmap.name") + } + + // For a Secret to work, you must provide the name of the Secret to use. + if b.Secret != nil && b.Secret.SecretName == "" { + return apis.ErrMissingField("secret.secretName") + } + + return nil +} + +// numSources returns the total number of volume sources that this WorkspaceBinding +// has been configured with. 
+func (b *WorkspaceBinding) numSources() int { + n := 0 + if b.VolumeClaimTemplate != nil { + n++ + } + if b.PersistentVolumeClaim != nil { + n++ + } + if b.EmptyDir != nil { + n++ + } + if b.ConfigMap != nil { + n++ + } + if b.Secret != nil { + n++ + } + return n +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f618a68748 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/zz_generated.deepcopy.go @@ -0,0 +1,660 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ArrayOrString) DeepCopyInto(out *ArrayOrString) { + *out = *in + if in.ArrayVal != nil { + in, out := &in.ArrayVal, &out.ArrayVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ObjectVal != nil { + in, out := &in.ObjectVal, &out.ObjectVal + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArrayOrString. +func (in *ArrayOrString) DeepCopy() *ArrayOrString { + if in == nil { + return nil + } + out := new(ArrayOrString) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Param) DeepCopyInto(out *Param) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Param. +func (in *Param) DeepCopy() *Param { + if in == nil { + return nil + } + out := new(Param) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ParamSpec) DeepCopyInto(out *ParamSpec) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]PropertySpec, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = new(ArrayOrString) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamSpec. +func (in *ParamSpec) DeepCopy() *ParamSpec { + if in == nil { + return nil + } + out := new(ParamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PipelineWorkspaceDeclaration) DeepCopyInto(out *PipelineWorkspaceDeclaration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineWorkspaceDeclaration. +func (in *PipelineWorkspaceDeclaration) DeepCopy() *PipelineWorkspaceDeclaration { + if in == nil { + return nil + } + out := new(PipelineWorkspaceDeclaration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PropertySpec) DeepCopyInto(out *PropertySpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PropertySpec. +func (in *PropertySpec) DeepCopy() *PropertySpec { + if in == nil { + return nil + } + out := new(PropertySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverParam) DeepCopyInto(out *ResolverParam) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverParam. +func (in *ResolverParam) DeepCopy() *ResolverParam { + if in == nil { + return nil + } + out := new(ResolverParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResolverRef) DeepCopyInto(out *ResolverRef) { + *out = *in + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = make([]ResolverParam, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResolverRef. 
+func (in *ResolverRef) DeepCopy() *ResolverRef { + if in == nil { + return nil + } + out := new(ResolverRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sidecar) DeepCopyInto(out *Sidecar) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]corev1.ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]corev1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]corev1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(corev1.Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(corev1.Probe) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(corev1.Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(corev1.Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { 
+ in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]WorkspaceUsage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sidecar. +func (in *Sidecar) DeepCopy() *Sidecar { + if in == nil { + return nil + } + out := new(Sidecar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Step) DeepCopyInto(out *Step) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]corev1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]corev1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + if in.Timeout != nil { + in, out := &in.Timeout, &out.Timeout + *out = new(metav1.Duration) + **out = **in + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = 
make([]WorkspaceUsage, len(*in)) + copy(*out, *in) + } + if in.StdoutConfig != nil { + in, out := &in.StdoutConfig, &out.StdoutConfig + *out = new(StepOutputConfig) + **out = **in + } + if in.StderrConfig != nil { + in, out := &in.StderrConfig, &out.StderrConfig + *out = new(StepOutputConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step. +func (in *Step) DeepCopy() *Step { + if in == nil { + return nil + } + out := new(Step) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepOutputConfig) DeepCopyInto(out *StepOutputConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepOutputConfig. +func (in *StepOutputConfig) DeepCopy() *StepOutputConfig { + if in == nil { + return nil + } + out := new(StepOutputConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StepTemplate) DeepCopyInto(out *StepTemplate) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]corev1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]corev1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]corev1.VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(corev1.SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepTemplate. +func (in *StepTemplate) DeepCopy() *StepTemplate { + if in == nil { + return nil + } + out := new(StepTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Task) DeepCopyInto(out *Task) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task. 
+func (in *Task) DeepCopy() *Task { + if in == nil { + return nil + } + out := new(Task) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Task) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskList) DeepCopyInto(out *TaskList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Task, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList. +func (in *TaskList) DeepCopy() *TaskList { + if in == nil { + return nil + } + out := new(TaskList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TaskList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskResult) DeepCopyInto(out *TaskResult) { + *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]PropertySpec, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskResult. +func (in *TaskResult) DeepCopy() *TaskResult { + if in == nil { + return nil + } + out := new(TaskResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *TaskRunResult) DeepCopyInto(out *TaskRunResult) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunResult. +func (in *TaskRunResult) DeepCopy() *TaskRunResult { + if in == nil { + return nil + } + out := new(TaskRunResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { + *out = *in + if in.Params != nil { + in, out := &in.Params, &out.Params + *out = make([]ParamSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]Step, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]corev1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StepTemplate != nil { + in, out := &in.StepTemplate, &out.StepTemplate + *out = new(StepTemplate) + (*in).DeepCopyInto(*out) + } + if in.Sidecars != nil { + in, out := &in.Sidecars, &out.Sidecars + *out = make([]Sidecar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Workspaces != nil { + in, out := &in.Workspaces, &out.Workspaces + *out = make([]WorkspaceDeclaration, len(*in)) + copy(*out, *in) + } + if in.Results != nil { + in, out := &in.Results, &out.Results + *out = make([]TaskResult, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec. 
+func (in *TaskSpec) DeepCopy() *TaskSpec { + if in == nil { + return nil + } + out := new(TaskSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceBinding) DeepCopyInto(out *WorkspaceBinding) { + *out = *in + if in.VolumeClaimTemplate != nil { + in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate + *out = new(corev1.PersistentVolumeClaim) + (*in).DeepCopyInto(*out) + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(corev1.PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(corev1.EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceBinding. +func (in *WorkspaceBinding) DeepCopy() *WorkspaceBinding { + if in == nil { + return nil + } + out := new(WorkspaceBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceDeclaration) DeepCopyInto(out *WorkspaceDeclaration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceDeclaration. +func (in *WorkspaceDeclaration) DeepCopy() *WorkspaceDeclaration { + if in == nil { + return nil + } + out := new(WorkspaceDeclaration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *WorkspacePipelineTaskBinding) DeepCopyInto(out *WorkspacePipelineTaskBinding) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspacePipelineTaskBinding. +func (in *WorkspacePipelineTaskBinding) DeepCopy() *WorkspacePipelineTaskBinding { + if in == nil { + return nil + } + out := new(WorkspacePipelineTaskBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceUsage) DeepCopyInto(out *WorkspaceUsage) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceUsage. +func (in *WorkspaceUsage) DeepCopy() *WorkspaceUsage { + if in == nil { + return nil + } + out := new(WorkspaceUsage) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_conversion.go deleted file mode 100644 index 976add0d50..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_conversion.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "knative.dev/pkg/apis" -) - -var _ apis.Convertible = (*ClusterTask)(nil) - -// ConvertTo implements api.Convertible -func (ct *ClusterTask) ConvertTo(ctx context.Context, obj apis.Convertible) error { - switch sink := obj.(type) { - case *v1beta1.ClusterTask: - sink.ObjectMeta = ct.ObjectMeta - return ct.Spec.ConvertTo(ctx, &sink.Spec) - default: - return fmt.Errorf("unknown version, got: %T", sink) - } -} - -// ConvertFrom implements api.Convertible -func (ct *ClusterTask) ConvertFrom(ctx context.Context, obj apis.Convertible) error { - switch source := obj.(type) { - case *v1beta1.ClusterTask: - ct.ObjectMeta = source.ObjectMeta - return ct.Spec.ConvertFrom(ctx, &source.Spec) - default: - return fmt.Errorf("unknown version, got: %T", ct) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go deleted file mode 100644 index 1b2ebd2785..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:noStatus -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterTask is a Task with a cluster scope. ClusterTasks are used to -// represent Tasks that should be publicly addressable from any namespace in the -// cluster. -type ClusterTask struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec holds the desired state of the Task from the client - // +optional - Spec TaskSpec `json:"spec,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterTaskList contains a list of ClusterTask. -type ClusterTaskList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []ClusterTask `json:"items"` -} - -// TaskSpec returns the ClusterTask's Spec. -func (t *ClusterTask) TaskSpec() TaskSpec { - return t.Spec -} - -// TaskMetadata returns the ObjectMeta for the ClusterTask. -func (t *ClusterTask) TaskMetadata() metav1.ObjectMeta { - return t.ObjectMeta -} - -// Copy returns a DeepCopy of the ClusterTask. -func (t *ClusterTask) Copy() TaskObject { - return t.DeepCopy() -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go deleted file mode 100644 index 77adb5ec12..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - - "github.com/tektoncd/pipeline/pkg/apis/validate" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*ClusterTask)(nil) - -// Validate performs validation of the metadata and spec of this ClusterTask. -func (t *ClusterTask) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(t.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - if apis.IsInDelete(ctx) { - return nil - } - return t.Spec.Validate(ctx) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/container_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/container_replacements.go deleted file mode 100644 index 0454193445..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/container_replacements.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/substitution" -) - -// ApplyContainerReplacements replaces ${...} expressions in the container's name, image, args, env, command, workingDir, -// and volumes. -func ApplyContainerReplacements(step *Step, stringReplacements map[string]string, arrayReplacements map[string][]string) { - step.Name = substitution.ApplyReplacements(step.Name, stringReplacements) - step.Image = substitution.ApplyReplacements(step.Image, stringReplacements) - - // Use ApplyArrayReplacements here, as additional args may be added via an array parameter. - var newArgs []string - for _, a := range step.Args { - newArgs = append(newArgs, substitution.ApplyArrayReplacements(a, stringReplacements, arrayReplacements)...) - } - step.Args = newArgs - - for ie, e := range step.Env { - step.Env[ie].Value = substitution.ApplyReplacements(e.Value, stringReplacements) - if step.Env[ie].ValueFrom != nil { - if e.ValueFrom.SecretKeyRef != nil { - step.Env[ie].ValueFrom.SecretKeyRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ValueFrom.SecretKeyRef.LocalObjectReference.Name, stringReplacements) - step.Env[ie].ValueFrom.SecretKeyRef.Key = substitution.ApplyReplacements(e.ValueFrom.SecretKeyRef.Key, stringReplacements) - } - if e.ValueFrom.ConfigMapKeyRef != nil { - step.Env[ie].ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name, stringReplacements) - step.Env[ie].ValueFrom.ConfigMapKeyRef.Key = substitution.ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.Key, stringReplacements) - } - } - } - - for ie, e := range step.EnvFrom { - step.EnvFrom[ie].Prefix = substitution.ApplyReplacements(e.Prefix, stringReplacements) - if e.ConfigMapRef != nil { - step.EnvFrom[ie].ConfigMapRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ConfigMapRef.LocalObjectReference.Name, stringReplacements) - } - if e.SecretRef != nil { - 
step.EnvFrom[ie].SecretRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.SecretRef.LocalObjectReference.Name, stringReplacements) - } - } - step.WorkingDir = substitution.ApplyReplacements(step.WorkingDir, stringReplacements) - - // Use ApplyArrayReplacements here, as additional commands may be added via an array parameter. - var newCommand []string - for _, c := range step.Command { - newCommand = append(newCommand, substitution.ApplyArrayReplacements(c, stringReplacements, arrayReplacements)...) - } - step.Command = newCommand - - for iv, v := range step.VolumeMounts { - step.VolumeMounts[iv].Name = substitution.ApplyReplacements(v.Name, stringReplacements) - step.VolumeMounts[iv].MountPath = substitution.ApplyReplacements(v.MountPath, stringReplacements) - step.VolumeMounts[iv].SubPath = substitution.ApplyReplacements(v.SubPath, stringReplacements) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/conversion_error.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/conversion_error.go deleted file mode 100644 index 2ebdba1a1f..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/conversion_error.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2020 The Tekton Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "knative.dev/pkg/apis" -) - -const ( - // ConditionTypeConvertible is a Warning condition that is set on - // resources when they cannot be converted to warn of a forthcoming - // breakage. - ConditionTypeConvertible apis.ConditionType = v1beta1.ConditionTypeConvertible - // ConversionErrorFieldNotAvailableMsg Conversion Error message for a field not available in v1alpha1 - ConversionErrorFieldNotAvailableMsg = "the specified field/section is not available in v1alpha1" -) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go deleted file mode 100644 index 69b21947c6..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" -) - -// ParamSpec defines arbitrary parameters needed beyond typed inputs (such as -// resources). Parameter values are provided by users as inputs on a TaskRun -// or PipelineRun. -type ParamSpec = v1beta1.ParamSpec - -// Param declares an ArrayOrString to use for the parameter called name. 
-type Param = v1beta1.Param - -// ParamType indicates the type of an input parameter; -// Used to distinguish between a single string and an array of strings. -type ParamType = v1beta1.ParamType - -// Valid ParamTypes: -const ( - ParamTypeString ParamType = v1beta1.ParamTypeString - ParamTypeArray ParamType = v1beta1.ParamTypeArray -) - -// AllParamTypes can be used for ParamType validation. -var AllParamTypes = v1beta1.AllParamTypes - -// ArrayOrString is modeled after IntOrString in kubernetes/apimachinery: - -// ArrayOrString is a type that can hold a single string or string array. -// Used in JSON unmarshalling so that a single JSON field can accept -// either an individual string or an array of strings. -type ArrayOrString = v1beta1.ArrayOrString diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_conversion.go deleted file mode 100644 index b37d62a70b..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_conversion.go +++ /dev/null @@ -1,175 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/apis" -) - -const finallyAnnotationKey = "tekton.dev/v1beta1Finally" - -var _ apis.Convertible = (*Pipeline)(nil) - -// ConvertTo implements api.Convertible -func (p *Pipeline) ConvertTo(ctx context.Context, obj apis.Convertible) error { - switch sink := obj.(type) { - case *v1beta1.Pipeline: - sink.ObjectMeta = p.ObjectMeta - if err := p.Spec.ConvertTo(ctx, &sink.Spec); err != nil { - return err - } - if err := deserializeFinally(&sink.ObjectMeta, &sink.Spec); err != nil { - return err - } - if err := v1beta1.ValidatePipelineTasks(ctx, sink.Spec.Tasks, sink.Spec.Finally); err != nil { - return fmt.Errorf("error converting finally annotation into beta field: %w", err) - } - default: - return fmt.Errorf("unknown version, got: %T", sink) - } - return nil -} - -// ConvertTo implements api.Convertible -func (ps *PipelineSpec) ConvertTo(ctx context.Context, sink *v1beta1.PipelineSpec) error { - sink.Resources = ps.Resources - sink.Params = ps.Params - sink.Workspaces = ps.Workspaces - sink.Description = ps.Description - if len(ps.Tasks) > 0 { - sink.Tasks = make([]v1beta1.PipelineTask, len(ps.Tasks)) - for i := range ps.Tasks { - if err := ps.Tasks[i].ConvertTo(ctx, &sink.Tasks[i]); err != nil { - return err - } - } - } - sink.Finally = nil - return nil -} - -// ConvertTo implements api.Convertible -func (pt *PipelineTask) ConvertTo(ctx context.Context, sink *v1beta1.PipelineTask) error { - sink.Name = pt.Name - sink.TaskRef = pt.TaskRef - if pt.TaskSpec != nil { - sink.TaskSpec = &v1beta1.EmbeddedTask{TaskSpec: v1beta1.TaskSpec{}} - if err := pt.TaskSpec.ConvertTo(ctx, &sink.TaskSpec.TaskSpec); err != nil { - return err - } - } - sink.Retries = pt.Retries - sink.RunAfter = pt.RunAfter - sink.Resources = pt.Resources - sink.Params = pt.Params - sink.Workspaces = 
pt.Workspaces - sink.Timeout = pt.Timeout - return nil -} - -// ConvertFrom implements api.Convertible -func (p *Pipeline) ConvertFrom(ctx context.Context, obj apis.Convertible) error { - switch source := obj.(type) { - case *v1beta1.Pipeline: - p.ObjectMeta = source.ObjectMeta - if err := serializeFinally(&p.ObjectMeta, source.Spec.Finally); err != nil { - return err - } - return p.Spec.ConvertFrom(ctx, source.Spec) - default: - return fmt.Errorf("unknown version, got: %T", p) - } -} - -// ConvertFrom implements api.Convertible -func (ps *PipelineSpec) ConvertFrom(ctx context.Context, source v1beta1.PipelineSpec) error { - ps.Resources = source.Resources - ps.Params = source.Params - ps.Workspaces = source.Workspaces - ps.Description = source.Description - if len(source.Tasks) > 0 { - ps.Tasks = make([]PipelineTask, len(source.Tasks)) - for i := range source.Tasks { - if err := ps.Tasks[i].ConvertFrom(ctx, source.Tasks[i]); err != nil { - return err - } - } - } - return nil -} - -// ConvertFrom implements api.Convertible -func (pt *PipelineTask) ConvertFrom(ctx context.Context, source v1beta1.PipelineTask) error { - pt.Name = source.Name - pt.TaskRef = source.TaskRef - if source.TaskSpec != nil { - pt.TaskSpec = &TaskSpec{} - if err := pt.TaskSpec.ConvertFrom(ctx, &source.TaskSpec.TaskSpec); err != nil { - return err - } - } - pt.Retries = source.Retries - pt.RunAfter = source.RunAfter - pt.Resources = source.Resources - pt.Params = source.Params - pt.Workspaces = source.Workspaces - pt.Timeout = source.Timeout - return nil -} - -// serializeFinally serializes a list of Finally Tasks to the annotations -// of an object's metadata section. This can then be used to re-instantiate -// the Finally Tasks when converting back up to v1beta1 and beyond. 
-func serializeFinally(meta *metav1.ObjectMeta, finally []v1beta1.PipelineTask) error { - if len(finally) != 0 { - b, err := json.Marshal(finally) - if err != nil { - return err - } - if meta.Annotations == nil { - meta.Annotations = make(map[string]string) - } - meta.Annotations[finallyAnnotationKey] = string(b) - } - return nil -} - -// deserializeFinally populates a PipelineSpec's Finally list -// from an annotation found on resources that have been previously -// converted down from v1beta1 to v1alpha1. -func deserializeFinally(meta *metav1.ObjectMeta, spec *v1beta1.PipelineSpec) error { - if meta.Annotations != nil { - if str, ok := meta.Annotations[finallyAnnotationKey]; ok { - finally := []v1beta1.PipelineTask{} - if err := json.Unmarshal([]byte(str), &finally); err != nil { - return fmt.Errorf("error converting finally annotation into beta field: %w", err) - } - delete(meta.Annotations, finallyAnnotationKey) - if len(meta.Annotations) == 0 { - meta.Annotations = nil - } - spec.Finally = finally - } - } - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go deleted file mode 100644 index 2bb226c034..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - - "knative.dev/pkg/apis" -) - -var _ apis.Defaultable = (*Pipeline)(nil) - -// SetDefaults sets default values on the Pipeline's Spec -func (p *Pipeline) SetDefaults(ctx context.Context) { - p.Spec.SetDefaults(ctx) -} - -// SetDefaults sets default values for the PipelineSpec's Params and Tasks -func (ps *PipelineSpec) SetDefaults(ctx context.Context) { - for _, pt := range ps.Tasks { - if pt.TaskRef != nil { - if pt.TaskRef.Kind == "" { - pt.TaskRef.Kind = NamespacedTaskKind - } - } - if pt.TaskSpec != nil { - pt.TaskSpec.SetDefaults(ctx) - } - } - for i := range ps.Params { - ps.Params[i].SetDefaults(ctx) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_resource_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_resource_types.go deleted file mode 100644 index d35e41bf1b..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_resource_types.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" -) - -// PipelineResourceType represents the type of endpoint the pipelineResource is, so that the -// controller will know this pipelineResource should be fetched and optionally what -// additional metatdata should be provided for it. 
-type PipelineResourceType = resource.PipelineResourceType - -const ( - // PipelineResourceTypeGit indicates that this source is a Git repo. - PipelineResourceTypeGit PipelineResourceType = resource.PipelineResourceTypeGit - - // PipelineResourceTypeStorage indicates that this source is a storage blob resource. - PipelineResourceTypeStorage PipelineResourceType = resource.PipelineResourceTypeStorage - - // PipelineResourceTypeImage indicates that this source is a docker Image. - PipelineResourceTypeImage PipelineResourceType = resource.PipelineResourceTypeImage - - // PipelineResourceTypeCluster indicates that this source is a k8s cluster Image. - PipelineResourceTypeCluster PipelineResourceType = resource.PipelineResourceTypeCluster - - // PipelineResourceTypePullRequest indicates that this source is a SCM Pull Request. - PipelineResourceTypePullRequest PipelineResourceType = resource.PipelineResourceTypePullRequest - - // PipelineResourceTypeCloudEvent indicates that this source is a cloud event URI - PipelineResourceTypeCloudEvent PipelineResourceType = resource.PipelineResourceTypeCloudEvent -) - -// AllResourceTypes can be used for validation to check if a provided Resource type is one of the known types. -var AllResourceTypes = resource.AllResourceTypes - -// PipelineResource describes a resource that is an input to or output from a -// Task. -// -type PipelineResource = resource.PipelineResource - -// PipelineResourceSpec defines an individual resources used in the pipeline. -type PipelineResourceSpec = resource.PipelineResourceSpec - -// SecretParam indicates which secret can be used to populate a field of the resource -type SecretParam = resource.SecretParam - -// ResourceParam declares a string value to use for the parameter called Name, and is used in -// the specific context of PipelineResources. 
-type ResourceParam = resource.ResourceParam - -// ResourceDeclaration defines an input or output PipelineResource declared as a requirement -// by another type such as a Task or Condition. The Name field will be used to refer to these -// PipelineResources within the type's definition, and when provided as an Input, the Name will be the -// path to the volume mounted containing this PipelineResource as an input (e.g. -// an input Resource named `workspace` will be mounted at `/workspace`). -type ResourceDeclaration = resource.ResourceDeclaration - -// PipelineResourceList contains a list of PipelineResources -type PipelineResourceList = resource.PipelineResourceList diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go deleted file mode 100644 index 6067f2d971..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go +++ /dev/null @@ -1,236 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PipelineSpec defines the desired state of Pipeline. -type PipelineSpec struct { - // Description is a user-facing description of the pipeline that may be - // used to populate a UI. 
- // +optional - Description string `json:"description,omitempty"` - // Resources declares the names and types of the resources given to the - // Pipeline's tasks as inputs and outputs. - Resources []PipelineDeclaredResource `json:"resources,omitempty"` - // Tasks declares the graph of Tasks that execute when this Pipeline is run. - Tasks []PipelineTask `json:"tasks,omitempty"` - // Params declares a list of input parameters that must be supplied when - // this Pipeline is run. - Params []ParamSpec `json:"params,omitempty"` - // Workspaces declares a set of named workspaces that are expected to be - // provided by a PipelineRun. - // +optional - Workspaces []PipelineWorkspaceDeclaration `json:"workspaces,omitempty"` - // Results are values that this pipeline can output once run - // +optional - Results []PipelineResult `json:"results,omitempty"` -} - -// PipelineResult used to describe the results of a pipeline -type PipelineResult = v1beta1.PipelineResult - -// Check that Pipeline may be validated and defaulted. - -// TaskKind defines the type of Task used by the pipeline. -type TaskKind = v1beta1.TaskKind - -const ( - // NamespacedTaskKind indicates that the task type has a namepace scope. - NamespacedTaskKind TaskKind = v1beta1.NamespacedTaskKind - // ClusterTaskKind indicates that task type has a cluster scope. - ClusterTaskKind TaskKind = v1beta1.ClusterTaskKind -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +genclient:noStatus - -// Pipeline describes a list of Tasks to execute. It expresses how outputs -// of tasks feed into inputs of subsequent tasks. -// +k8s:openapi-gen=true -type Pipeline struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec holds the desired state of the Pipeline from the client - // +optional - Spec PipelineSpec `json:"spec"` - - // Status is deprecated. 
- // It usually is used to communicate the observed state of the Pipeline from - // the controller, but was unused as there is no controller for Pipeline. - // +optional - Status *PipelineStatus `json:"status,omitempty"` -} - -// PipelineStatus does not contain anything because Pipelines on their own -// do not have a status, they just hold data which is later used by a -// PipelineRun. -// Deprecated -type PipelineStatus struct { -} - -// PipelineMetadata returns the Pipeline's ObjectMeta, implementing PipelineObject. -func (p *Pipeline) PipelineMetadata() metav1.ObjectMeta { - return p.ObjectMeta -} - -// PipelineSpec returns the Pipeline's Spec, implementing PipelineObject. -func (p *Pipeline) PipelineSpec() PipelineSpec { - return p.Spec -} - -// Copy returns a deep copy of the Pipeline, implementing PipelineObject. -func (p *Pipeline) Copy() PipelineObject { - return p.DeepCopy() -} - -// PipelineTask defines a task in a Pipeline, passing inputs from both -// Params and from the output of previous tasks. -type PipelineTask struct { - // Name is the name of this task within the context of a Pipeline. Name is - // used as a coordinate with the `from` and `runAfter` fields to establish - // the execution order of tasks relative to one another. - Name string `json:"name,omitempty"` - - // TaskRef is a reference to a task definition. - // +optional - TaskRef *TaskRef `json:"taskRef,omitempty"` - - // TaskSpec is specification of a task - // +optional - TaskSpec *TaskSpec `json:"taskSpec,omitempty"` - - // Retries represents how many times this task should be retried in case of task failure: ConditionSucceeded set to False - // +optional - Retries int `json:"retries,omitempty"` - - // RunAfter is the list of PipelineTask names that should be executed before - // this Task executes. (Used to force a specific ordering in graph execution.) 
- // +optional - RunAfter []string `json:"runAfter,omitempty"` - - // Resources declares the resources given to this task as inputs and - // outputs. - // +optional - Resources *PipelineTaskResources `json:"resources,omitempty"` - // Parameters declares parameters passed to this task. - // +optional - Params []Param `json:"params,omitempty"` - - // Workspaces maps workspaces from the pipeline spec to the workspaces - // declared in the Task. - // +optional - Workspaces []WorkspacePipelineTaskBinding `json:"workspaces,omitempty"` - - // Time after which the TaskRun times out. Defaults to 1 hour. - // Specified TaskRun timeout should be less than 24h. - // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration - // +optional - Timeout *metav1.Duration `json:"timeout,omitempty"` -} - -// HashKey is used as the key for this PipelineTask in the DAG -func (pt PipelineTask) HashKey() string { - return pt.Name -} - -// Deps returns all other PipelineTask dependencies of this PipelineTask, based on resource usage or ordering -func (pt PipelineTask) Deps() []string { - deps := []string{} - deps = append(deps, pt.RunAfter...) - if pt.Resources != nil { - for _, rd := range pt.Resources.Inputs { - deps = append(deps, rd.From...) 
- } - } - // Add any dependents from task results - for _, param := range pt.Params { - expressions, ok := v1beta1.GetVarSubstitutionExpressionsForParam(param) - if ok { - resultRefs := v1beta1.NewResultRefs(expressions) - for _, resultRef := range resultRefs { - deps = append(deps, resultRef.PipelineTask) - } - } - } - - return deps -} - -// PipelineTaskList is a list of PipelineTasks -type PipelineTaskList []PipelineTask - -// Items returns a slice of all tasks in the PipelineTaskList, converted to dag.Tasks -func (l PipelineTaskList) Items() []dag.Task { - tasks := []dag.Task{} - for _, t := range l { - tasks = append(tasks, dag.Task(t)) - } - return tasks -} - -// Deps returns a map with key as name of a pipelineTask and value as a list of its dependencies -func (l PipelineTaskList) Deps() map[string][]string { - deps := map[string][]string{} - for _, pt := range l { - deps[pt.HashKey()] = pt.Deps() - } - return deps -} - -// PipelineTaskParam is used to provide arbitrary string parameters to a Task. -type PipelineTaskParam = v1beta1.PipelineTaskParam - -// PipelineDeclaredResource is used by a Pipeline to declare the types of the -// PipelineResources that it will required to run and names which can be used to -// refer to these PipelineResources in PipelineTaskResourceBindings. -type PipelineDeclaredResource = v1beta1.PipelineDeclaredResource - -// PipelineTaskResources allows a Pipeline to declare how its DeclaredPipelineResources -// should be provided to a Task as its inputs and outputs. -type PipelineTaskResources = v1beta1.PipelineTaskResources - -// PipelineTaskInputResource maps the name of a declared PipelineResource input -// dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources -// that should be used. This input may come from a previous task. 
-type PipelineTaskInputResource = v1beta1.PipelineTaskInputResource - -// PipelineTaskOutputResource maps the name of a declared PipelineResource output -// dependency in a Task to the resource in the Pipeline's DeclaredPipelineResources -// that should be used. -type PipelineTaskOutputResource = v1beta1.PipelineTaskOutputResource - -// TaskRef can be used to refer to a specific instance of a task. -// Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64 -type TaskRef = v1beta1.TaskRef - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PipelineList contains a list of Pipeline -type PipelineList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []Pipeline `json:"items"` -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go deleted file mode 100644 index 7d8f985b1a..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go +++ /dev/null @@ -1,325 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - "strings" - - "github.com/tektoncd/pipeline/pkg/apis/validate" - "github.com/tektoncd/pipeline/pkg/list" - "github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" - "github.com/tektoncd/pipeline/pkg/substitution" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*Pipeline)(nil) - -// Validate checks that the Pipeline structure is valid but does not validate -// that any references resources exist, that is done at run time. -func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(p.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - if apis.IsInDelete(ctx) { - return nil - } - return p.Spec.Validate(ctx) -} - -func validateDeclaredResources(ps *PipelineSpec) error { - encountered := sets.NewString() - for _, r := range ps.Resources { - if encountered.Has(r.Name) { - return fmt.Errorf("resource with name %q appears more than once", r.Name) - } - encountered.Insert(r.Name) - } - required := []string{} - for _, t := range ps.Tasks { - if t.Resources != nil { - for _, input := range t.Resources.Inputs { - required = append(required, input.Resource) - } - for _, output := range t.Resources.Outputs { - required = append(required, output.Resource) - } - } - } - - provided := make([]string, 0, len(ps.Resources)) - for _, resource := range ps.Resources { - provided = append(provided, resource.Name) - } - missing := list.DiffLeft(required, provided) - if len(missing) > 0 { - return fmt.Errorf("pipeline declared resources didn't match usage in Tasks: Didn't provide required values: %s", missing) - } - - return nil -} - -func isOutput(outputs []PipelineTaskOutputResource, resource string) bool { - for _, output := range outputs { - if output.Resource == resource { - return true - } - } - return false -} - -// validateFrom 
ensures that the `from` values make sense: that they rely on values from Tasks -// that ran previously, and that the PipelineResource is actually an output of the Task it should come from. -func validateFrom(tasks []PipelineTask) *apis.FieldError { - taskOutputs := map[string][]PipelineTaskOutputResource{} - for _, task := range tasks { - var to []PipelineTaskOutputResource - if task.Resources != nil { - to = make([]PipelineTaskOutputResource, len(task.Resources.Outputs)) - copy(to, task.Resources.Outputs) - } - taskOutputs[task.Name] = to - } - for _, t := range tasks { - inputResources := []PipelineTaskInputResource{} - if t.Resources != nil { - inputResources = append(inputResources, t.Resources.Inputs...) - } - - for _, rd := range inputResources { - for _, pt := range rd.From { - outputs, found := taskOutputs[pt] - if !found { - return apis.ErrInvalidValue(fmt.Sprintf("expected resource %s to be from task %s, but task %s doesn't exist", rd.Resource, pt, pt), - "spec.tasks.resources.inputs.from") - } - if !isOutput(outputs, rd.Resource) { - return apis.ErrInvalidValue(fmt.Sprintf("the resource %s from %s must be an output but is an input", rd.Resource, pt), - "spec.tasks.resources.inputs.from") - } - } - } - } - return nil -} - -// validateGraph ensures the Pipeline's dependency Graph (DAG) make sense: that there is no dependency -// cycle or that they rely on values from Tasks that ran previously, and that the PipelineResource -// is actually an output of the Task it should come from. -func validateGraph(tasks []PipelineTask) error { - if _, err := dag.Build(PipelineTaskList(tasks), PipelineTaskList(tasks).Deps()); err != nil { - return err - } - return nil -} - -// Validate checks that taskNames in the Pipeline are valid and that the graph -// of Tasks expressed in the Pipeline makes sense. 
-func (ps *PipelineSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(ps, &PipelineSpec{}) { - return apis.ErrGeneric("expected at least one, got none", "spec.description", "spec.params", "spec.resources", "spec.tasks", "spec.workspaces") - } - - // PipelineTask must have a valid unique label and at least one of taskRef or taskSpec should be specified - if err := validatePipelineTasks(ctx, ps.Tasks); err != nil { - return err - } - - // All declared resources should be used, and the Pipeline shouldn't try to use any resources - // that aren't declared - if err := validateDeclaredResources(ps); err != nil { - return apis.ErrInvalidValue(err.Error(), "spec.resources") - } - - // The from values should make sense - if err := validateFrom(ps.Tasks); err != nil { - return err - } - - // Validate the pipeline task graph - if err := validateGraph(ps.Tasks); err != nil { - return apis.ErrInvalidValue(err.Error(), "spec.tasks") - } - - // The parameter variables should be valid - if err := validatePipelineParameterVariables(ps.Tasks, ps.Params); err != nil { - return err - } - - // Validate the pipeline's workspaces. - return validatePipelineWorkspaces(ps.Workspaces, ps.Tasks) -} - -func validatePipelineTasks(ctx context.Context, tasks []PipelineTask) *apis.FieldError { - // Names cannot be duplicated - taskNames := sets.NewString() - var err *apis.FieldError - for i, t := range tasks { - if err = validatePipelineTaskName(ctx, "spec.tasks", i, t, taskNames); err != nil { - return err - } - } - return nil -} - -func validatePipelineTaskName(ctx context.Context, prefix string, i int, t PipelineTask, taskNames sets.String) *apis.FieldError { - if errs := validation.IsDNS1123Label(t.Name); len(errs) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("invalid value %q", t.Name), - Paths: []string{fmt.Sprintf(prefix+"[%d].name", i)}, - Details: "Pipeline Task name must be a valid DNS Label." 
+ - "For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - } - } - // can't have both taskRef and taskSpec at the same time - if (t.TaskRef != nil && t.TaskRef.Name != "") && t.TaskSpec != nil { - return apis.ErrMultipleOneOf(fmt.Sprintf(prefix+"[%d].taskRef", i), fmt.Sprintf(prefix+"[%d].taskSpec", i)) - } - // Check that one of TaskRef and TaskSpec is present - if (t.TaskRef == nil || (t.TaskRef != nil && t.TaskRef.Name == "")) && t.TaskSpec == nil { - return apis.ErrMissingOneOf(fmt.Sprintf(prefix+"[%d].taskRef", i), fmt.Sprintf(prefix+"[%d].taskSpec", i)) - } - // Validate TaskSpec if it's present - if t.TaskSpec != nil { - if err := t.TaskSpec.Validate(ctx); err != nil { - return err - } - } - if t.TaskRef != nil && t.TaskRef.Name != "" { - // Task names are appended to the container name, which must exist and - // must be a valid k8s name - if errSlice := validation.IsQualifiedName(t.Name); len(errSlice) != 0 { - return apis.ErrInvalidValue(strings.Join(errSlice, ","), fmt.Sprintf(prefix+"[%d].name", i)) - } - // TaskRef name must be a valid k8s name - if errSlice := validation.IsQualifiedName(t.TaskRef.Name); len(errSlice) != 0 { - return apis.ErrInvalidValue(strings.Join(errSlice, ","), fmt.Sprintf(prefix+"[%d].taskRef.name", i)) - } - if _, ok := taskNames[t.Name]; ok { - return apis.ErrMultipleOneOf(fmt.Sprintf(prefix+"[%d].name", i)) - } - taskNames[t.Name] = struct{}{} - } - return nil -} - -func validatePipelineWorkspaces(wss []PipelineWorkspaceDeclaration, pts []PipelineTask) *apis.FieldError { - // Workspace names must be non-empty and unique. 
- wsTable := sets.NewString() - for i, ws := range wss { - if ws.Name == "" { - return apis.ErrInvalidValue(fmt.Sprintf("workspace %d has empty name", i), "spec.workspaces") - } - if wsTable.Has(ws.Name) { - return apis.ErrInvalidValue(fmt.Sprintf("workspace with name %q appears more than once", ws.Name), "spec.workspaces") - } - wsTable.Insert(ws.Name) - } - - // Any workspaces used in PipelineTasks should have their name declared in the Pipeline's - // Workspaces list. - for ptIdx, pt := range pts { - for wsIdx, ws := range pt.Workspaces { - if _, ok := wsTable[ws.Workspace]; !ok { - return apis.ErrInvalidValue( - fmt.Sprintf("pipeline task %q expects workspace with name %q but none exists in pipeline spec", pt.Name, ws.Workspace), - fmt.Sprintf("spec.tasks[%d].workspaces[%d]", ptIdx, wsIdx), - ) - } - } - } - return nil -} - -func validatePipelineParameterVariables(tasks []PipelineTask, params []ParamSpec) *apis.FieldError { - parameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - - for _, p := range params { - // Verify that p is a valid type. - validType := false - for _, allowedType := range AllParamTypes { - if p.Type == allowedType { - validType = true - } - } - if !validType { - return apis.ErrInvalidValue(string(p.Type), fmt.Sprintf("spec.params.%s.type", p.Name)) - } - - // If a default value is provided, ensure its type matches param's declared type. - if (p.Default != nil) && (p.Default.Type != p.Type) { - return &apis.FieldError{ - Message: fmt.Sprintf( - "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), - Paths: []string{ - fmt.Sprintf("spec.params.%s.type", p.Name), - fmt.Sprintf("spec.params.%s.default.type", p.Name), - }, - } - } - - // Add parameter name to parameterNames, and to arrayParameterNames if type is array. 
- parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { - arrayParameterNames.Insert(p.Name) - } - } - - return validatePipelineVariables(tasks, "params", parameterNames, arrayParameterNames) -} - -func validatePipelineVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String) *apis.FieldError { - for _, task := range tasks { - for _, param := range task.Params { - if param.Value.Type == ParamTypeString { - if err := validatePipelineVariable(fmt.Sprintf("param[%s]", param.Name), param.Value.StringVal, prefix, paramNames); err != nil { - return err - } - if err := validatePipelineNoArrayReferenced(fmt.Sprintf("param[%s]", param.Name), param.Value.StringVal, prefix, arrayParamNames); err != nil { - return err - } - } else { - for _, arrayElement := range param.Value.ArrayVal { - if err := validatePipelineVariable(fmt.Sprintf("param[%s]", param.Name), arrayElement, prefix, paramNames); err != nil { - return err - } - if err := validatePipelineArraysIsolated(fmt.Sprintf("param[%s]", param.Name), arrayElement, prefix, arrayParamNames); err != nil { - return err - } - } - } - } - } - return nil -} - -func validatePipelineVariable(name, value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariable(name, value, prefix, "task parameter", "pipelinespec.params", vars) -} - -func validatePipelineNoArrayReferenced(name, value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariableProhibited(name, value, prefix, "task parameter", "pipelinespec.params", vars) -} - -func validatePipelineArraysIsolated(name, value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariableIsolated(name, value, prefix, "task parameter", "pipelinespec.params", vars) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_conversion.go 
b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_conversion.go deleted file mode 100644 index 6a89f52382..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_conversion.go +++ /dev/null @@ -1,114 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "knative.dev/pkg/apis" -) - -var _ apis.Convertible = (*PipelineRun)(nil) - -// ConvertTo implements api.Convertible -func (pr *PipelineRun) ConvertTo(ctx context.Context, obj apis.Convertible) error { - switch sink := obj.(type) { - case *v1beta1.PipelineRun: - sink.ObjectMeta = pr.ObjectMeta - if err := pr.Spec.ConvertTo(ctx, &sink.Spec); err != nil { - return err - } - sink.Status = pr.Status - - spec := &v1beta1.PipelineSpec{} - if err := deserializeFinally(&sink.ObjectMeta, spec); err != nil { - return err - } - if len(spec.Finally) > 0 { - if sink.Spec.PipelineSpec == nil { - sink.Spec.PipelineSpec = spec - } else { - sink.Spec.PipelineSpec.Finally = spec.Finally - } - } - return nil - default: - return fmt.Errorf("unknown version, got: %T", sink) - } -} - -// ConvertTo implements api.Convertible -func (prs *PipelineRunSpec) ConvertTo(ctx context.Context, sink *v1beta1.PipelineRunSpec) error { - sink.PipelineRef = prs.PipelineRef - if prs.PipelineSpec != nil { - sink.PipelineSpec = &v1beta1.PipelineSpec{} - if err 
:= prs.PipelineSpec.ConvertTo(ctx, sink.PipelineSpec); err != nil { - return err - } - } - sink.Resources = prs.Resources - sink.Params = prs.Params - sink.ServiceAccountName = prs.ServiceAccountName - sink.Status = prs.Status - sink.Timeout = prs.Timeout - sink.PodTemplate = prs.PodTemplate - sink.Workspaces = prs.Workspaces - return nil -} - -// ConvertFrom implements api.Convertible -func (pr *PipelineRun) ConvertFrom(ctx context.Context, obj apis.Convertible) error { - switch source := obj.(type) { - case *v1beta1.PipelineRun: - pr.ObjectMeta = source.ObjectMeta - if err := pr.Spec.ConvertFrom(ctx, &source.Spec); err != nil { - return err - } - pr.Status = source.Status - - ps := source.Spec.PipelineSpec - if ps != nil && ps.Finally != nil { - if err := serializeFinally(&pr.ObjectMeta, ps.Finally); err != nil { - return err - } - } - return nil - default: - return fmt.Errorf("unknown version, got: %T", pr) - } -} - -// ConvertFrom implements api.Convertible -func (prs *PipelineRunSpec) ConvertFrom(ctx context.Context, source *v1beta1.PipelineRunSpec) error { - prs.PipelineRef = source.PipelineRef - if source.PipelineSpec != nil { - prs.PipelineSpec = &PipelineSpec{} - if err := prs.PipelineSpec.ConvertFrom(ctx, *source.PipelineSpec); err != nil { - return err - } - } - prs.Resources = source.Resources - prs.Params = source.Params - prs.ServiceAccountName = source.ServiceAccountName - prs.Status = source.Status - prs.Timeout = source.Timeout - prs.PodTemplate = source.PodTemplate - prs.Workspaces = source.Workspaces - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go deleted file mode 100644 index ce1f35ad08..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache 
License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "time" - - "github.com/tektoncd/pipeline/pkg/apis/config" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/apis" -) - -var _ apis.Defaultable = (*PipelineRun)(nil) - -// SetDefaults implements apis.Defaultable -func (pr *PipelineRun) SetDefaults(ctx context.Context) { - pr.Spec.SetDefaults(ctx) -} - -// SetDefaults implements apis.Defaultable -func (prs *PipelineRunSpec) SetDefaults(ctx context.Context) { - cfg := config.FromContextOrDefaults(ctx) - if prs.Timeout == nil { - prs.Timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} - } - - defaultSA := cfg.Defaults.DefaultServiceAccount - if prs.ServiceAccountName == "" && defaultSA != "" { - prs.ServiceAccountName = defaultSA - } - - defaultPodTemplate := cfg.Defaults.DefaultPodTemplate - if prs.PodTemplate == nil { - prs.PodTemplate = defaultPodTemplate - } - - if prs.PipelineSpec != nil { - prs.PipelineSpec.SetDefaults(ctx) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go deleted file mode 100644 index 2a3682b2da..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright 2019-2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may 
not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/clock" - "knative.dev/pkg/apis" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PipelineRun represents a single execution of a Pipeline. PipelineRuns are how -// the graph of Tasks declared in a Pipeline are executed; they specify inputs -// to Pipelines such as parameter values and capture operational aspects of the -// Tasks execution such as service account and tolerations. Creating a -// PipelineRun creates TaskRuns for Tasks in the referenced Pipeline. 
-// -// +k8s:openapi-gen=true -type PipelineRun struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +optional - Spec PipelineRunSpec `json:"spec,omitempty"` - // +optional - Status PipelineRunStatus `json:"status,omitempty"` -} - -// GetName returns the PipelineRun's name -func (pr *PipelineRun) GetName() string { - return pr.ObjectMeta.GetName() -} - -// PipelineRunSpec defines the desired state of PipelineRun -type PipelineRunSpec struct { - // +optional - PipelineRef *PipelineRef `json:"pipelineRef,omitempty"` - // +optional - PipelineSpec *PipelineSpec `json:"pipelineSpec,omitempty"` - // Resources is a list of bindings specifying which actual instances of - // PipelineResources to use for the resources the Pipeline has declared - // it needs. - Resources []PipelineResourceBinding `json:"resources,omitempty"` - // Params is a list of parameter names and values. - Params []Param `json:"params,omitempty"` - // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty"` - // Used for cancelling a pipelinerun (and maybe more later on) - // +optional - Status PipelineRunSpecStatus `json:"status,omitempty"` - // Time after which the Pipeline times out. Defaults to never. - // Refer to Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration - // +optional - Timeout *metav1.Duration `json:"timeout,omitempty"` - // PodTemplate holds pod specific configuration - PodTemplate *PodTemplate `json:"podTemplate,omitempty"` - // Workspaces holds a set of workspace bindings that must match names - // with those declared in the pipeline. 
- // +optional - Workspaces []WorkspaceBinding `json:"workspaces,omitempty"` - // TaskRunSpecs holds a set of task specific specs - // +optional - TaskRunSpecs []PipelineTaskRunSpec `json:"taskRunSpecs,omitempty"` -} - -// PipelineRunSpecStatus defines the pipelinerun spec status the user can provide -type PipelineRunSpecStatus = v1beta1.PipelineRunSpecStatus - -const ( - // PipelineRunSpecStatusCancelled indicates that the user wants to cancel the task, - // if not already cancelled or terminated - PipelineRunSpecStatusCancelled = v1beta1.PipelineRunSpecStatusCancelledDeprecated -) - -// PipelineResourceRef can be used to refer to a specific instance of a Resource -type PipelineResourceRef = v1beta1.PipelineResourceRef - -// PipelineRef can be used to refer to a specific instance of a Pipeline. -// Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64 -type PipelineRef = v1beta1.PipelineRef - -// PipelineRunStatus defines the observed state of PipelineRun -type PipelineRunStatus = v1beta1.PipelineRunStatus - -// PipelineRunStatusFields holds the fields of PipelineRunStatus' status. -// This is defined separately and inlined so that other types can readily -// consume these fields via duck typing. -type PipelineRunStatusFields = v1beta1.PipelineRunStatusFields - -// PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status -type PipelineRunTaskRunStatus = v1beta1.PipelineRunTaskRunStatus - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PipelineRunList contains a list of PipelineRun -type PipelineRunList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []PipelineRun `json:"items,omitempty"` -} - -// PipelineTaskRun reports the results of running a step in the Task. 
Each -// task has the potential to succeed or fail (based on the exit code) -// and produces logs. -type PipelineTaskRun = v1beta1.PipelineTaskRun - -// GetGroupVersionKind implements kmeta.OwnerRefable. -func (*PipelineRun) GetGroupVersionKind() schema.GroupVersionKind { - return SchemeGroupVersion.WithKind(pipeline.PipelineRunControllerName) -} - -// IsDone returns true if the PipelineRun's status indicates that it is done. -func (pr *PipelineRun) IsDone() bool { - return !pr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() -} - -// HasStarted function check whether pipelinerun has valid start time set in its status -func (pr *PipelineRun) HasStarted() bool { - return pr.Status.StartTime != nil && !pr.Status.StartTime.IsZero() -} - -// IsCancelled returns true if the PipelineRun's spec status is set to Cancelled state -func (pr *PipelineRun) IsCancelled() bool { - return pr.Spec.Status == PipelineRunSpecStatusCancelled -} - -// GetRunKey return the pipelinerun key for timeout handler map -func (pr *PipelineRun) GetRunKey() string { - // The address of the pointer is a threadsafe unique identifier for the pipelinerun - return fmt.Sprintf("%s/%p", pipeline.PipelineRunControllerName, pr) -} - -// IsTimedOut returns true if a pipelinerun has exceeded its spec.Timeout based on its status.Timeout -func (pr *PipelineRun) IsTimedOut(c clock.PassiveClock) bool { - pipelineTimeout := pr.Spec.Timeout - startTime := pr.Status.StartTime - - if !startTime.IsZero() && pipelineTimeout != nil { - timeout := pipelineTimeout.Duration - if timeout == config.NoTimeoutDuration { - return false - } - runtime := c.Since(startTime.Time) - if runtime > timeout { - return true - } - } - return false -} - -// HasVolumeClaimTemplate returns true if PipelineRun contains volumeClaimTemplates that is -// used for creating PersistentVolumeClaims with an OwnerReference for each run -func (pr *PipelineRun) HasVolumeClaimTemplate() bool { - for _, ws := range pr.Spec.Workspaces { - if 
ws.VolumeClaimTemplate != nil { - return true - } - } - return false -} - -// PipelineTaskRunSpec holds task specific specs -type PipelineTaskRunSpec struct { - PipelineTaskName string `json:"pipelineTaskName,omitempty"` - TaskServiceAccountName string `json:"taskServiceAccountName,omitempty"` - TaskPodTemplate *PodTemplate `json:"taskPodTemplate,omitempty"` -} - -// GetTaskRunSpecs returns the task specific spec for a given -// PipelineTask if configured, otherwise it returns the PipelineRun's default. -func (pr *PipelineRun) GetTaskRunSpecs(pipelineTaskName string) (string, *PodTemplate) { - serviceAccountName := pr.Spec.ServiceAccountName - taskPodTemplate := pr.Spec.PodTemplate - for _, task := range pr.Spec.TaskRunSpecs { - if task.PipelineTaskName == pipelineTaskName { - taskPodTemplate = task.TaskPodTemplate - serviceAccountName = task.TaskServiceAccountName - } - } - return serviceAccountName, taskPodTemplate -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go deleted file mode 100644 index f6396cb433..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/validate" - "k8s.io/apimachinery/pkg/api/equality" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*PipelineRun)(nil) - -// Validate pipelinerun -func (pr *PipelineRun) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(pr.GetObjectMeta()).ViaField("metadata"); err != nil { - return err - } - if apis.IsInDelete(ctx) { - return nil - } - return pr.Spec.Validate(ctx) -} - -// Validate pipelinerun spec -func (ps *PipelineRunSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(ps, &PipelineRunSpec{}) { - return apis.ErrMissingField("spec") - } - - // can't have both pipelineRef and pipelineSpec at the same time - if (ps.PipelineRef != nil && ps.PipelineRef.Name != "") && ps.PipelineSpec != nil { - return apis.ErrDisallowedFields("spec.pipelineref", "spec.pipelinespec") - } - - // Check that one of PipelineRef and PipelineSpec is present - if (ps.PipelineRef == nil || (ps.PipelineRef != nil && ps.PipelineRef.Name == "")) && ps.PipelineSpec == nil { - return apis.ErrMissingField("spec.pipelineref.name", "spec.pipelinespec") - } - - // Validate PipelineSpec if it's present - if ps.PipelineSpec != nil { - if err := ps.PipelineSpec.Validate(ctx); err != nil { - return err - } - } - - if ps.Timeout != nil { - // timeout should be a valid duration of at least 0. 
- if ps.Timeout.Duration < 0 { - return apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ps.Timeout.Duration.String()), "spec.timeout") - } - } - - if ps.Workspaces != nil { - wsNames := make(map[string]int) - for idx, ws := range ps.Workspaces { - if prevIdx, alreadyExists := wsNames[ws.Name]; alreadyExists { - return &apis.FieldError{ - Message: fmt.Sprintf("workspace %q provided by pipelinerun more than once, at index %d and %d", ws.Name, prevIdx, idx), - Paths: []string{"spec.workspaces"}, - } - } - wsNames[ws.Name] = idx - } - } - - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go deleted file mode 100644 index 156c66b2d5..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go +++ /dev/null @@ -1,8 +0,0 @@ -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" -) - -// PodTemplate holds pod specific configuration -type PodTemplate = pod.Template diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go index ca83ff9204..42b5e4b18e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go @@ -46,18 +46,6 @@ var ( // Adds the list of known types to Scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &Task{}, - &TaskList{}, - &ClusterTask{}, - &ClusterTaskList{}, - &TaskRun{}, - &TaskRunList{}, - &Pipeline{}, - &PipelineList{}, - &PipelineRun{}, - &PipelineRunList{}, - &PipelineResource{}, - &PipelineResourceList{}, &Run{}, &RunList{}, ) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_paths.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_paths.go deleted file mode 100644 index 6aa94913b0..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_paths.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package v1alpha1 - -import "path/filepath" - -// InputResourcePath returns the path where the given input resource -// will get mounted in a Pod -func InputResourcePath(r ResourceDeclaration) string { - return path("/workspace", r) -} - -// OutputResourcePath returns the path to the output resource in a Pod -func OutputResourcePath(r ResourceDeclaration) string { - return path("/workspace/output", r) -} - -func path(root string, r ResourceDeclaration) string { - if r.TargetPath != "" { - if filepath.IsAbs(r.TargetPath) { - return r.TargetPath - } - return filepath.Join("/workspace", r.TargetPath) - } - return filepath.Join(root, r.Name) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go deleted file mode 100644 index d987494e17..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - - "github.com/google/go-cmp/cmp" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" -) - -// PipelineResourceInterface interface to be implemented by different PipelineResource types -type PipelineResourceInterface interface { - // GetName returns the name of this PipelineResource instance. 
- GetName() string - // GetType returns the type of this PipelineResource (often a super type, e.g. in the case of storage). - GetType() PipelineResourceType - // Replacements returns all the attributes that this PipelineResource has that - // can be used for variable replacement. - Replacements() map[string]string - // GetOutputTaskModifier returns the TaskModifier instance that should be used on a Task - // in order to add this kind of resource when it is being used as an output. - GetOutputTaskModifier(ts *TaskSpec, path string) (TaskModifier, error) - // GetInputTaskModifier returns the TaskModifier instance that should be used on a Task - // in order to add this kind of resource when it is being used as an input. - GetInputTaskModifier(ts *TaskSpec, path string) (TaskModifier, error) -} - -// TaskModifier is an interface to be implemented by different PipelineResources -type TaskModifier = v1beta1.TaskModifier - -// InternalTaskModifier implements TaskModifier for resources that are built-in to Tekton Pipelines. -type InternalTaskModifier = v1beta1.InternalTaskModifier - -func checkStepNotAlreadyAdded(s Step, steps []Step) error { - for _, step := range steps { - if s.Name == step.Name { - return fmt.Errorf("Step %s cannot be added again", step.Name) - } - } - return nil -} - -// ApplyTaskModifier applies a modifier to the task by appending and prepending steps and volumes. -// If steps with the same name exist in ts an error will be returned. If identical Volumes have -// been added, they will not be added again. If Volumes with the same name but different contents -// have been added, an error will be returned. -// FIXME(vdemeester) de-duplicate this -func ApplyTaskModifier(ts *TaskSpec, tm TaskModifier) error { - steps := tm.GetStepsToPrepend() - for _, step := range steps { - if err := checkStepNotAlreadyAdded(step, ts.Steps); err != nil { - return err - } - } - ts.Steps = append(steps, ts.Steps...) 
- - steps = tm.GetStepsToAppend() - for _, step := range steps { - if err := checkStepNotAlreadyAdded(step, ts.Steps); err != nil { - return err - } - } - ts.Steps = append(ts.Steps, steps...) - - volumes := tm.GetVolumes() - for _, volume := range volumes { - var alreadyAdded bool - for _, v := range ts.Volumes { - if volume.Name == v.Name { - // If a Volume with the same name but different contents has already been added, we can't add both - if d := cmp.Diff(volume, v); d != "" { - return fmt.Errorf("tried to add volume %s already added but with different contents", volume.Name) - } - // If an identical Volume has already been added, don't add it again - alreadyAdded = true - } - } - if !alreadyAdded { - ts.Volumes = append(ts.Volumes, volume) - } - } - - return nil -} - -// PipelineResourceBinding connects a reference to an instance of a PipelineResource -// with a PipelineResource dependency that the Pipeline has declared -type PipelineResourceBinding = v1beta1.PipelineResourceBinding - -// PipelineResourceResult used to export the image name and digest as json -type PipelineResourceResult = v1beta1.PipelineResourceResult - -// ResultType used to find out whether a PipelineResourceResult is from a task result or not -type ResultType = v1beta1.ResultType diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go index 4d7e1a94c2..7889cfa8b8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go @@ -47,7 +47,7 @@ type EmbeddedRunSpec struct { // RunSpec defines the desired state of Run type RunSpec struct { // +optional - Ref *TaskRef `json:"ref,omitempty"` + Ref *v1beta1.TaskRef `json:"ref,omitempty"` // Spec is a specification of a custom task // +optional @@ -69,7 +69,7 @@ type RunSpec struct { // PodTemplate holds pod specific 
configuration // +optional - PodTemplate *PodTemplate `json:"podTemplate,omitempty"` + PodTemplate *v1beta1.PodTemplate `json:"podTemplate,omitempty"` // Time after which the custom-task times out. // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_validation.go index 483c918de6..59bcb43c9a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_validation.go @@ -19,6 +19,7 @@ package v1alpha1 import ( "context" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/apis/validate" "k8s.io/apimachinery/pkg/api/equality" "knative.dev/pkg/apis" @@ -66,9 +67,9 @@ func (rs *RunSpec) Validate(ctx context.Context) *apis.FieldError { return apis.ErrMissingField("spec.spec.kind") } } - if err := validateParameters("spec.params", rs.Params); err != nil { + if err := v1beta1.ValidateParameters(ctx, rs.Params).ViaField("spec.params"); err != nil { return err } - return validateWorkspaceBindings(ctx, rs.Workspaces) + return v1beta1.ValidateWorkspaceBindings(ctx, rs.Workspaces).ViaField("spec.workspaces") } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go deleted file mode 100644 index 15f9b9ade2..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/substitution" -) - -// ApplyStepReplacements applies variable interpolation on a Step. -func ApplyStepReplacements(step *Step, stringReplacements map[string]string, arrayReplacements map[string][]string) { - step.Script = substitution.ApplyReplacements(step.Script, stringReplacements) - ApplyContainerReplacements(step, stringReplacements, arrayReplacements) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_conversion.go deleted file mode 100644 index 027d1493a5..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_conversion.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "knative.dev/pkg/apis" -) - -var _ apis.Convertible = (*Task)(nil) - -// ConvertTo implements api.Convertible -func (t *Task) ConvertTo(ctx context.Context, obj apis.Convertible) error { - switch sink := obj.(type) { - case *v1beta1.Task: - sink.ObjectMeta = t.ObjectMeta - return t.Spec.ConvertTo(ctx, &sink.Spec) - default: - return fmt.Errorf("unknown version, got: %T", sink) - } -} - -// ConvertTo implements api.Convertible -func (ts *TaskSpec) ConvertTo(ctx context.Context, sink *v1beta1.TaskSpec) error { - sink.Steps = ts.Steps - sink.Volumes = ts.Volumes - sink.StepTemplate = ts.StepTemplate - sink.Sidecars = ts.Sidecars - sink.Workspaces = ts.Workspaces - sink.Results = ts.Results - sink.Resources = ts.Resources - sink.Params = ts.Params - sink.Description = ts.Description - if ts.Inputs != nil { - if len(ts.Inputs.Params) > 0 && len(ts.Params) > 0 { - // This shouldn't happen as it shouldn't pass validation - return apis.ErrMultipleOneOf("inputs.params", "params") - } - if len(ts.Inputs.Params) > 0 { - sink.Params = make([]v1beta1.ParamSpec, len(ts.Inputs.Params)) - for i, param := range ts.Inputs.Params { - sink.Params[i] = *param.DeepCopy() - } - } - if len(ts.Inputs.Resources) > 0 { - if sink.Resources == nil { - sink.Resources = &v1beta1.TaskResources{} - } - if len(ts.Inputs.Resources) > 0 && ts.Resources != nil && len(ts.Resources.Inputs) > 0 { - // This shouldn't happen as it shouldn't pass validation but just in case - return apis.ErrMultipleOneOf("inputs.resources", "resources.inputs") - } - sink.Resources.Inputs = make([]v1beta1.TaskResource, len(ts.Inputs.Resources)) - for i, resource := range ts.Inputs.Resources { - sink.Resources.Inputs[i] = v1beta1.TaskResource{ResourceDeclaration: v1beta1.ResourceDeclaration{ - Name: resource.Name, - Type: resource.Type, - Description: resource.Description, - TargetPath: resource.TargetPath, - 
Optional: resource.Optional, - }} - } - } - } - if ts.Outputs != nil && len(ts.Outputs.Resources) > 0 { - if sink.Resources == nil { - sink.Resources = &v1beta1.TaskResources{} - } - if len(ts.Outputs.Resources) > 0 && ts.Resources != nil && len(ts.Resources.Outputs) > 0 { - // This shouldn't happen as it shouldn't pass validation but just in case - return apis.ErrMultipleOneOf("outputs.resources", "resources.outputs") - } - sink.Resources.Outputs = make([]v1beta1.TaskResource, len(ts.Outputs.Resources)) - for i, resource := range ts.Outputs.Resources { - sink.Resources.Outputs[i] = v1beta1.TaskResource{ResourceDeclaration: v1beta1.ResourceDeclaration{ - Name: resource.Name, - Type: resource.Type, - Description: resource.Description, - TargetPath: resource.TargetPath, - Optional: resource.Optional, - }} - } - } - return nil -} - -// ConvertFrom implements api.Convertible -func (t *Task) ConvertFrom(ctx context.Context, obj apis.Convertible) error { - switch source := obj.(type) { - case *v1beta1.Task: - t.ObjectMeta = source.ObjectMeta - return t.Spec.ConvertFrom(ctx, &source.Spec) - default: - return fmt.Errorf("unknown version, got: %T", t) - } -} - -// ConvertFrom implements api.Convertible -func (ts *TaskSpec) ConvertFrom(ctx context.Context, source *v1beta1.TaskSpec) error { - ts.Steps = source.Steps - ts.Volumes = source.Volumes - ts.StepTemplate = source.StepTemplate - ts.Sidecars = source.Sidecars - ts.Workspaces = source.Workspaces - ts.Results = source.Results - ts.Params = source.Params - ts.Resources = source.Resources - ts.Description = source.Description - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go deleted file mode 100644 index 2c4180b579..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed 
under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" -) - -const ( - // TaskRunResultType default task run result value - TaskRunResultType ResultType = v1beta1.TaskRunResultType - // PipelineResourceResultType default pipeline result value - PipelineResourceResultType ResultType = v1beta1.PipelineResourceResultType - // UnknownResultType default unknown result type value - UnknownResultType ResultType = v1beta1.UnknownResultType -) - -// TaskSpec returns the task's spec -func (t *Task) TaskSpec() TaskSpec { - return t.Spec -} - -// TaskMetadata returns the task's ObjectMeta -func (t *Task) TaskMetadata() metav1.ObjectMeta { - return t.ObjectMeta -} - -// Copy returns a deep copy of the task -func (t *Task) Copy() TaskObject { - return t.DeepCopy() -} - -// TaskSpec defines the desired state of Task. -type TaskSpec struct { - v1beta1.TaskSpec `json:",inline"` - - // Inputs is an optional set of parameters and resources which must be - // supplied by the user when a Task is executed by a TaskRun. - // +optional - Inputs *Inputs `json:"inputs,omitempty"` - // Outputs is an optional set of resources and results produced when this - // Task is run. 
- // +optional - Outputs *Outputs `json:"outputs,omitempty"` -} - -// TaskResult used to describe the results of a task -type TaskResult = v1beta1.TaskResult - -// Step embeds the Container type, which allows it to include fields not -// provided by Container. -type Step = v1beta1.Step - -// Sidecar has nearly the same data structure as Step, consisting of a Container and an optional Script, but does not have the ability to timeout. -type Sidecar = v1beta1.Sidecar - -// StepTemplate is a template for a Step -type StepTemplate = v1beta1.StepTemplate - -// +genclient -// +genclient:noStatus -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Task represents a collection of sequential steps that are run as part of a -// Pipeline using a set of inputs and producing a set of outputs. Tasks execute -// when TaskRuns are created that provide the input parameters and resources and -// output resources the Task requires. -// -// +k8s:openapi-gen=true -type Task struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata"` - - // Spec holds the desired state of the Task from the client - // +optional - Spec TaskSpec `json:"spec"` -} - -// Inputs are the requirements that a task needs to run a Build. -type Inputs struct { - // Resources is a list of the input resources required to run the task. - // Resources are represented in TaskRuns as bindings to instances of - // PipelineResources. - // +optional - Resources []TaskResource `json:"resources,omitempty"` - // Params is a list of input parameters required to run the task. Params - // must be supplied as inputs in TaskRuns unless they declare a default - // value. - // +optional - Params []ParamSpec `json:"params,omitempty"` -} - -// TaskResource defines an input or output Resource declared as a requirement -// by a Task. 
The Name field will be used to refer to these Resources within -// the Task definition, and when provided as an Input, the Name will be the -// path to the volume mounted containing this Resource as an input (e.g. -// an input Resource named `workspace` will be mounted at `/workspace`). -type TaskResource = v1beta1.TaskResource - -// Outputs allow a task to declare what data the Build/Task will be producing, -// i.e. results such as logs and artifacts such as images. -type Outputs struct { - // +optional - Results []TestResult `json:"results,omitempty"` - // +optional - Resources []TaskResource `json:"resources,omitempty"` -} - -// TestResult allows a task to specify the location where test logs -// can be found and what format they will be in. -type TestResult struct { - // Name declares the name by which a result is referenced in the Task's - // definition. Results may be referenced by name in the definition of a - // Task's steps. - Name string `json:"name"` - // TODO: maybe this is an enum with types like "go test", "junit", etc. - Format string `json:"format"` - Path string `json:"path"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TaskList contains a list of Task -type TaskList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []Task `json:"items"` -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go deleted file mode 100644 index a80b5658f6..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go +++ /dev/null @@ -1,437 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "fmt" - "path/filepath" - "strings" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "github.com/tektoncd/pipeline/pkg/apis/validate" - "github.com/tektoncd/pipeline/pkg/substitution" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/util/validation" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*Task)(nil) - -// Validate implements apis.Validatable -func (t *Task) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(t.GetObjectMeta()); err != nil { - return err.ViaField("metadata") - } - if apis.IsInDelete(ctx) { - return nil - } - return t.Spec.Validate(ctx) -} - -// Validate implements apis.Validatable -func (ts *TaskSpec) Validate(ctx context.Context) *apis.FieldError { - - if len(ts.Steps) == 0 { - return apis.ErrMissingField("steps") - } - if err := ValidateVolumes(ts.Volumes).ViaField("volumes"); err != nil { - return err - } - if err := validateDeclaredWorkspaces(ts.Workspaces, ts.Steps, ts.StepTemplate); err != nil { - return err - } - mergedSteps, err := v1beta1.MergeStepsWithStepTemplate(ts.StepTemplate, ts.Steps) - if err != nil { - return &apis.FieldError{ - Message: fmt.Sprintf("error merging step template and steps: %s", err), - Paths: []string{"stepTemplate"}, - } - } - - if err := validateSteps(mergedSteps).ViaField("steps"); err != nil { - return err - } - - if ts.Inputs != nil { - if len(ts.Inputs.Params) > 0 && len(ts.Params) > 0 { - return apis.ErrMultipleOneOf("inputs.params", "params") - } - if 
ts.Resources != nil && len(ts.Resources.Inputs) > 0 && len(ts.Inputs.Resources) > 0 { - return apis.ErrMultipleOneOf("inputs.resources", "resources.inputs") - } - } - if ts.Outputs != nil { - if ts.Resources != nil && len(ts.Resources.Outputs) > 0 && len(ts.Outputs.Resources) > 0 { - return apis.ErrMultipleOneOf("outputs.resources", "resources.outputs") - } - } - - // Validate Resources declaration - if err := ts.Resources.Validate(ctx); err != nil { - return err - } - // Validate that the parameters type are correct - if err := v1beta1.ValidateParameterTypes(ctx, ts.Params); err != nil { - return err - } - - // A task doesn't have to have inputs or outputs, but if it does they must be valid. - // A task can't duplicate input or output names. - // Deprecated - if ts.Inputs != nil { - for _, resource := range ts.Inputs.Resources { - if err := validateResourceType(resource, fmt.Sprintf("taskspec.Inputs.Resources.%s.Type", resource.Name)); err != nil { - return err - } - } - if err := checkForDuplicates(ts.Inputs.Resources, "taskspec.Inputs.Resources.Name"); err != nil { - return err - } - if err := validateInputParameterTypes(ts.Inputs); err != nil { - return err - } - } - // Deprecated - if ts.Outputs != nil { - for _, resource := range ts.Outputs.Resources { - if err := validateResourceType(resource, fmt.Sprintf("taskspec.Outputs.Resources.%s.Type", resource.Name)); err != nil { - return err - } - } - if err := checkForDuplicates(ts.Outputs.Resources, "taskspec.Outputs.Resources.Name"); err != nil { - return err - } - } - - // Validate task step names - for _, step := range ts.Steps { - if errs := validation.IsDNS1123Label(step.Name); step.Name != "" && len(errs) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("invalid value %q", step.Name), - Paths: []string{"taskspec.steps.name"}, - Details: "Task step name must be a valid DNS Label, For more info refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - } - } - } - - if 
err := v1beta1.ValidateParameterVariables(ctx, ts.Steps, ts.Params); err != nil { - return err - } - // Deprecated - if err := validateInputParameterVariables(ts.Steps, ts.Inputs, ts.Params); err != nil { - return err - } - - if err := v1beta1.ValidateResourcesVariables(ctx, ts.Steps, ts.Resources); err != nil { - return err - } - // Deprecated - return validateResourceVariables(ts.Steps, ts.Inputs, ts.Outputs, ts.Resources) -} - -// validateDeclaredWorkspaces will make sure that the declared workspaces do not try to use -// a mount path which conflicts with any other declared workspaces, with the explicitly -// declared volume mounts, or with the stepTemplate. The names must also be unique. -func validateDeclaredWorkspaces(workspaces []WorkspaceDeclaration, steps []Step, stepTemplate *StepTemplate) *apis.FieldError { - mountPaths := sets.NewString() - for _, step := range steps { - for _, vm := range step.VolumeMounts { - mountPaths.Insert(filepath.Clean(vm.MountPath)) - } - } - if stepTemplate != nil { - for _, vm := range stepTemplate.VolumeMounts { - mountPaths.Insert(filepath.Clean(vm.MountPath)) - } - } - - wsNames := sets.NewString() - for _, w := range workspaces { - // Workspace names must be unique - if wsNames.Has(w.Name) { - return &apis.FieldError{ - Message: fmt.Sprintf("workspace name %q must be unique", w.Name), - Paths: []string{"workspaces.name"}, - } - } - wsNames.Insert(w.Name) - // Workspaces must not try to use mount paths that are already used - mountPath := filepath.Clean(w.GetMountPath()) - if mountPaths.Has(mountPath) { - return &apis.FieldError{ - Message: fmt.Sprintf("workspace mount path %q must be unique", mountPath), - Paths: []string{"workspaces.mountpath"}, - } - } - mountPaths.Insert(mountPath) - } - return nil -} - -// ValidateVolumes validates a slice of volumes to make sure there are no duplicate names -func ValidateVolumes(volumes []corev1.Volume) *apis.FieldError { - // Task must not have duplicate volume names. 
- vols := sets.NewString() - for _, v := range volumes { - if vols.Has(v.Name) { - return &apis.FieldError{ - Message: fmt.Sprintf("multiple volumes with same name %q", v.Name), - Paths: []string{"name"}, - } - } - vols.Insert(v.Name) - } - return nil -} - -func validateSteps(steps []Step) *apis.FieldError { - // Task must not have duplicate step names. - names := sets.NewString() - for idx, s := range steps { - if s.Image == "" { - return apis.ErrMissingField("Image") - } - - if s.Script != "" { - if len(s.Command) > 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("step %d script cannot be used with command", idx), - Paths: []string{"script"}, - } - } - } - - if s.Name != "" { - if names.Has(s.Name) { - return apis.ErrInvalidValue(s.Name, "name") - } - names.Insert(s.Name) - } - - for _, vm := range s.VolumeMounts { - if strings.HasPrefix(vm.MountPath, "/tekton/") && - !strings.HasPrefix(vm.MountPath, "/tekton/home") { - return &apis.FieldError{ - Message: fmt.Sprintf("step %d volumeMount cannot be mounted under /tekton/ (volumeMount %q mounted at %q)", idx, vm.Name, vm.MountPath), - Paths: []string{"volumeMounts.mountPath"}, - } - } - if strings.HasPrefix(vm.Name, "tekton-internal-") { - return &apis.FieldError{ - Message: fmt.Sprintf(`step %d volumeMount name %q cannot start with "tekton-internal-"`, idx, vm.Name), - Paths: []string{"volumeMounts.name"}, - } - } - } - } - return nil -} - -func validateInputParameterTypes(inputs *Inputs) *apis.FieldError { - for _, p := range inputs.Params { - // Ensure param has a valid type. - validType := false - for _, allowedType := range AllParamTypes { - if p.Type == allowedType { - validType = true - } - } - if !validType { - return apis.ErrInvalidValue(p.Type, fmt.Sprintf("taskspec.inputs.params.%s.type", p.Name)) - } - - // If a default value is provided, ensure its type matches param's declared type. 
- if (p.Default != nil) && (p.Default.Type != p.Type) { - return &apis.FieldError{ - Message: fmt.Sprintf( - "\"%v\" type does not match default value's type: \"%v\"", p.Type, p.Default.Type), - Paths: []string{ - fmt.Sprintf("taskspec.inputs.params.%s.type", p.Name), - fmt.Sprintf("taskspec.inputs.params.%s.default.type", p.Name), - }, - } - } - } - return nil -} - -func validateInputParameterVariables(steps []Step, inputs *Inputs, params []v1beta1.ParamSpec) *apis.FieldError { - parameterNames := sets.NewString() - arrayParameterNames := sets.NewString() - - for _, p := range params { - parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { - arrayParameterNames.Insert(p.Name) - } - } - // Deprecated - if inputs != nil { - for _, p := range inputs.Params { - parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { - arrayParameterNames.Insert(p.Name) - } - } - } - - if err := validateVariables(steps, "params", parameterNames); err != nil { - return err - } - return validateArrayUsage(steps, "params", arrayParameterNames) -} - -func validateResourceVariables(steps []Step, inputs *Inputs, outputs *Outputs, resources *v1beta1.TaskResources) *apis.FieldError { - resourceNames := sets.NewString() - if resources != nil { - for _, r := range resources.Inputs { - resourceNames.Insert(r.Name) - } - for _, r := range resources.Outputs { - resourceNames.Insert(r.Name) - } - } - // Deprecated - if inputs != nil { - for _, r := range inputs.Resources { - resourceNames.Insert(r.Name) - } - } - // Deprecated - if outputs != nil { - for _, r := range outputs.Resources { - resourceNames.Insert(r.Name) - } - } - return validateVariables(steps, "resources", resourceNames) -} - -func validateArrayUsage(steps []Step, prefix string, vars sets.String) *apis.FieldError { - for _, step := range steps { - if err := validateTaskNoArrayReferenced("name", step.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced("image", step.Image, prefix, 
vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced("workingDir", step.WorkingDir, prefix, vars); err != nil { - return err - } - for i, cmd := range step.Command { - if err := validateTaskArraysIsolated(fmt.Sprintf("command[%d]", i), cmd, prefix, vars); err != nil { - return err - } - } - for i, arg := range step.Args { - if err := validateTaskArraysIsolated(fmt.Sprintf("arg[%d]", i), arg, prefix, vars); err != nil { - return err - } - } - for _, env := range step.Env { - if err := validateTaskNoArrayReferenced(fmt.Sprintf("env[%s]", env.Name), env.Value, prefix, vars); err != nil { - return err - } - } - for i, v := range step.VolumeMounts { - if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].Name", i), v.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].MountPath", i), v.MountPath, prefix, vars); err != nil { - return err - } - if err := validateTaskNoArrayReferenced(fmt.Sprintf("volumeMount[%d].SubPath", i), v.SubPath, prefix, vars); err != nil { - return err - } - } - } - return nil -} - -func validateVariables(steps []Step, prefix string, vars sets.String) *apis.FieldError { - for _, step := range steps { - if err := validateTaskVariable("name", step.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable("image", step.Image, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable("workingDir", step.WorkingDir, prefix, vars); err != nil { - return err - } - for i, cmd := range step.Command { - if err := validateTaskVariable(fmt.Sprintf("command[%d]", i), cmd, prefix, vars); err != nil { - return err - } - } - for i, arg := range step.Args { - if err := validateTaskVariable(fmt.Sprintf("arg[%d]", i), arg, prefix, vars); err != nil { - return err - } - } - for _, env := range step.Env { - if err := validateTaskVariable(fmt.Sprintf("env[%s]", env.Name), env.Value, prefix, vars); err != nil { - 
return err - } - } - for i, v := range step.VolumeMounts { - if err := validateTaskVariable(fmt.Sprintf("volumeMount[%d].Name", i), v.Name, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable(fmt.Sprintf("volumeMount[%d].MountPath", i), v.MountPath, prefix, vars); err != nil { - return err - } - if err := validateTaskVariable(fmt.Sprintf("volumeMount[%d].SubPath", i), v.SubPath, prefix, vars); err != nil { - return err - } - } - } - return nil -} - -func validateTaskVariable(name, value, prefix string, vars sets.String) *apis.FieldError { - return substitution.ValidateVariable(name, value, "(?:inputs|outputs)."+prefix, "step", "taskspec.steps", vars) -} - -func validateTaskNoArrayReferenced(name, value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableProhibited(name, value, "(?:inputs|outputs)."+prefix, "step", "taskspec.steps", arrayNames) -} - -func validateTaskArraysIsolated(name, value, prefix string, arrayNames sets.String) *apis.FieldError { - return substitution.ValidateVariableIsolated(name, value, "(?:inputs|outputs)."+prefix, "step", "taskspec.steps", arrayNames) -} - -func checkForDuplicates(resources []TaskResource, path string) *apis.FieldError { - encountered := sets.NewString() - for _, r := range resources { - if encountered.Has(strings.ToLower(r.Name)) { - return apis.ErrMultipleOneOf(path) - } - encountered.Insert(strings.ToLower(r.Name)) - } - return nil -} - -func validateResourceType(r TaskResource, path string) *apis.FieldError { - for _, allowed := range AllResourceTypes { - if r.Type == allowed { - return nil - } - } - return apis.ErrInvalidValue(r.Type, path) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_conversion.go deleted file mode 100644 index e41b49ae1e..0000000000 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_conversion.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "context" - "fmt" - - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - "knative.dev/pkg/apis" -) - -var _ apis.Convertible = (*TaskRun)(nil) - -// ConvertTo implements api.Convertible -func (tr *TaskRun) ConvertTo(ctx context.Context, obj apis.Convertible) error { - switch sink := obj.(type) { - case *v1beta1.TaskRun: - sink.ObjectMeta = tr.ObjectMeta - if err := tr.Spec.ConvertTo(ctx, &sink.Spec); err != nil { - return err - } - sink.Status = tr.Status - return nil - default: - return fmt.Errorf("unknown version, got: %T", sink) - } -} - -// ConvertTo implements api.Convertible -func (trs *TaskRunSpec) ConvertTo(ctx context.Context, sink *v1beta1.TaskRunSpec) error { - sink.ServiceAccountName = trs.ServiceAccountName - sink.TaskRef = trs.TaskRef - if trs.TaskSpec != nil { - sink.TaskSpec = &v1beta1.TaskSpec{} - if err := trs.TaskSpec.ConvertTo(ctx, sink.TaskSpec); err != nil { - return err - } - } - sink.Status = trs.Status - sink.Timeout = trs.Timeout - sink.PodTemplate = trs.PodTemplate - sink.Workspaces = trs.Workspaces - sink.Params = trs.Params - sink.Resources = trs.Resources - // Deprecated fields - if trs.Inputs != nil { - if len(trs.Inputs.Params) > 0 && len(trs.Params) > 0 { - // This shouldn't happen as it shouldn't pass 
validation - return apis.ErrMultipleOneOf("inputs.params", "params") - } - if len(trs.Inputs.Params) > 0 { - sink.Params = make([]v1beta1.Param, len(trs.Inputs.Params)) - for i, param := range trs.Inputs.Params { - sink.Params[i] = *param.DeepCopy() - } - } - if len(trs.Inputs.Resources) > 0 { - if sink.Resources == nil { - sink.Resources = &v1beta1.TaskRunResources{} - } - if len(trs.Inputs.Resources) > 0 && trs.Resources != nil && len(trs.Resources.Inputs) > 0 { - // This shouldn't happen as it shouldn't pass validation but just in case - return apis.ErrMultipleOneOf("inputs.resources", "resources.inputs") - } - sink.Resources.Inputs = make([]v1beta1.TaskResourceBinding, len(trs.Inputs.Resources)) - for i, resource := range trs.Inputs.Resources { - sink.Resources.Inputs[i] = v1beta1.TaskResourceBinding{ - PipelineResourceBinding: v1beta1.PipelineResourceBinding{ - Name: resource.Name, - ResourceRef: resource.ResourceRef, - ResourceSpec: resource.ResourceSpec, - }, - Paths: resource.Paths, - } - } - } - } - - if trs.Outputs != nil && len(trs.Outputs.Resources) > 0 { - if sink.Resources == nil { - sink.Resources = &v1beta1.TaskRunResources{} - } - if len(trs.Outputs.Resources) > 0 && trs.Resources != nil && len(trs.Resources.Outputs) > 0 { - // This shouldn't happen as it shouldn't pass validation but just in case - return apis.ErrMultipleOneOf("outputs.resources", "resources.outputs") - } - sink.Resources.Outputs = make([]v1beta1.TaskResourceBinding, len(trs.Outputs.Resources)) - for i, resource := range trs.Outputs.Resources { - sink.Resources.Outputs[i] = v1beta1.TaskResourceBinding{ - PipelineResourceBinding: v1beta1.PipelineResourceBinding{ - Name: resource.Name, - ResourceRef: resource.ResourceRef, - ResourceSpec: resource.ResourceSpec, - }, - Paths: resource.Paths, - } - } - } - return nil -} - -// ConvertFrom implements api.Convertible -func (tr *TaskRun) ConvertFrom(ctx context.Context, obj apis.Convertible) error { - switch source := obj.(type) { - case 
*v1beta1.TaskRun: - tr.ObjectMeta = source.ObjectMeta - if err := tr.Spec.ConvertFrom(ctx, &source.Spec); err != nil { - return err - } - tr.Status = source.Status - return nil - default: - return fmt.Errorf("unknown version, got: %T", tr) - } -} - -// ConvertFrom implements api.Convertible -func (trs *TaskRunSpec) ConvertFrom(ctx context.Context, source *v1beta1.TaskRunSpec) error { - trs.ServiceAccountName = source.ServiceAccountName - trs.TaskRef = source.TaskRef - if source.TaskSpec != nil { - trs.TaskSpec = &TaskSpec{} - if err := trs.TaskSpec.ConvertFrom(ctx, source.TaskSpec); err != nil { - return err - } - } - trs.Status = source.Status - trs.Timeout = source.Timeout - trs.PodTemplate = source.PodTemplate - trs.Workspaces = source.Workspaces - trs.Params = source.Params - trs.Resources = source.Resources - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go deleted file mode 100644 index 871703ca84..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "time" - - "github.com/tektoncd/pipeline/pkg/apis/config" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "knative.dev/pkg/apis" -) - -var _ apis.Defaultable = (*TaskRun)(nil) - -// ManagedByLabelKey is the label key used to mark what is managing this resource -const ManagedByLabelKey = "app.kubernetes.io/managed-by" - -// SetDefaults implements apis.Defaultable -func (tr *TaskRun) SetDefaults(ctx context.Context) { - ctx = apis.WithinParent(ctx, tr.ObjectMeta) - tr.Spec.SetDefaults(apis.WithinSpec(ctx)) - - // If the TaskRun doesn't have a managed-by label, apply the default - // specified in the config. - cfg := config.FromContextOrDefaults(ctx) - if tr.ObjectMeta.Labels == nil { - tr.ObjectMeta.Labels = map[string]string{} - } - if _, found := tr.ObjectMeta.Labels[ManagedByLabelKey]; !found { - tr.ObjectMeta.Labels[ManagedByLabelKey] = cfg.Defaults.DefaultManagedByLabelValue - } -} - -// SetDefaults implements apis.Defaultable -func (trs *TaskRunSpec) SetDefaults(ctx context.Context) { - cfg := config.FromContextOrDefaults(ctx) - if trs.TaskRef != nil && trs.TaskRef.Kind == "" { - trs.TaskRef.Kind = NamespacedTaskKind - } - - if trs.Timeout == nil { - trs.Timeout = &metav1.Duration{Duration: time.Duration(cfg.Defaults.DefaultTimeoutMinutes) * time.Minute} - } - - defaultSA := cfg.Defaults.DefaultServiceAccount - if trs.ServiceAccountName == "" && defaultSA != "" { - trs.ServiceAccountName = defaultSA - } - - defaultPodTemplate := cfg.Defaults.DefaultPodTemplate - if trs.PodTemplate == nil { - trs.PodTemplate = defaultPodTemplate - } - - // If this taskrun has an embedded task, apply the usual task defaults - if trs.TaskSpec != nil { - trs.TaskSpec.SetDefaults(ctx) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go deleted file mode 100644 index 6f6af8cb47..0000000000 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go +++ /dev/null @@ -1,266 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "fmt" - "time" - - apisconfig "github.com/tektoncd/pipeline/pkg/apis/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/clock" - "knative.dev/pkg/apis" -) - -// TaskRunSpec defines the desired state of TaskRun -type TaskRunSpec struct { - // +optional - ServiceAccountName string `json:"serviceAccountName"` - // no more than one of the TaskRef and TaskSpec may be specified. - // +optional - TaskRef *TaskRef `json:"taskRef,omitempty"` - // +optional - TaskSpec *TaskSpec `json:"taskSpec,omitempty"` - // Used for cancelling a taskrun (and maybe more later on) - // +optional - Status TaskRunSpecStatus `json:"status,omitempty"` - // Time after which the build times out. Defaults to 10 minutes. - // Specified build timeout should be less than 24h. 
- // Refer Go's ParseDuration documentation for expected format: https://golang.org/pkg/time/#ParseDuration - // +optional - Timeout *metav1.Duration `json:"timeout,omitempty"` - // PodTemplate holds pod specific configuration - // +optional - PodTemplate *PodTemplate `json:"podTemplate,omitempty"` - // Workspaces is a list of WorkspaceBindings from volumes to workspaces. - // +optional - Workspaces []WorkspaceBinding `json:"workspaces,omitempty"` - // From v1beta1 - // +optional - Params []Param `json:"params,omitempty"` - // +optional - Resources *v1beta1.TaskRunResources `json:"resources,omitempty"` - // Deprecated - // +optional - Inputs *TaskRunInputs `json:"inputs,omitempty"` - // +optional - Outputs *TaskRunOutputs `json:"outputs,omitempty"` -} - -// TaskRunSpecStatus defines the taskrun spec status the user can provide -type TaskRunSpecStatus = v1beta1.TaskRunSpecStatus - -const ( - // TaskRunSpecStatusCancelled indicates that the user wants to cancel the task, - // if not already cancelled or terminated - TaskRunSpecStatusCancelled = v1beta1.TaskRunSpecStatusCancelled - - // TaskRunReasonCancelled indicates that the TaskRun has been cancelled - // because it was requested so by the user - TaskRunReasonCancelled = v1beta1.TaskRunSpecStatusCancelled -) - -// TaskRunInputs holds the input values that this task was invoked with. -type TaskRunInputs struct { - // +optional - Resources []TaskResourceBinding `json:"resources,omitempty"` - // +optional - Params []Param `json:"params,omitempty"` -} - -// TaskResourceBinding points to the PipelineResource that -// will be used for the Task input or output called Name. -type TaskResourceBinding = v1beta1.TaskResourceBinding - -// TaskRunOutputs holds the output values that this task was invoked with. 
-type TaskRunOutputs struct { - // +optional - Resources []TaskResourceBinding `json:"resources,omitempty"` -} - -// TaskRunStatus defines the observed state of TaskRun -type TaskRunStatus = v1beta1.TaskRunStatus - -// TaskRunStatusFields holds the fields of TaskRun's status. This is defined -// separately and inlined so that other types can readily consume these fields -// via duck typing. -type TaskRunStatusFields = v1beta1.TaskRunStatusFields - -// TaskRunResult used to describe the results of a task -type TaskRunResult = v1beta1.TaskRunResult - -// StepState reports the results of running a step in the Task. -type StepState = v1beta1.StepState - -// SidecarState reports the results of sidecar in the Task. -type SidecarState = v1beta1.SidecarState - -// CloudEventDelivery is the target of a cloud event along with the state of -// delivery. -type CloudEventDelivery = v1beta1.CloudEventDelivery - -// CloudEventCondition is a string that represents the condition of the event. -type CloudEventCondition = v1beta1.CloudEventCondition - -const ( - // CloudEventConditionUnknown means that the condition for the event to be - // triggered was not met yet, or we don't know the state yet. - CloudEventConditionUnknown CloudEventCondition = v1beta1.CloudEventConditionUnknown - // CloudEventConditionSent means that the event was sent successfully - CloudEventConditionSent CloudEventCondition = v1beta1.CloudEventConditionSent - // CloudEventConditionFailed means that there was one or more attempts to - // send the event, and none was successful so far. - CloudEventConditionFailed CloudEventCondition = v1beta1.CloudEventConditionFailed -) - -// CloudEventDeliveryState reports the state of a cloud event to be sent. -type CloudEventDeliveryState = v1beta1.CloudEventDeliveryState - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TaskRun represents a single execution of a Task. 
TaskRuns are how the steps -// specified in a Task are executed; they specify the parameters and resources -// used to run the steps in a Task. -// -// +k8s:openapi-gen=true -type TaskRun struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty"` - - // +optional - Spec TaskRunSpec `json:"spec,omitempty"` - // +optional - Status TaskRunStatus `json:"status,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TaskRunList contains a list of TaskRun -type TaskRunList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - Items []TaskRun `json:"items"` -} - -// GetGroupVersionKind implements kmeta.OwnerRefable. -func (*TaskRun) GetGroupVersionKind() schema.GroupVersionKind { - return SchemeGroupVersion.WithKind(pipeline.TaskRunControllerName) -} - -// GetPipelineRunPVCName for taskrun gets pipelinerun -func (tr *TaskRun) GetPipelineRunPVCName() string { - if tr == nil { - return "" - } - for _, ref := range tr.GetOwnerReferences() { - if ref.Kind == pipeline.PipelineRunControllerName { - return fmt.Sprintf("%s-pvc", ref.Name) - } - } - return "" -} - -// HasPipelineRunOwnerReference returns true of TaskRun has -// owner reference of type PipelineRun -func (tr *TaskRun) HasPipelineRunOwnerReference() bool { - for _, ref := range tr.GetOwnerReferences() { - if ref.Kind == pipeline.PipelineRunControllerName { - return true - } - } - return false -} - -// IsDone returns true if the TaskRun's status indicates that it is done. -func (tr *TaskRun) IsDone() bool { - return !tr.Status.GetCondition(apis.ConditionSucceeded).IsUnknown() -} - -// HasStarted function check whether taskrun has valid start time set in its status -func (tr *TaskRun) HasStarted() bool { - return tr.Status.StartTime != nil && !tr.Status.StartTime.IsZero() -} - -// IsSuccessful returns true if the TaskRun's status indicates that it is done. 
-func (tr *TaskRun) IsSuccessful() bool { - return tr.Status.GetCondition(apis.ConditionSucceeded).IsTrue() -} - -// IsCancelled returns true if the TaskRun's spec status is set to Cancelled state -func (tr *TaskRun) IsCancelled() bool { - return tr.Spec.Status == TaskRunSpecStatusCancelled -} - -// HasTimedOut returns true if the TaskRun runtime is beyond the allowed timeout -func (tr *TaskRun) HasTimedOut(c clock.PassiveClock) bool { - if tr.Status.StartTime.IsZero() { - return false - } - timeout := tr.GetTimeout() - // If timeout is set to 0 or defaulted to 0, there is no timeout. - if timeout == apisconfig.NoTimeoutDuration { - return false - } - runtime := c.Since(tr.Status.StartTime.Time) - return runtime > timeout -} - -// GetTimeout returns the timeout for the TaskRun, or the default if not specified -func (tr *TaskRun) GetTimeout() time.Duration { - // Use the platform default is no timeout is set - if tr.Spec.Timeout == nil { - return apisconfig.DefaultTimeoutMinutes * time.Minute - } - return tr.Spec.Timeout.Duration -} - -// GetRunKey return the taskrun key for timeout handler map -func (tr *TaskRun) GetRunKey() string { - // The address of the pointer is a threadsafe unique identifier for the taskrun - return fmt.Sprintf("%s/%p", "TaskRun", tr) -} - -// IsPartOfPipeline return true if TaskRun is a part of a Pipeline. 
-// It also return the name of Pipeline and PipelineRun -func (tr *TaskRun) IsPartOfPipeline() (bool, string, string) { - if tr == nil || len(tr.Labels) == 0 { - return false, "", "" - } - - if pl, ok := tr.Labels[pipeline.PipelineLabelKey]; ok { - return true, pl, tr.Labels[pipeline.PipelineRunLabelKey] - } - - return false, "", "" -} - -// HasVolumeClaimTemplate returns true if TaskRun contains volumeClaimTemplates that is -// used for creating PersistentVolumeClaims with an OwnerReference for each run -func (tr *TaskRun) HasVolumeClaimTemplate() bool { - for _, ws := range tr.Spec.Workspaces { - if ws.VolumeClaimTemplate != nil { - return true - } - } - return false -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go deleted file mode 100644 index a898c21d6d..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - "context" - "fmt" - "strings" - - "github.com/tektoncd/pipeline/pkg/apis/validate" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/util/sets" - "knative.dev/pkg/apis" -) - -var _ apis.Validatable = (*TaskRun)(nil) - -// Validate taskrun -func (tr *TaskRun) Validate(ctx context.Context) *apis.FieldError { - if err := validate.ObjectMetadata(tr.GetObjectMeta()).ViaField("metadata"); err != nil { - return err - } - if apis.IsInDelete(ctx) { - return nil - } - return tr.Spec.Validate(ctx) -} - -// Validate taskrun spec -func (ts *TaskRunSpec) Validate(ctx context.Context) *apis.FieldError { - if equality.Semantic.DeepEqual(ts, &TaskRunSpec{}) { - return apis.ErrMissingField("spec") - } - - // can't have both taskRef and taskSpec at the same time - if (ts.TaskRef != nil && ts.TaskRef.Name != "") && ts.TaskSpec != nil { - return apis.ErrDisallowedFields("spec.taskref", "spec.taskspec") - } - - // Check that one of TaskRef and TaskSpec is present - if (ts.TaskRef == nil || (ts.TaskRef != nil && ts.TaskRef.Name == "")) && ts.TaskSpec == nil { - return apis.ErrMissingField("spec.taskref.name", "spec.taskspec") - } - - // Validate TaskSpec if it's present - if ts.TaskSpec != nil { - if err := ts.TaskSpec.Validate(ctx); err != nil { - return err - } - } - - // Deprecated - // check for input resources - if ts.Inputs != nil { - if err := ts.Inputs.Validate(ctx, "spec.Inputs"); err != nil { - return err - } - } - - // Deprecated - // check for output resources - if ts.Outputs != nil { - if err := ts.Outputs.Validate(ctx, "spec.Outputs"); err != nil { - return err - } - } - - // Validate Resources - if err := ts.Resources.Validate(ctx); err != nil { - return err - } - - if err := validateWorkspaceBindings(ctx, ts.Workspaces); err != nil { - return err - } - if err := validateParameters("spec.inputs.params", ts.Params); err != nil { - return err - } - - if ts.Timeout != nil { - // timeout should be a valid duration of at 
least 0. - if ts.Timeout.Duration < 0 { - return apis.ErrInvalidValue(fmt.Sprintf("%s should be >= 0", ts.Timeout.Duration.String()), "spec.timeout") - } - } - - return nil -} - -// Validate implements apis.Validatable -func (i TaskRunInputs) Validate(ctx context.Context, path string) *apis.FieldError { - if err := validatePipelineResources(ctx, i.Resources, fmt.Sprintf("%s.Resources.Name", path)); err != nil { - return err - } - return validateParameters("spec.inputs.params", i.Params) -} - -// Validate implements apis.Validatable -func (o TaskRunOutputs) Validate(ctx context.Context, path string) *apis.FieldError { - return validatePipelineResources(ctx, o.Resources, fmt.Sprintf("%s.Resources.Name", path)) -} - -// validateWorkspaceBindings makes sure the volumes provided for the Task's declared workspaces make sense. -func validateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) *apis.FieldError { - seen := sets.NewString() - for _, w := range wb { - if seen.Has(w.Name) { - return apis.ErrMultipleOneOf("spec.workspaces.name") - } - seen.Insert(w.Name) - - if err := w.Validate(ctx).ViaField("workspace"); err != nil { - return err - } - } - - return nil -} - -// validatePipelineResources validates that -// 1. resource is not declared more than once -// 2. if both resource reference and resource spec is defined at the same time -// 3. at least resource ref or resource spec is defined -func validatePipelineResources(ctx context.Context, resources []TaskResourceBinding, path string) *apis.FieldError { - encountered := sets.NewString() - for _, r := range resources { - // We should provide only one binding for each resource required by the Task. 
- name := strings.ToLower(r.Name) - if encountered.Has(strings.ToLower(name)) { - return apis.ErrMultipleOneOf(path) - } - encountered.Insert(name) - // Check that both resource ref and resource Spec are not present - if r.ResourceRef != nil && r.ResourceSpec != nil { - return apis.ErrDisallowedFields(fmt.Sprintf("%s.ResourceRef", path), fmt.Sprintf("%s.ResourceSpec", path)) - } - // Check that one of resource ref and resource Spec is present - if (r.ResourceRef == nil || r.ResourceRef.Name == "") && r.ResourceSpec == nil { - return apis.ErrMissingField(fmt.Sprintf("%s.ResourceRef", path), fmt.Sprintf("%s.ResourceSpec", path)) - } - if r.ResourceSpec != nil && r.ResourceSpec.Validate(ctx) != nil { - return r.ResourceSpec.Validate(ctx) - } - } - - return nil -} - -// TODO(jasonhall): Share this with v1beta1/taskrun_validation.go -func validateParameters(path string, params []Param) *apis.FieldError { - // Template must not duplicate parameter names. - seen := sets.NewString() - for _, p := range params { - if seen.Has(strings.ToLower(p.Name)) { - return apis.ErrMultipleOneOf(path) - } - seen.Insert(p.Name) - } - return nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_types.go deleted file mode 100644 index 4dfd2c5a67..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_types.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2019 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" -) - -// WorkspaceDeclaration is a declaration of a volume that a Task requires. -type WorkspaceDeclaration = v1beta1.WorkspaceDeclaration - -// WorkspaceBinding maps a Task's declared workspace to a Volume. -type WorkspaceBinding = v1beta1.WorkspaceBinding - -// PipelineWorkspaceDeclaration creates a named slot in a Pipeline that a PipelineRun -// is expected to populate with a workspace binding. -type PipelineWorkspaceDeclaration = v1beta1.PipelineWorkspaceDeclaration - -// WorkspacePipelineTaskBinding describes how a workspace passed into the pipeline should be -// mapped to a task's declared workspace. -type WorkspacePipelineTaskBinding = v1beta1.WorkspacePipelineTaskBinding diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go index 77c31346c8..47123e63dd 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go @@ -29,481 +29,20 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterTask) DeepCopyInto(out *ClusterTask) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTask. 
-func (in *ClusterTask) DeepCopy() *ClusterTask { - if in == nil { - return nil - } - out := new(ClusterTask) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterTask) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterTaskList) DeepCopyInto(out *ClusterTaskList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterTask, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTaskList. -func (in *ClusterTaskList) DeepCopy() *ClusterTaskList { - if in == nil { - return nil - } - out := new(ClusterTaskList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterTaskList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EmbeddedRunSpec) DeepCopyInto(out *EmbeddedRunSpec) { - *out = *in - out.TypeMeta = in.TypeMeta - in.Metadata.DeepCopyInto(&out.Metadata) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedRunSpec. 
-func (in *EmbeddedRunSpec) DeepCopy() *EmbeddedRunSpec { - if in == nil { - return nil - } - out := new(EmbeddedRunSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Inputs) DeepCopyInto(out *Inputs) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.TaskResource, len(*in)) - copy(*out, *in) - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.ParamSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Inputs. -func (in *Inputs) DeepCopy() *Inputs { - if in == nil { - return nil - } - out := new(Inputs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Outputs) DeepCopyInto(out *Outputs) { - *out = *in - if in.Results != nil { - in, out := &in.Results, &out.Results - *out = make([]TestResult, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.TaskResource, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Outputs. -func (in *Outputs) DeepCopy() *Outputs { - if in == nil { - return nil - } - out := new(Outputs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Pipeline) DeepCopyInto(out *Pipeline) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - if in.Status != nil { - in, out := &in.Status, &out.Status - *out = new(PipelineStatus) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pipeline. -func (in *Pipeline) DeepCopy() *Pipeline { - if in == nil { - return nil - } - out := new(Pipeline) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Pipeline) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineList) DeepCopyInto(out *PipelineList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pipeline, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineList. -func (in *PipelineList) DeepCopy() *PipelineList { - if in == nil { - return nil - } - out := new(PipelineList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PipelineList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PipelineRun) DeepCopyInto(out *PipelineRun) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRun. -func (in *PipelineRun) DeepCopy() *PipelineRun { - if in == nil { - return nil - } - out := new(PipelineRun) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PipelineRun) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineRunList) DeepCopyInto(out *PipelineRunList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PipelineRun, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunList. -func (in *PipelineRunList) DeepCopy() *PipelineRunList { - if in == nil { - return nil - } - out := new(PipelineRunList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PipelineRunList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PipelineRunSpec) DeepCopyInto(out *PipelineRunSpec) { - *out = *in - if in.PipelineRef != nil { - in, out := &in.PipelineRef, &out.PipelineRef - *out = new(v1beta1.PipelineRef) - (*in).DeepCopyInto(*out) - } - if in.PipelineSpec != nil { - in, out := &in.PipelineSpec, &out.PipelineSpec - *out = new(PipelineSpec) - (*in).DeepCopyInto(*out) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.PipelineResourceBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Timeout != nil { - in, out := &in.Timeout, &out.Timeout - *out = new(v1.Duration) - **out = **in - } - if in.PodTemplate != nil { - in, out := &in.PodTemplate, &out.PodTemplate - *out = new(pod.Template) - (*in).DeepCopyInto(*out) - } - if in.Workspaces != nil { - in, out := &in.Workspaces, &out.Workspaces - *out = make([]v1beta1.WorkspaceBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TaskRunSpecs != nil { - in, out := &in.TaskRunSpecs, &out.TaskRunSpecs - *out = make([]PipelineTaskRunSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineRunSpec. -func (in *PipelineRunSpec) DeepCopy() *PipelineRunSpec { - if in == nil { - return nil - } - out := new(PipelineRunSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.PipelineDeclaredResource, len(*in)) - copy(*out, *in) - } - if in.Tasks != nil { - in, out := &in.Tasks, &out.Tasks - *out = make([]PipelineTask, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.ParamSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Workspaces != nil { - in, out := &in.Workspaces, &out.Workspaces - *out = make([]v1beta1.PipelineWorkspaceDeclaration, len(*in)) - copy(*out, *in) - } - if in.Results != nil { - in, out := &in.Results, &out.Results - *out = make([]v1beta1.PipelineResult, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineSpec. -func (in *PipelineSpec) DeepCopy() *PipelineSpec { - if in == nil { - return nil - } - out := new(PipelineSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PipelineStatus) DeepCopyInto(out *PipelineStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineStatus. -func (in *PipelineStatus) DeepCopy() *PipelineStatus { - if in == nil { - return nil - } - out := new(PipelineStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PipelineTask) DeepCopyInto(out *PipelineTask) { - *out = *in - if in.TaskRef != nil { - in, out := &in.TaskRef, &out.TaskRef - *out = new(v1beta1.TaskRef) - (*in).DeepCopyInto(*out) - } - if in.TaskSpec != nil { - in, out := &in.TaskSpec, &out.TaskSpec - *out = new(TaskSpec) - (*in).DeepCopyInto(*out) - } - if in.RunAfter != nil { - in, out := &in.RunAfter, &out.RunAfter - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(v1beta1.PipelineTaskResources) - (*in).DeepCopyInto(*out) - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Workspaces != nil { - in, out := &in.Workspaces, &out.Workspaces - *out = make([]v1beta1.WorkspacePipelineTaskBinding, len(*in)) - copy(*out, *in) - } - if in.Timeout != nil { - in, out := &in.Timeout, &out.Timeout - *out = new(v1.Duration) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTask. -func (in *PipelineTask) DeepCopy() *PipelineTask { - if in == nil { - return nil - } - out := new(PipelineTask) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in PipelineTaskList) DeepCopyInto(out *PipelineTaskList) { - { - in := &in - *out = make(PipelineTaskList, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskList. -func (in PipelineTaskList) DeepCopy() PipelineTaskList { - if in == nil { - return nil - } - out := new(PipelineTaskList) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *PipelineTaskRunSpec) DeepCopyInto(out *PipelineTaskRunSpec) { +func (in *EmbeddedRunSpec) DeepCopyInto(out *EmbeddedRunSpec) { *out = *in - if in.TaskPodTemplate != nil { - in, out := &in.TaskPodTemplate, &out.TaskPodTemplate - *out = new(pod.Template) - (*in).DeepCopyInto(*out) - } + out.TypeMeta = in.TypeMeta + in.Metadata.DeepCopyInto(&out.Metadata) + in.Spec.DeepCopyInto(&out.Spec) return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PipelineTaskRunSpec. -func (in *PipelineTaskRunSpec) DeepCopy() *PipelineTaskRunSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedRunSpec. +func (in *EmbeddedRunSpec) DeepCopy() *EmbeddedRunSpec { if in == nil { return nil } - out := new(PipelineTaskRunSpec) + out := new(EmbeddedRunSpec) in.DeepCopyInto(out) return out } @@ -618,285 +157,3 @@ func (in *RunSpec) DeepCopy() *RunSpec { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Task) DeepCopyInto(out *Task) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Task. -func (in *Task) DeepCopy() *Task { - if in == nil { - return nil - } - out := new(Task) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Task) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TaskList) DeepCopyInto(out *TaskList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Task, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskList. -func (in *TaskList) DeepCopy() *TaskList { - if in == nil { - return nil - } - out := new(TaskList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TaskList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TaskRun) DeepCopyInto(out *TaskRun) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRun. -func (in *TaskRun) DeepCopy() *TaskRun { - if in == nil { - return nil - } - out := new(TaskRun) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TaskRun) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TaskRunInputs) DeepCopyInto(out *TaskRunInputs) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.TaskResourceBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunInputs. -func (in *TaskRunInputs) DeepCopy() *TaskRunInputs { - if in == nil { - return nil - } - out := new(TaskRunInputs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TaskRunList) DeepCopyInto(out *TaskRunList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]TaskRun, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunList. -func (in *TaskRunList) DeepCopy() *TaskRunList { - if in == nil { - return nil - } - out := new(TaskRunList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TaskRunList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TaskRunOutputs) DeepCopyInto(out *TaskRunOutputs) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]v1beta1.TaskResourceBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskRunOutputs. -func (in *TaskRunOutputs) DeepCopy() *TaskRunOutputs { - if in == nil { - return nil - } - out := new(TaskRunOutputs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { - *out = *in - if in.TaskRef != nil { - in, out := &in.TaskRef, &out.TaskRef - *out = new(v1beta1.TaskRef) - (*in).DeepCopyInto(*out) - } - if in.TaskSpec != nil { - in, out := &in.TaskSpec, &out.TaskSpec - *out = new(TaskSpec) - (*in).DeepCopyInto(*out) - } - if in.Timeout != nil { - in, out := &in.Timeout, &out.Timeout - *out = new(v1.Duration) - **out = **in - } - if in.PodTemplate != nil { - in, out := &in.PodTemplate, &out.PodTemplate - *out = new(pod.Template) - (*in).DeepCopyInto(*out) - } - if in.Workspaces != nil { - in, out := &in.Workspaces, &out.Workspaces - *out = make([]v1beta1.WorkspaceBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Params != nil { - in, out := &in.Params, &out.Params - *out = make([]v1beta1.Param, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = new(v1beta1.TaskRunResources) - (*in).DeepCopyInto(*out) - } - if in.Inputs != nil { - in, out := &in.Inputs, &out.Inputs - *out = new(TaskRunInputs) - (*in).DeepCopyInto(*out) - } - if in.Outputs != nil { - in, out := &in.Outputs, &out.Outputs - *out = new(TaskRunOutputs) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an 
autogenerated deepcopy function, copying the receiver, creating a new TaskRunSpec. -func (in *TaskRunSpec) DeepCopy() *TaskRunSpec { - if in == nil { - return nil - } - out := new(TaskRunSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { - *out = *in - in.TaskSpec.DeepCopyInto(&out.TaskSpec) - if in.Inputs != nil { - in, out := &in.Inputs, &out.Inputs - *out = new(Inputs) - (*in).DeepCopyInto(*out) - } - if in.Outputs != nil { - in, out := &in.Outputs, &out.Outputs - *out = new(Outputs) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TaskSpec. -func (in *TaskSpec) DeepCopy() *TaskSpec { - if in == nil { - return nil - } - out := new(TaskSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TestResult) DeepCopyInto(out *TestResult) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestResult. 
-func (in *TestResult) DeepCopy() *TestResult { - if in == nil { - return nil - } - out := new(TestResult) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_conversion.go index fb07cc0035..5a9040e2d8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_conversion.go @@ -27,10 +27,16 @@ var _ apis.Convertible = (*ClusterTask)(nil) // ConvertTo implements api.Convertible func (ct *ClusterTask) ConvertTo(ctx context.Context, sink apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) } // ConvertFrom implements api.Convertible func (ct *ClusterTask) ConvertFrom(ctx context.Context, source apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } return fmt.Errorf("v1beta1 is the highest know version, got: %T", source) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go index afeba8d78d..127f94a0ea 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go @@ -27,9 +27,9 @@ var _ apis.Validatable = (*ClusterTask)(nil) // Validate performs validation of the metadata and spec of this ClusterTask. 
func (t *ClusterTask) Validate(ctx context.Context) *apis.FieldError { - errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") if apis.IsInDelete(ctx) { return nil } + errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_replacements.go deleted file mode 100644 index 5274507f77..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_replacements.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v1beta1 - -import ( - "github.com/tektoncd/pipeline/pkg/substitution" - corev1 "k8s.io/api/core/v1" -) - -// applyStepReplacements returns a StepContainer with variable interpolation applied. -func applyStepReplacements(step *Step, stringReplacements map[string]string, arrayReplacements map[string][]string) { - c := step.ToK8sContainer() - applyContainerReplacements(c, stringReplacements, arrayReplacements) - step.SetContainerFields(*c) -} - -// applySidecarReplacements returns a SidecarContainer with variable interpolation applied. 
-func applySidecarReplacements(sidecar *Sidecar, stringReplacements map[string]string, arrayReplacements map[string][]string) { - c := sidecar.ToK8sContainer() - applyContainerReplacements(c, stringReplacements, arrayReplacements) - sidecar.SetContainerFields(*c) -} - -func applyContainerReplacements(c *corev1.Container, stringReplacements map[string]string, arrayReplacements map[string][]string) { - c.Name = substitution.ApplyReplacements(c.Name, stringReplacements) - c.Image = substitution.ApplyReplacements(c.Image, stringReplacements) - c.ImagePullPolicy = corev1.PullPolicy(substitution.ApplyReplacements(string(c.ImagePullPolicy), stringReplacements)) - - // Use ApplyArrayReplacements here, as additional args may be added via an array parameter. - var newArgs []string - for _, a := range c.Args { - newArgs = append(newArgs, substitution.ApplyArrayReplacements(a, stringReplacements, arrayReplacements)...) - } - c.Args = newArgs - - for ie, e := range c.Env { - c.Env[ie].Value = substitution.ApplyReplacements(e.Value, stringReplacements) - if c.Env[ie].ValueFrom != nil { - if e.ValueFrom.SecretKeyRef != nil { - c.Env[ie].ValueFrom.SecretKeyRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ValueFrom.SecretKeyRef.LocalObjectReference.Name, stringReplacements) - c.Env[ie].ValueFrom.SecretKeyRef.Key = substitution.ApplyReplacements(e.ValueFrom.SecretKeyRef.Key, stringReplacements) - } - if e.ValueFrom.ConfigMapKeyRef != nil { - c.Env[ie].ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name, stringReplacements) - c.Env[ie].ValueFrom.ConfigMapKeyRef.Key = substitution.ApplyReplacements(e.ValueFrom.ConfigMapKeyRef.Key, stringReplacements) - } - } - } - - for ie, e := range c.EnvFrom { - c.EnvFrom[ie].Prefix = substitution.ApplyReplacements(e.Prefix, stringReplacements) - if e.ConfigMapRef != nil { - c.EnvFrom[ie].ConfigMapRef.LocalObjectReference.Name = 
substitution.ApplyReplacements(e.ConfigMapRef.LocalObjectReference.Name, stringReplacements) - } - if e.SecretRef != nil { - c.EnvFrom[ie].SecretRef.LocalObjectReference.Name = substitution.ApplyReplacements(e.SecretRef.LocalObjectReference.Name, stringReplacements) - } - } - c.WorkingDir = substitution.ApplyReplacements(c.WorkingDir, stringReplacements) - - // Use ApplyArrayReplacements here, as additional commands may be added via an array parameter. - var newCommand []string - for _, c := range c.Command { - newCommand = append(newCommand, substitution.ApplyArrayReplacements(c, stringReplacements, arrayReplacements)...) - } - c.Command = newCommand - - for iv, v := range c.VolumeMounts { - c.VolumeMounts[iv].Name = substitution.ApplyReplacements(v.Name, stringReplacements) - c.VolumeMounts[iv].MountPath = substitution.ApplyReplacements(v.MountPath, stringReplacements) - c.VolumeMounts[iv].SubPath = substitution.ApplyReplacements(v.SubPath, stringReplacements) - } -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go index 69bc2f2064..c2ffbe37bd 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_types.go @@ -209,32 +209,47 @@ type Step struct { // stopAndFail indicates exit the taskRun if the container exits with non-zero exit code // continue indicates continue executing the rest of the steps irrespective of the container exit code OnError string `json:"onError,omitempty"` + + // Stores configuration for the stdout stream of the step. + // +optional + StdoutConfig *StepOutputConfig `json:"stdoutConfig,omitempty"` + // Stores configuration for the stderr stream of the step. 
+ // +optional + StderrConfig *StepOutputConfig `json:"stderrConfig,omitempty"` +} + +// StepOutputConfig stores configuration for a step output stream. +type StepOutputConfig struct { + // Path to duplicate stdout stream to on container's local filesystem. + // +optional + Path string `json:"path,omitempty"` } // ToK8sContainer converts the Step to a Kubernetes Container struct func (s *Step) ToK8sContainer() *corev1.Container { return &corev1.Container{ - Name: s.Name, - Image: s.Image, - Command: s.Command, - Args: s.Args, - WorkingDir: s.WorkingDir, - Ports: s.DeprecatedPorts, - EnvFrom: s.EnvFrom, - Env: s.Env, - Resources: s.Resources, - VolumeMounts: s.VolumeMounts, - VolumeDevices: s.VolumeDevices, - LivenessProbe: s.DeprecatedLivenessProbe, - ReadinessProbe: s.DeprecatedReadinessProbe, - StartupProbe: s.DeprecatedStartupProbe, - Lifecycle: s.DeprecatedLifecycle, - TerminationMessagePath: s.DeprecatedTerminationMessagePath, - ImagePullPolicy: s.ImagePullPolicy, - SecurityContext: s.SecurityContext, - Stdin: s.DeprecatedStdin, - StdinOnce: s.DeprecatedStdinOnce, - TTY: s.DeprecatedTTY, + Name: s.Name, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + Ports: s.DeprecatedPorts, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + LivenessProbe: s.DeprecatedLivenessProbe, + ReadinessProbe: s.DeprecatedReadinessProbe, + StartupProbe: s.DeprecatedStartupProbe, + Lifecycle: s.DeprecatedLifecycle, + TerminationMessagePath: s.DeprecatedTerminationMessagePath, + TerminationMessagePolicy: s.DeprecatedTerminationMessagePolicy, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + Stdin: s.DeprecatedStdin, + StdinOnce: s.DeprecatedStdinOnce, + TTY: s.DeprecatedTTY, } } @@ -256,6 +271,7 @@ func (s *Step) SetContainerFields(c corev1.Container) { s.DeprecatedStartupProbe = c.StartupProbe s.DeprecatedLifecycle = c.Lifecycle 
s.DeprecatedTerminationMessagePath = c.TerminationMessagePath + s.DeprecatedTerminationMessagePolicy = c.TerminationMessagePolicy s.ImagePullPolicy = c.ImagePullPolicy s.SecurityContext = c.SecurityContext s.DeprecatedStdin = c.Stdin @@ -460,6 +476,7 @@ func (s *StepTemplate) SetContainerFields(c corev1.Container) { s.DeprecatedStartupProbe = c.StartupProbe s.DeprecatedLifecycle = c.Lifecycle s.DeprecatedTerminationMessagePath = c.TerminationMessagePath + s.DeprecatedTerminationMessagePolicy = c.TerminationMessagePolicy s.ImagePullPolicy = c.ImagePullPolicy s.SecurityContext = c.SecurityContext s.DeprecatedStdin = c.Stdin @@ -470,27 +487,28 @@ func (s *StepTemplate) SetContainerFields(c corev1.Container) { // ToK8sContainer converts the StepTemplate to a Kubernetes Container struct func (s *StepTemplate) ToK8sContainer() *corev1.Container { return &corev1.Container{ - Name: s.DeprecatedName, - Image: s.Image, - Command: s.Command, - Args: s.Args, - WorkingDir: s.WorkingDir, - Ports: s.DeprecatedPorts, - EnvFrom: s.EnvFrom, - Env: s.Env, - Resources: s.Resources, - VolumeMounts: s.VolumeMounts, - VolumeDevices: s.VolumeDevices, - LivenessProbe: s.DeprecatedLivenessProbe, - ReadinessProbe: s.DeprecatedReadinessProbe, - StartupProbe: s.DeprecatedStartupProbe, - Lifecycle: s.DeprecatedLifecycle, - TerminationMessagePath: s.DeprecatedTerminationMessagePath, - ImagePullPolicy: s.ImagePullPolicy, - SecurityContext: s.SecurityContext, - Stdin: s.DeprecatedStdin, - StdinOnce: s.DeprecatedStdinOnce, - TTY: s.DeprecatedTTY, + Name: s.DeprecatedName, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + Ports: s.DeprecatedPorts, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + LivenessProbe: s.DeprecatedLivenessProbe, + ReadinessProbe: s.DeprecatedReadinessProbe, + StartupProbe: s.DeprecatedStartupProbe, + Lifecycle: s.DeprecatedLifecycle, + TerminationMessagePath: 
s.DeprecatedTerminationMessagePath, + TerminationMessagePolicy: s.DeprecatedTerminationMessagePolicy, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + Stdin: s.DeprecatedStdin, + StdinOnce: s.DeprecatedStdinOnce, + TTY: s.DeprecatedTTY, } } @@ -682,27 +700,28 @@ type Sidecar struct { // ToK8sContainer converts the Sidecar to a Kubernetes Container struct func (s *Sidecar) ToK8sContainer() *corev1.Container { return &corev1.Container{ - Name: s.Name, - Image: s.Image, - Command: s.Command, - Args: s.Args, - WorkingDir: s.WorkingDir, - Ports: s.Ports, - EnvFrom: s.EnvFrom, - Env: s.Env, - Resources: s.Resources, - VolumeMounts: s.VolumeMounts, - VolumeDevices: s.VolumeDevices, - LivenessProbe: s.LivenessProbe, - ReadinessProbe: s.ReadinessProbe, - StartupProbe: s.StartupProbe, - Lifecycle: s.Lifecycle, - TerminationMessagePath: s.TerminationMessagePath, - ImagePullPolicy: s.ImagePullPolicy, - SecurityContext: s.SecurityContext, - Stdin: s.Stdin, - StdinOnce: s.StdinOnce, - TTY: s.TTY, + Name: s.Name, + Image: s.Image, + Command: s.Command, + Args: s.Args, + WorkingDir: s.WorkingDir, + Ports: s.Ports, + EnvFrom: s.EnvFrom, + Env: s.Env, + Resources: s.Resources, + VolumeMounts: s.VolumeMounts, + VolumeDevices: s.VolumeDevices, + LivenessProbe: s.LivenessProbe, + ReadinessProbe: s.ReadinessProbe, + StartupProbe: s.StartupProbe, + Lifecycle: s.Lifecycle, + TerminationMessagePath: s.TerminationMessagePath, + TerminationMessagePolicy: s.TerminationMessagePolicy, + ImagePullPolicy: s.ImagePullPolicy, + SecurityContext: s.SecurityContext, + Stdin: s.Stdin, + StdinOnce: s.StdinOnce, + TTY: s.TTY, } } @@ -724,6 +743,7 @@ func (s *Sidecar) SetContainerFields(c corev1.Container) { s.StartupProbe = c.StartupProbe s.Lifecycle = c.Lifecycle s.TerminationMessagePath = c.TerminationMessagePath + s.TerminationMessagePolicy = c.TerminationMessagePolicy s.ImagePullPolicy = c.ImagePullPolicy s.SecurityContext = c.SecurityContext s.Stdin = c.Stdin diff --git 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/merge.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/merge.go index deced8e667..335a43f777 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/merge.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/merge.go @@ -59,7 +59,7 @@ func MergeStepsWithStepTemplate(template *StepTemplate, steps []Step) ([]Step, e } // Pass through original step Script, for later conversion. - newStep := Step{Script: s.Script, OnError: s.OnError, Timeout: s.Timeout} + newStep := Step{Script: s.Script, OnError: s.OnError, Timeout: s.Timeout, StdoutConfig: s.StdoutConfig, StderrConfig: s.StderrConfig} newStep.SetContainerFields(merged) steps[i] = newStep } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go index 2f7b249752..64b1040f24 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go @@ -76,6 +76,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SidecarState": schema_pkg_apis_pipeline_v1beta1_SidecarState(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.SkippedTask": schema_pkg_apis_pipeline_v1beta1_SkippedTask(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Step": schema_pkg_apis_pipeline_v1beta1_Step(ref), + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig": schema_pkg_apis_pipeline_v1beta1_StepOutputConfig(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepState": schema_pkg_apis_pipeline_v1beta1_StepState(ref), "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepTemplate": schema_pkg_apis_pipeline_v1beta1_StepTemplate(ref), 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Task": schema_pkg_apis_pipeline_v1beta1_Task(ref), @@ -1109,7 +1110,7 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRef(ref common.ReferenceCallback) return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "PipelineRef can be used to refer to a specific instance of a Pipeline. Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64", + Description: "PipelineRef can be used to refer to a specific instance of a Pipeline.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -1255,6 +1256,13 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref common.ReferenceCallbac Format: "", }, }, + "type": { + SchemaProps: spec.SchemaProps{ + Description: "Type is the user-specified type of the result. The possible types are 'string', 'array', and 'object', with 'string' as the default. 
'array' and 'object' types are alpha features.", + Type: []string{"string"}, + Format: "", + }, + }, "description": { SchemaProps: spec.SchemaProps{ Description: "Description is a human-readable description of the result", @@ -1266,15 +1274,16 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineResult(ref common.ReferenceCallbac "value": { SchemaProps: spec.SchemaProps{ Description: "Value the expression used to retrieve the value", - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"), }, }, }, Required: []string{"name", "value"}, }, }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"}, } } @@ -1391,15 +1400,16 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineRunResult(ref common.ReferenceCall "value": { SchemaProps: spec.SchemaProps{ Description: "Value is the result returned from the execution of this PipelineRun", - Default: "", - Type: []string{"string"}, - Format: "", + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"), }, }, }, Required: []string{"name", "value"}, }, }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.ArrayOrString"}, } } @@ -2476,11 +2486,17 @@ func schema_pkg_apis_pipeline_v1beta1_PipelineTaskRunSpec(ref common.ReferenceCa Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata"), }, }, + "computeResources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute resources to use for this TaskRun", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride", 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PipelineTaskMetadata", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride", "k8s.io/api/core/v1.ResourceRequirements"}, } } @@ -3338,12 +3354,44 @@ func schema_pkg_apis_pipeline_v1beta1_Step(ref common.ReferenceCallback) common. Format: "", }, }, + "stdoutConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Stores configuration for the stdout stream of the step.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig"), + }, + }, + "stderrConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Stores configuration for the stderr stream of the step.", + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig"), + }, + }, }, Required: []string{"name"}, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.StepOutputConfig", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceUsage", "k8s.io/api/core/v1.ContainerPort", "k8s.io/api/core/v1.EnvFromSource", "k8s.io/api/core/v1.EnvVar", "k8s.io/api/core/v1.Lifecycle", "k8s.io/api/core/v1.Probe", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext", "k8s.io/api/core/v1.VolumeDevice", "k8s.io/api/core/v1.VolumeMount", 
"k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + } +} + +func schema_pkg_apis_pipeline_v1beta1_StepOutputConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "StepOutputConfig stores configuration for a step output stream.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path to duplicate stdout stream to on container's local filesystem.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, } } @@ -3757,7 +3805,7 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRef(ref common.ReferenceCallback) comm return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "TaskRef can be used to refer to a specific instance of a task. Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64", + Description: "TaskRef can be used to refer to a specific instance of a task.", Type: []string{"object"}, Properties: map[string]spec.Schema{ "name": { @@ -3974,6 +4022,21 @@ func schema_pkg_apis_pipeline_v1beta1_TaskResult(ref common.ReferenceCallback) c Format: "", }, }, + "properties": { + SchemaProps: spec.SchemaProps{ + Description: "Properties is the JSON Schema properties to support key-value pairs results.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec"), + }, + }, + }, + }, + }, "description": { SchemaProps: spec.SchemaProps{ Description: "Description is a human-readable description of the result", @@ -3985,6 +4048,8 @@ func schema_pkg_apis_pipeline_v1beta1_TaskResult(ref common.ReferenceCallback) c Required: 
[]string{"name"}, }, }, + Dependencies: []string{ + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.PropertySpec"}, } } @@ -4451,11 +4516,17 @@ func schema_pkg_apis_pipeline_v1beta1_TaskRunSpec(ref common.ReferenceCallback) }, }, }, + "computeResources": { + SchemaProps: spec.SchemaProps{ + Description: "Compute resources to use for this TaskRun", + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, + "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod.Template", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.Param", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRef", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunDebug", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunResources", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunSidecarOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskRunStepOverride", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.TaskSpec", "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1.WorkspaceBinding", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/apimachinery/pkg/apis/meta/v1.Duration"}, } } @@ -5128,12 +5199,24 @@ func schema_pkg_apis_pipeline_v1beta1_WorkspaceBinding(ref common.ReferenceCallb Ref: 
ref("k8s.io/api/core/v1.SecretVolumeSource"), }, }, + "projected": { + SchemaProps: spec.SchemaProps{ + Description: "Projected represents a projected volume that should populate this workspace.", + Ref: ref("k8s.io/api/core/v1.ProjectedVolumeSource"), + }, + }, + "csi": { + SchemaProps: spec.SchemaProps{ + Description: "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", + Ref: ref("k8s.io/api/core/v1.CSIVolumeSource"), + }, + }, }, Required: []string{"name"}, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"}, + "k8s.io/api/core/v1.CSIVolumeSource", "k8s.io/api/core/v1.ConfigMapVolumeSource", "k8s.io/api/core/v1.EmptyDirVolumeSource", "k8s.io/api/core/v1.PersistentVolumeClaim", "k8s.io/api/core/v1.PersistentVolumeClaimVolumeSource", "k8s.io/api/core/v1.ProjectedVolumeSource", "k8s.io/api/core/v1.SecretVolumeSource"}, } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go index 068278264f..23e71a52ef 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go @@ -61,7 +61,7 @@ type PropertySpec struct { } // SetDefaults set the default type -func (pp *ParamSpec) SetDefaults(ctx context.Context) { +func (pp *ParamSpec) SetDefaults(context.Context) { if pp == nil { return } @@ -72,11 +72,11 @@ func (pp *ParamSpec) SetDefaults(ctx context.Context) { switch { case pp.Type != "": // If param type is provided by the author, do nothing but just set default type for PropertySpec in case `properties` section is provided. 
- pp.setDefaultsForProperties(ctx) + pp.setDefaultsForProperties() case pp.Properties != nil: pp.Type = ParamTypeObject // Also set default type for PropertySpec - pp.setDefaultsForProperties(ctx) + pp.setDefaultsForProperties() case pp.Default == nil: // ParamTypeString is the default value (when no type can be inferred from the default value) pp.Type = ParamTypeString @@ -92,7 +92,7 @@ func (pp *ParamSpec) SetDefaults(ctx context.Context) { } // setDefaultsForProperties sets default type for PropertySpec (string) if it's not specified -func (pp *ParamSpec) setDefaultsForProperties(ctx context.Context) { +func (pp *ParamSpec) setDefaultsForProperties() { for key, propertySpec := range pp.Properties { if propertySpec.Type == "" { pp.Properties[key] = PropertySpec{Type: ParamTypeString} @@ -195,15 +195,58 @@ func (arrayOrString ArrayOrString) MarshalJSON() ([]byte, error) { } // ApplyReplacements applyes replacements for ArrayOrString type -func (arrayOrString *ArrayOrString) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string) { - if arrayOrString.Type == ParamTypeString { - arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) - } else { - var newArrayVal []string +func (arrayOrString *ArrayOrString) ApplyReplacements(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { + switch arrayOrString.Type { + case ParamTypeArray: + newArrayVal := []string{} for _, v := range arrayOrString.ArrayVal { newArrayVal = append(newArrayVal, substitution.ApplyArrayReplacements(v, stringReplacements, arrayReplacements)...) 
} arrayOrString.ArrayVal = newArrayVal + case ParamTypeObject: + newObjectVal := map[string]string{} + for k, v := range arrayOrString.ObjectVal { + newObjectVal[k] = substitution.ApplyReplacements(v, stringReplacements) + } + arrayOrString.ObjectVal = newObjectVal + default: + arrayOrString.applyOrCorrect(stringReplacements, arrayReplacements, objectReplacements) + } +} + +// applyOrCorrect deals with string param whose value can be string literal or a reference to a string/array/object param/result. +// If the value of arrayOrString is a reference to array or object, the type will be corrected from string to array/object. +func (arrayOrString *ArrayOrString) applyOrCorrect(stringReplacements map[string]string, arrayReplacements map[string][]string, objectReplacements map[string]map[string]string) { + stringVal := arrayOrString.StringVal + + // if the stringVal is a string literal or a string that mixed with var references + // just do the normal string replacement + if !exactVariableSubstitutionRegex.MatchString(stringVal) { + arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) + return + } + + // trim the head "$(" and the tail ")" or "[*])" + // i.e. 
get "params.name" from "$(params.name)" or "$(params.name[*])" + trimedStringVal := substitution.StripStarVarSubExpression(stringVal) + + // if the stringVal is a reference to a string param + if _, ok := stringReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = substitution.ApplyReplacements(arrayOrString.StringVal, stringReplacements) + } + + // if the stringVal is a reference to an array param, we need to change the type other than apply replacement + if _, ok := arrayReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = "" + arrayOrString.ArrayVal = substitution.ApplyArrayReplacements(stringVal, stringReplacements, arrayReplacements) + arrayOrString.Type = ParamTypeArray + } + + // if the stringVal is a reference an object param, we need to change the type other than apply replacement + if _, ok := objectReplacements[trimedStringVal]; ok { + arrayOrString.StringVal = "" + arrayOrString.ObjectVal = objectReplacements[trimedStringVal] + arrayOrString.Type = ParamTypeObject } } @@ -236,23 +279,32 @@ func ArrayReference(a string) string { return strings.TrimSuffix(strings.TrimPrefix(a, "$("+ParamsPrefix+"."), "[*])") } -func validatePipelineParametersVariablesInTaskParameters(params []Param, prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { +// validatePipelineParametersVariablesInTaskParameters validates param value that +// may contain the reference(s) to other params to make sure those references are used appropriately. 
+func validatePipelineParametersVariablesInTaskParameters(params []Param, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for _, param := range params { - if param.Value.Type == ParamTypeString { - errs = errs.Also(validateStringVariable(param.Value.StringVal, prefix, paramNames, arrayParamNames).ViaFieldKey("params", param.Name)) - } else { + switch param.Value.Type { + case ParamTypeArray: for idx, arrayElement := range param.Value.ArrayVal { - errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames).ViaFieldIndex("value", idx).ViaFieldKey("params", param.Name)) + errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("params", param.Name)) } + case ParamTypeObject: + for key, val := range param.Value.ObjectVal { + errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldKey("properties", key).ViaFieldKey("params", param.Name)) + } + default: + errs = errs.Also(validateParamStringValue(param, prefix, paramNames, arrayParamNames, objectParamNameKeys)) } } return errs } -func validatePipelineParametersVariablesInMatrixParameters(matrix []Param, prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { +// validatePipelineParametersVariablesInMatrixParameters validates matrix param value +// that may contain the reference(s) to other params to make sure those references are used appropriately. 
+func validatePipelineParametersVariablesInMatrixParameters(matrix []Param, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for _, param := range matrix { for idx, arrayElement := range param.Value.ArrayVal { - errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames).ViaFieldIndex("value", idx).ViaFieldKey("matrix", param.Name)) + errs = errs.Also(validateArrayVariable(arrayElement, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaFieldIndex("value", idx).ViaFieldKey("matrix", param.Name)) } } return errs @@ -263,10 +315,6 @@ func validateParametersInTaskMatrix(matrix []Param) (errs *apis.FieldError) { if param.Value.Type != ParamTypeArray { errs = errs.Also(apis.ErrInvalidValue("parameters of type array only are allowed in matrix", "").ViaFieldKey("matrix", param.Name)) } - // results are not yet allowed in parameters in a matrix - dynamic fanning out will be supported in future milestone - if expressions, ok := GetVarSubstitutionExpressionsForParam(param); ok && LooksLikeContainsResultRefs(expressions) { - return errs.Also(apis.ErrInvalidValue("result references are not allowed in parameters in a matrix", "value").ViaFieldKey("matrix", param.Name)) - } } return errs } @@ -284,12 +332,42 @@ func validateParameterInOneOfMatrixOrParams(matrix []Param, params []Param) (err return errs } -func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String) *apis.FieldError { +// validateParamStringValue validates the param value field of string type +// that may contain references to other isolated array/object params other than string param. 
+func validateParamStringValue(param Param, prefix string, paramNames sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { + stringValue := param.Value.StringVal + + // if the provided param value is an isolated reference to the whole array/object, we just check if the param name exists. + isIsolated, errs := substitution.ValidateWholeArrayOrObjectRefInStringVariable(param.Name, stringValue, prefix, paramNames) + if isIsolated { + return errs + } + + // if the provided param value is string literal and/or contains multiple variables + // valid example: "$(params.myString) and another $(params.myObject.key1)" + // invalid example: "$(params.myString) and another $(params.myObject[*])" + return validateStringVariable(stringValue, prefix, paramNames, arrayVars, objectParamNameKeys).ViaFieldKey("params", param.Name) +} + +// validateStringVariable validates the normal string fields that can only accept references to string param or individual keys of object param +func validateStringVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) return errs.Also(substitution.ValidateVariableProhibitedP(value, prefix, arrayVars)) } -func validateArrayVariable(value, prefix string, stringVars sets.String, arrayVars sets.String) *apis.FieldError { +func validateArrayVariable(value, prefix string, stringVars sets.String, arrayVars sets.String, objectParamNameKeys map[string][]string) *apis.FieldError { errs := substitution.ValidateVariableP(value, prefix, stringVars) + errs = errs.Also(validateObjectVariable(value, prefix, objectParamNameKeys)) return errs.Also(substitution.ValidateVariableIsolatedP(value, prefix, arrayVars)) } + +func validateObjectVariable(value, prefix string, objectParamNameKeys 
map[string][]string) (errs *apis.FieldError) { + objectNames := sets.NewString() + for objectParamName, keys := range objectParamNameKeys { + objectNames.Insert(objectParamName) + errs = errs.Also(substitution.ValidateVariableP(value, fmt.Sprintf("%s\\.%s", prefix, objectParamName), sets.NewString(keys...))) + } + + return errs.Also(substitution.ValidateEntireVariableProhibitedP(value, prefix, objectNames)) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go index 73da674ac9..b908f958bc 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go @@ -27,10 +27,16 @@ var _ apis.Convertible = (*Pipeline)(nil) // ConvertTo implements api.Convertible func (p *Pipeline) ConvertTo(ctx context.Context, sink apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) } // ConvertFrom implements api.Convertible func (p *Pipeline) ConvertFrom(ctx context.Context, source apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } return fmt.Errorf("v1beta1 is the highest know version, got: %T", source) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go index caa2f6a052..ecacac87ae 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go @@ -24,6 +24,7 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline" + "github.com/tektoncd/pipeline/pkg/apis/version" 
"github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -122,12 +123,17 @@ type PipelineResult struct { // Name the given name Name string `json:"name"` + // Type is the user-specified type of the result. + // The possible types are 'string', 'array', and 'object', with 'string' as the default. + // 'array' and 'object' types are alpha features. + Type ResultsType `json:"type,omitempty"` + // Description is a human-readable description of the result // +optional Description string `json:"description"` // Value the expression used to retrieve the value - Value string `json:"value"` + Value ArrayOrString `json:"value"` } // PipelineTaskMetadata contains the labels or annotations for an EmbeddedTask @@ -301,7 +307,10 @@ func (pt *PipelineTask) validateMatrix(ctx context.Context) (errs *apis.FieldErr if len(pt.Matrix) != 0 { // This is an alpha feature and will fail validation if it's used in a pipeline spec // when the enable-api-fields feature gate is anything but "alpha". - errs = errs.Also(ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "matrix", config.AlphaAPIFields)) + // Matrix requires "embedded-status" feature gate to be set to "minimal", and will fail + // validation if it is anything but "minimal". + errs = errs.Also(ValidateEmbeddedStatus(ctx, "matrix", config.MinimalEmbeddedStatus)) errs = errs.Also(pt.validateMatrixCombinationsCount(ctx)) } errs = errs.Also(validateParameterInOneOfMatrixOrParams(pt.Matrix, pt.Params)) @@ -318,6 +327,26 @@ func (pt *PipelineTask) validateMatrixCombinationsCount(ctx context.Context) (er return errs } +func (pt PipelineTask) validateEmbeddedOrType() (errs *apis.FieldError) { + // Reject cases where APIVersion and/or Kind are specified alongside an embedded Task. + // We determine if this is an embedded Task by checking of TaskSpec.TaskSpec.Steps has items. 
+ if pt.TaskSpec != nil && len(pt.TaskSpec.TaskSpec.Steps) > 0 { + if pt.TaskSpec.APIVersion != "" { + errs = errs.Also(&apis.FieldError{ + Message: "taskSpec.apiVersion cannot be specified when using taskSpec.steps", + Paths: []string{"taskSpec.apiVersion"}, + }) + } + if pt.TaskSpec.Kind != "" { + errs = errs.Also(&apis.FieldError{ + Message: "taskSpec.kind cannot be specified when using taskSpec.steps", + Paths: []string{"taskSpec.kind"}, + }) + } + } + return +} + // GetMatrixCombinationsCount returns the count of combinations of Parameters generated from the Matrix in PipelineTask. func (pt *PipelineTask) GetMatrixCombinationsCount() int { if len(pt.Matrix) == 0 { @@ -461,6 +490,9 @@ func (pt PipelineTask) ValidateName() *apis.FieldError { // calls the validation routine based on the type of the task func (pt PipelineTask) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(pt.validateRefOrSpec()) + + errs = errs.Also(pt.validateEmbeddedOrType()) + cfg := config.FromContextOrDefaults(ctx) // If EnableCustomTasks feature flag is on, validate custom task specifications // pipeline task having taskRef with APIVersion is classified as custom task diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go index c96a86c5cd..a7613c23ea 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go @@ -35,10 +35,10 @@ var _ apis.Validatable = (*Pipeline)(nil) // Validate checks that the Pipeline structure is valid but does not validate // that any references resources exist, that is done at run time. 
func (p *Pipeline) Validate(ctx context.Context) *apis.FieldError { - errs := validate.ObjectMetadata(p.GetObjectMeta()).ViaField("metadata") if apis.IsInDelete(ctx) { return nil } + errs := validate.ObjectMetadata(p.GetObjectMeta()).ViaField("metadata") return errs.Also(p.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } @@ -69,7 +69,7 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Tasks).ViaField("tasks")) errs = errs.Also(validatePipelineWorkspacesUsage(ps.Workspaces, ps.Finally).ViaField("finally")) // Validate the pipeline's results - errs = errs.Also(validatePipelineResults(ps.Results)) + errs = errs.Also(validatePipelineResults(ps.Results, ps.Tasks)) errs = errs.Also(validateTasksAndFinallySection(ps)) errs = errs.Also(validateFinalTasks(ps.Tasks, ps.Finally)) errs = errs.Also(validateWhenExpressions(ps.Tasks, ps.Finally)) @@ -128,6 +128,7 @@ func validatePipelineWorkspacesUsage(wss []PipelineWorkspaceDeclaration, pts []P func validatePipelineParameterVariables(ctx context.Context, tasks []PipelineTask, params []ParamSpec) (errs *apis.FieldError) { parameterNames := sets.NewString() arrayParameterNames := sets.NewString() + objectParameterNameKeys := map[string][]string{} // validates all the types within a slice of ParamSpecs errs = errs.Also(ValidateParameterTypes(ctx, params).ViaField("params")) @@ -141,16 +142,21 @@ func validatePipelineParameterVariables(ctx context.Context, tasks []PipelineTas if p.Type == ParamTypeArray { arrayParameterNames.Insert(p.Name) } - } - return errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames)) + if p.Type == ParamTypeObject { + for k := range p.Properties { + objectParameterNameKeys[p.Name] = append(objectParameterNameKeys[p.Name], k) + } + } + } + return errs.Also(validatePipelineParametersVariables(tasks, "params", parameterNames, arrayParameterNames, 
objectParameterNameKeys)) } -func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { +func validatePipelineParametersVariables(tasks []PipelineTask, prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for idx, task := range tasks { - errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames).ViaIndex(idx)) - errs = errs.Also(validatePipelineParametersVariablesInMatrixParameters(task.Matrix, prefix, paramNames, arrayParamNames).ViaIndex(idx)) - errs = errs.Also(task.WhenExpressions.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames).ViaIndex(idx)) + errs = errs.Also(validatePipelineParametersVariablesInTaskParameters(task.Params, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) + errs = errs.Also(validatePipelineParametersVariablesInMatrixParameters(task.Matrix, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) + errs = errs.Also(task.WhenExpressions.validatePipelineParametersVariables(prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaIndex(idx)) } return errs } @@ -249,16 +255,17 @@ func filter(arr []string, cond func(string) bool) []string { } // validatePipelineResults ensure that pipeline result variables are properly configured -func validatePipelineResults(results []PipelineResult) (errs *apis.FieldError) { +func validatePipelineResults(results []PipelineResult, tasks []PipelineTask) (errs *apis.FieldError) { + pipelineTaskNames := getPipelineTasksNames(tasks) for idx, result := range results { expressions, ok := GetVarSubstitutionExpressionsForPipelineResult(result) if !ok { - return errs.Also(apis.ErrInvalidValue("expected pipeline results to be task result expressions but no expressions were found", + errs = 
errs.Also(apis.ErrInvalidValue("expected pipeline results to be task result expressions but no expressions were found", "value").ViaFieldIndex("results", idx)) } if !LooksLikeContainsResultRefs(expressions) { - return errs.Also(apis.ErrInvalidValue("expected pipeline results to be task result expressions but an invalid expressions was found", + errs = errs.Also(apis.ErrInvalidValue("expected pipeline results to be task result expressions but an invalid expressions was found", "value").ViaFieldIndex("results", idx)) } @@ -269,11 +276,42 @@ func validatePipelineResults(results []PipelineResult) (errs *apis.FieldError) { "value").ViaFieldIndex("results", idx)) } + if !taskContainsResult(result.Value.StringVal, pipelineTaskNames) { + errs = errs.Also(apis.ErrInvalidValue("referencing a nonexistent task", + "value").ViaFieldIndex("results", idx)) + } } return errs } +// put task names in a set +func getPipelineTasksNames(pipelineTasks []PipelineTask) sets.String { + pipelineTaskNames := make(sets.String) + for _, pipelineTask := range pipelineTasks { + pipelineTaskNames.Insert(pipelineTask.Name) + } + + return pipelineTaskNames +} + +// taskContainsResult ensures the result value is referenced within the +// task names +func taskContainsResult(resultExpression string, pipelineTaskNames sets.String) bool { + // split incase of multiple resultExpressions in the same result.Value string + // i.e "$(task.) 
- $(task2.)" + split := strings.Split(resultExpression, "$") + for _, expression := range split { + if expression != "" { + pipelineTaskName, _, _, _, _ := parseExpression(stripVarSubExpression("$" + expression)) + if !pipelineTaskNames.Has(pipelineTaskName) { + return false + } + } + } + return true +} + func validateTasksAndFinallySection(ps *PipelineSpec) *apis.FieldError { if len(ps.Finally) != 0 && len(ps.Tasks) == 0 { return apis.ErrInvalidValue(fmt.Sprintf("spec.tasks is empty but spec.finally has %d tasks", len(ps.Finally)), "finally") diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_types.go new file mode 100644 index 0000000000..17bd57428c --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_types.go @@ -0,0 +1,35 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// PipelineRef can be used to refer to a specific instance of a Pipeline. +type PipelineRef struct { + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name,omitempty"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Bundle url reference to a Tekton Bundle. 
+ // +optional + Bundle string `json:"bundle,omitempty"` + + // ResolverRef allows referencing a Pipeline in a remote location + // like a git repo. This field is only supported when the alpha + // feature gate is enabled. + // +optional + ResolverRef `json:",omitempty"` +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go index 1a61b7b219..45300a5480 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelineref_validation.go @@ -18,71 +18,58 @@ package v1beta1 import ( "context" + "fmt" "github.com/google/go-containerregistry/pkg/name" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" "knative.dev/pkg/apis" ) // Validate ensures that a supplied PipelineRef field is populated // correctly. No errors are returned for a nil PipelineRef. func (ref *PipelineRef) Validate(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) if ref == nil { return } - if cfg.FeatureFlags.EnableAPIFields == config.AlphaAPIFields { - errs = errs.Also(ref.validateAlphaRef(ctx)) - } else { - errs = errs.Also(ref.validateInTreeRef(ctx)) - } - return -} - -// validateInTreeRef returns errors if the given pipelineRef is not -// valid for Pipelines' built-in resolution machinery. 
-func (ref *PipelineRef) validateInTreeRef(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) - if ref.Resolver != "" { - errs = errs.Also(apis.ErrDisallowedFields("resolver")) - } - if ref.Resource != nil { - errs = errs.Also(apis.ErrDisallowedFields("resource")) - } - if ref.Name == "" { - errs = errs.Also(apis.ErrMissingField("name")) - } - if cfg.FeatureFlags.EnableTektonOCIBundles { - if ref.Bundle != "" && ref.Name == "" { - errs = errs.Also(apis.ErrMissingField("name")) - } - if ref.Bundle != "" { - if _, err := name.ParseReference(ref.Bundle); err != nil { - errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) - } - } - } else if ref.Bundle != "" { - errs = errs.Also(apis.ErrDisallowedFields("bundle")) - } - return -} -// validateAlphaRef ensures that the user has passed either a -// valid remote resource reference or a valid in-tree resource reference, -// but not both. -func (ref *PipelineRef) validateAlphaRef(ctx context.Context) (errs *apis.FieldError) { switch { - case ref.Resolver == "" && ref.Resource != nil: - errs = errs.Also(apis.ErrMissingField("resolver")) - case ref.Resolver == "": - errs = errs.Also(ref.validateInTreeRef(ctx)) - default: + case ref.Resolver != "": + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) if ref.Name != "" { errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) } if ref.Bundle != "" { errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver")) } + case ref.Resource != nil: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resource", config.AlphaAPIFields).ViaField("resource")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "resource")) + } + if ref.Bundle != "" { + errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resource")) + } + if ref.Resolver == "" { + errs = errs.Also(apis.ErrMissingField("resolver")) + } + case ref.Name == "": + errs = 
errs.Also(apis.ErrMissingField("name")) + case ref.Bundle != "": + errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) + if _, err := name.ParseReference(ref.Bundle); err != nil { + errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) + } } return } + +func validateBundleFeatureFlag(ctx context.Context, featureName string, wantValue bool) *apis.FieldError { + flagValue := config.FromContextOrDefaults(ctx).FeatureFlags.EnableTektonOCIBundles + if flagValue != wantValue { + var errs *apis.FieldError + message := fmt.Sprintf(`%s requires "enable-tekton-oci-bundles" feature gate to be %t but it is %t`, featureName, wantValue, flagValue) + return errs.Also(apis.ErrGeneric(message)) + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go index e213f10eec..c1f4a3a126 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go @@ -27,10 +27,16 @@ var _ apis.Convertible = (*PipelineRun)(nil) // ConvertTo implements api.Convertible func (pr *PipelineRun) ConvertTo(ctx context.Context, sink apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) } // ConvertFrom implements api.Convertible func (pr *PipelineRun) ConvertFrom(ctx context.Context, source apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } return fmt.Errorf("v1beta1 is the highest know version, got: %T", source) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go index 5274e1373a..93500365c9 100644 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go @@ -20,6 +20,7 @@ import ( "context" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "github.com/tektoncd/pipeline/pkg/apis/config" @@ -83,7 +84,7 @@ func (pr *PipelineRun) HasStarted() bool { // IsCancelled returns true if the PipelineRun's spec status is set to Cancelled state func (pr *PipelineRun) IsCancelled() bool { - return pr.Spec.Status == PipelineRunSpecStatusCancelled || pr.Spec.Status == PipelineRunSpecStatusCancelledDeprecated + return pr.Spec.Status == PipelineRunSpecStatusCancelled } // IsGracefullyCancelled returns true if the PipelineRun's spec status is set to CancelledRunFinally state @@ -242,10 +243,6 @@ type TimeoutFields struct { type PipelineRunSpecStatus string const ( - // PipelineRunSpecStatusCancelledDeprecated Deprecated: indicates that the user wants to cancel the task, - // if not already cancelled or terminated (replaced by "Cancelled") - PipelineRunSpecStatusCancelledDeprecated = "PipelineRunCancelled" - // PipelineRunSpecStatusCancelled indicates that the user wants to cancel the task, // if not already cancelled or terminated PipelineRunSpecStatusCancelled = "Cancelled" @@ -264,25 +261,6 @@ const ( PipelineRunSpecStatusPending = "PipelineRunPending" ) -// PipelineRef can be used to refer to a specific instance of a Pipeline. -// Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64 -type PipelineRef struct { - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name,omitempty"` - // API version of the referent - // +optional - APIVersion string `json:"apiVersion,omitempty"` - // Bundle url reference to a Tekton Bundle. 
- // +optional - Bundle string `json:"bundle,omitempty"` - - // ResolverRef allows referencing a Pipeline in a remote location - // like a git repo. This field is only supported when the alpha - // feature gate is enabled. - // +optional - ResolverRef `json:",omitempty"` -} - // PipelineRunStatus defines the observed state of PipelineRun type PipelineRunStatus struct { duckv1beta1.Status `json:",inline"` @@ -481,7 +459,7 @@ type PipelineRunResult struct { Name string `json:"name"` // Value is the result returned from the execution of this PipelineRun - Value string `json:"value"` + Value ArrayOrString `json:"value"` } // PipelineRunTaskRunStatus contains the name of the PipelineTask for this TaskRun and the TaskRun's Status @@ -540,6 +518,9 @@ type PipelineTaskRunSpec struct { // +optional Metadata *PipelineTaskMetadata `json:"metadata,omitempty"` + + // Compute resources to use for this TaskRun + ComputeResources *corev1.ResourceRequirements `json:"computeResources,omitempty"` } // GetTaskRunSpec returns the task specific spec for a given diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go index 482cda177e..a1a3cbccb0 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go @@ -22,8 +22,8 @@ import ( "time" "github.com/tektoncd/pipeline/pkg/apis/config" - apisconfig "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) @@ -32,12 +32,12 @@ var _ apis.Validatable = (*PipelineRun)(nil) // Validate pipelinerun func (pr *PipelineRun) Validate(ctx context.Context) *apis.FieldError { - errs := 
validate.ObjectMetadata(pr.GetObjectMeta()).ViaField("metadata") - if apis.IsInDelete(ctx) { return nil } + errs := validate.ObjectMetadata(pr.GetObjectMeta()).ViaField("metadata") + if pr.IsPending() && pr.HasStarted() { errs = errs.Also(apis.ErrInvalidValue("PipelineRun cannot be Pending after it is started", "spec.status")) } @@ -72,8 +72,6 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) } } - // This is an alpha feature and will fail validation if it's used in a pipelinerun spec - // when the enable-api-fields feature gate is anything but "alpha". if ps.Timeouts != nil { if ps.Timeout != nil { // can't have both at the same time @@ -97,7 +95,7 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) } } - errs = errs.Also(validateSpecStatus(ctx, ps.Status)) + errs = errs.Also(validateSpecStatus(ps.Status)) if ps.Workspaces != nil { wsNames := make(map[string]int) @@ -117,12 +115,11 @@ func (ps *PipelineRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) return errs } -func validateSpecStatus(ctx context.Context, status PipelineRunSpecStatus) *apis.FieldError { +func validateSpecStatus(status PipelineRunSpecStatus) *apis.FieldError { switch status { case "": return nil - case PipelineRunSpecStatusPending, - PipelineRunSpecStatusCancelledDeprecated: + case PipelineRunSpecStatusPending: return nil case PipelineRunSpecStatusCancelled, PipelineRunSpecStatusCancelledRunFinally, @@ -153,7 +150,7 @@ func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorM if ps.Timeouts.Tasks.Duration > timeout { tasksTimeoutErr = true } - if ps.Timeouts.Tasks.Duration == apisconfig.NoTimeoutDuration && timeout != apisconfig.NoTimeoutDuration { + if ps.Timeouts.Tasks.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration { tasksTimeoutErr = true tasksTimeoutStr += " (no timeout)" } @@ -168,7 +165,7 @@ func (ps *PipelineRunSpec) validatePipelineTimeout(timeout 
time.Duration, errorM if ps.Timeouts.Finally.Duration > timeout { finallyTimeoutErr = true } - if ps.Timeouts.Finally.Duration == apisconfig.NoTimeoutDuration && timeout != apisconfig.NoTimeoutDuration { + if ps.Timeouts.Finally.Duration == config.NoTimeoutDuration && timeout != config.NoTimeoutDuration { finallyTimeoutErr = true finallyTimeoutStr += " (no timeout)" } @@ -187,21 +184,17 @@ func (ps *PipelineRunSpec) validatePipelineTimeout(timeout time.Duration, errorM } func validateTaskRunSpec(ctx context.Context, trs PipelineTaskRunSpec) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) - if cfg.FeatureFlags.EnableAPIFields == config.AlphaAPIFields { - if trs.StepOverrides != nil { - errs = errs.Also(validateStepOverrides(trs.StepOverrides).ViaField("stepOverrides")) - } - if trs.SidecarOverrides != nil { - errs = errs.Also(validateSidecarOverrides(trs.SidecarOverrides).ViaField("sidecarOverrides")) - } - } else { - if trs.StepOverrides != nil { - errs = errs.Also(apis.ErrDisallowedFields("stepOverrides")) - } - if trs.SidecarOverrides != nil { - errs = errs.Also(apis.ErrDisallowedFields("sidecarOverrides")) - } + if trs.StepOverrides != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "stepOverrides", config.AlphaAPIFields).ViaField("stepOverrides")) + errs = errs.Also(validateStepOverrides(trs.StepOverrides).ViaField("stepOverrides")) + } + if trs.SidecarOverrides != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.AlphaAPIFields).ViaField("sidecarOverrides")) + errs = errs.Also(validateSidecarOverrides(trs.SidecarOverrides).ViaField("sidecarOverrides")) + } + if trs.ComputeResources != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "computeResources", config.AlphaAPIFields).ViaField("computeResources")) + errs = errs.Also(validateTaskRunComputeResources(trs.ComputeResources, trs.StepOverrides)) } return errs } diff --git 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go index 2c453f136a..2898fd9dfc 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go @@ -26,7 +26,7 @@ import ( ) // Validate implements apis.Validatable -func (tr *TaskResources) Validate(ctx context.Context) (errs *apis.FieldError) { +func (tr *TaskResources) Validate(context.Context) (errs *apis.FieldError) { if tr != nil { errs = errs.Also(validateTaskResources(tr.Inputs).ViaField("inputs")) errs = errs.Also(validateTaskResources(tr.Outputs).ViaField("outputs")) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_defaults.go index 28cdc21cfa..68de44f6da 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_defaults.go @@ -16,7 +16,7 @@ package v1beta1 import "context" // SetDefaults set the default type for TaskResult -func (tr *TaskResult) SetDefaults(ctx context.Context) { +func (tr *TaskResult) SetDefaults(context.Context) { if tr != nil && tr.Type == "" { // ResultsTypeString is the default value tr.Type = ResultsTypeString diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go index 21a4e1ae16..cbdc5404c3 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_types.go @@ -13,6 +13,8 @@ limitations under the License. 
package v1beta1 +import "strings" + // TaskResult used to describe the results of a task type TaskResult struct { // Name the given name @@ -23,6 +25,10 @@ type TaskResult struct { // +optional Type ResultsType `json:"type,omitempty"` + // Properties is the JSON Schema properties to support key-value pairs results. + // +optional + Properties map[string]PropertySpec `json:"properties,omitempty"` + // Description is a human-readable description of the result // +optional Description string `json:"description,omitempty"` @@ -60,3 +66,8 @@ const ( // AllResultsTypes can be used for ResultsTypes validation. var AllResultsTypes = []ResultsType{ResultsTypeString, ResultsTypeArray, ResultsTypeObject} + +// ResultsArrayReference returns the reference of the result. e.g. results.resultname from $(results.resultname[*]) +func ResultsArrayReference(a string) string { + return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(a, "$("), ")"), "[*]") +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go index 6df4314964..3d0dd4ee04 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/result_validation.go @@ -18,6 +18,7 @@ import ( "fmt" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" "knative.dev/pkg/apis" ) @@ -28,7 +29,7 @@ func (tr TaskResult) Validate(ctx context.Context) (errs *apis.FieldError) { } // Array and Object is alpha feature if tr.Type == ResultsTypeArray || tr.Type == ResultsTypeObject { - return errs.Also(ValidateEnabledAPIFields(ctx, "results type", config.AlphaAPIFields)) + return errs.Also(version.ValidateEnabledAPIFields(ctx, "results type", config.AlphaAPIFields)) } // Resources created before the result. 
Type was introduced may not have Type set diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go index 466923dc8d..4eb8fa322a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go @@ -43,15 +43,22 @@ const ( ResultResultPart = "results" // TODO(#2462) use one regex across all substitutions // variableSubstitutionFormat matches format like $result.resultname, $result.resultname[int] and $result.resultname[*] - variableSubstitutionFormat = `\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9])*\*?\])?\)` + variableSubstitutionFormat = `\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)` + // exactVariableSubstitutionFormat matches strings that only contain a single reference to result or param variables, but nothing else + // i.e. `$(result.resultname)` is a match, but `foo $(result.resultname)` is not. + exactVariableSubstitutionFormat = `^\$\([_a-zA-Z0-9.-]+(\.[_a-zA-Z0-9.-]+)*(\[([0-9]+|\*)\])?\)$` // arrayIndexing will match all `[int]` and `[*]` for parseExpression arrayIndexing = `\[([0-9])*\*?\]` // ResultNameFormat Constant used to define the the regex Result.Name should follow ResultNameFormat = `^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$` ) -var variableSubstitutionRegex = regexp.MustCompile(variableSubstitutionFormat) +// VariableSubstitutionRegex is a regex to find all result matching substitutions +var VariableSubstitutionRegex = regexp.MustCompile(variableSubstitutionFormat) +var exactVariableSubstitutionRegex = regexp.MustCompile(exactVariableSubstitutionFormat) var resultNameFormatRegex = regexp.MustCompile(ResultNameFormat) + +// arrayIndexingRegex is used to match `[int]` and `[*]` var arrayIndexingRegex = regexp.MustCompile(arrayIndexing) // NewResultRefs extracts all ResultReferences from a param or a pipeline result. 
@@ -120,12 +127,18 @@ func GetVarSubstitutionExpressionsForParam(param Param) ([]string, bool) { // GetVarSubstitutionExpressionsForPipelineResult extracts all the value between "$(" and ")"" for a pipeline result func GetVarSubstitutionExpressionsForPipelineResult(result PipelineResult) ([]string, bool) { - allExpressions := validateString(result.Value) + allExpressions := validateString(result.Value.StringVal) + for _, v := range result.Value.ArrayVal { + allExpressions = append(allExpressions, validateString(v)...) + } + for _, v := range result.Value.ObjectVal { + allExpressions = append(allExpressions, validateString(v)...) + } return allExpressions, len(allExpressions) != 0 } func validateString(value string) []string { - expressions := variableSubstitutionRegex.FindAllString(value, -1) + expressions := VariableSubstitutionRegex.FindAllString(value, -1) if expressions == nil { return nil } @@ -160,13 +173,12 @@ func parseExpression(substitutionExpression string) (string, string, int, string // For string result: tasks..results. // For array result: tasks..results.[index] if len(subExpressions) == 4 && subExpressions[0] == ResultTaskPart && subExpressions[2] == ResultResultPart { - stringIdx := strings.TrimSuffix(strings.TrimPrefix(arrayIndexingRegex.FindString(subExpressions[3]), "["), "]") - subExpressions[3] = arrayIndexingRegex.ReplaceAllString(subExpressions[3], "") + resultName, stringIdx := ParseResultName(subExpressions[3]) if stringIdx != "" { intIdx, _ := strconv.Atoi(stringIdx) - return subExpressions[1], subExpressions[3], intIdx, "", nil + return subExpressions[1], resultName, intIdx, "", nil } - return subExpressions[1], subExpressions[3], 0, "", nil + return subExpressions[1], resultName, 0, "", nil } // For object type result: tasks..results.. @@ -177,11 +189,24 @@ func parseExpression(substitutionExpression string) (string, string, int, string return "", "", 0, "", fmt.Errorf("Must be one of the form 1). %q; 2). 
%q", resultExpressionFormat, objectResultExpressionFormat) } +// ParseResultName parse the input string to extract resultName and result index. +// Array indexing: +// Input: anArrayResult[1] +// Output: anArrayResult, "1" +// Array star reference: +// Input: anArrayResult[*] +// Output: anArrayResult, "*" +func ParseResultName(resultName string) (string, string) { + stringIdx := strings.TrimSuffix(strings.TrimPrefix(arrayIndexingRegex.FindString(resultName), "["), "]") + resultName = arrayIndexingRegex.ReplaceAllString(resultName, "") + return resultName, stringIdx +} + // PipelineTaskResultRefs walks all the places a result reference can be used // in a PipelineTask and returns a list of any references that are found. func PipelineTaskResultRefs(pt *PipelineTask) []*ResultRef { refs := []*ResultRef{} - for _, p := range pt.Params { + for _, p := range append(pt.Params, pt.Matrix...) { expressions, _ := GetVarSubstitutionExpressionsForParam(p) refs = append(refs, NewResultRefs(expressions)...) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/sidecar_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/sidecar_replacements.go deleted file mode 100644 index a331936cd4..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/sidecar_replacements.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - Copyright 2020 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package v1beta1 - -import ( - "github.com/tektoncd/pipeline/pkg/substitution" -) - -// ApplySidecarReplacements applies variable interpolation on a Sidecar. -func ApplySidecarReplacements(sidecar *Sidecar, stringReplacements map[string]string, arrayReplacements map[string][]string) { - sidecar.Script = substitution.ApplyReplacements(sidecar.Script, stringReplacements) - applySidecarReplacements(sidecar, stringReplacements, arrayReplacements) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/status_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/status_validation.go new file mode 100644 index 0000000000..860bd7f1f8 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/status_validation.go @@ -0,0 +1,36 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + + "github.com/tektoncd/pipeline/pkg/apis/config" + "knative.dev/pkg/apis" +) + +// ValidateEmbeddedStatus checks that the embedded-status feature gate is set to the wantEmbeddedStatus value and, +// if not, returns an error stating which feature is dependent on the status and what the current status actually is. 
+func ValidateEmbeddedStatus(ctx context.Context, featureName, wantEmbeddedStatus string) *apis.FieldError { + embeddedStatus := config.FromContextOrDefaults(ctx).FeatureFlags.EmbeddedStatus + if embeddedStatus != wantEmbeddedStatus { + message := fmt.Sprintf(`%s requires "embedded-status" feature gate to be %q but it is %q`, featureName, wantEmbeddedStatus, embeddedStatus) + return apis.ErrGeneric(message) + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/step_replacements.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/step_replacements.go deleted file mode 100644 index 3331d30d6f..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/step_replacements.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - Copyright 2019 The Tekton Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package v1beta1 - -import ( - "github.com/tektoncd/pipeline/pkg/substitution" -) - -// ApplyStepReplacements applies variable interpolation on a Step. 
-func ApplyStepReplacements(step *Step, stringReplacements map[string]string, arrayReplacements map[string][]string) { - step.Script = substitution.ApplyReplacements(step.Script, stringReplacements) - applyStepReplacements(step, stringReplacements, arrayReplacements) -} - -// ApplyStepTemplateReplacements applies variable interpolation on a StepTemplate (aka a container) -func ApplyStepTemplateReplacements(stepTemplate *StepTemplate, stringReplacements map[string]string, arrayReplacements map[string][]string) { - container := stepTemplate.ToK8sContainer() - applyContainerReplacements(container, stringReplacements, arrayReplacements) - stepTemplate.SetContainerFields(*container) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json index 0d55e1f779..1bc9780c05 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json @@ -707,7 +707,7 @@ } }, "v1beta1.PipelineRef": { - "description": "PipelineRef can be used to refer to a specific instance of a Pipeline. Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64", + "description": "PipelineRef can be used to refer to a specific instance of a Pipeline.", "type": "object", "properties": { "apiVersion": { @@ -799,10 +799,14 @@ "type": "string", "default": "" }, + "type": { + "description": "Type is the user-specified type of the result. The possible types are 'string', 'array', and 'object', with 'string' as the default. 
'array' and 'object' types are alpha features.", + "type": "string" + }, "value": { "description": "Value the expression used to retrieve the value", - "type": "string", - "default": "" + "default": {}, + "$ref": "#/definitions/v1beta1.ArrayOrString" } } }, @@ -872,8 +876,8 @@ }, "value": { "description": "Value is the result returned from the execution of this PipelineRun", - "type": "string", - "default": "" + "default": {}, + "$ref": "#/definitions/v1beta1.ArrayOrString" } } }, @@ -1394,6 +1398,10 @@ "description": "PipelineTaskRunSpec can be used to configure specific specs for a concrete Task", "type": "object", "properties": { + "computeResources": { + "description": "Compute resources to use for this TaskRun", + "$ref": "#/definitions/v1.ResourceRequirements" + }, "metadata": { "$ref": "#/definitions/v1beta1.PipelineTaskMetadata" }, @@ -1844,6 +1852,10 @@ "description": "Deprecated. This field will be removed in a future release. DeprecatedStartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "$ref": "#/definitions/v1.Probe" }, + "stderrConfig": { + "description": "Stores configuration for the stderr stream of the step.", + "$ref": "#/definitions/v1beta1.StepOutputConfig" + }, "stdin": { "description": "Deprecated. This field will be removed in a future release. Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. 
Default is false.", "type": "boolean" @@ -1852,6 +1864,10 @@ "description": "Deprecated. This field will be removed in a future release. Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", "type": "boolean" }, + "stdoutConfig": { + "description": "Stores configuration for the stdout stream of the step.", + "$ref": "#/definitions/v1beta1.StepOutputConfig" + }, "terminationMessagePath": { "description": "Deprecated. This field will be removed in a future release. Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "type": "string" @@ -1905,6 +1921,16 @@ } } }, + "v1beta1.StepOutputConfig": { + "description": "StepOutputConfig stores configuration for a step output stream.", + "type": "object", + "properties": { + "path": { + "description": "Path to duplicate stdout stream to on container's local filesystem.", + "type": "string" + } + } + }, "v1beta1.StepState": { "description": "StepState reports the results of running a step in a Task.", "type": "object", @@ -2130,7 +2156,7 @@ } }, "v1beta1.TaskRef": { - "description": "TaskRef can be used to refer to a specific instance of a task. 
Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64", + "description": "TaskRef can be used to refer to a specific instance of a task.", "type": "object", "properties": { "apiVersion": { @@ -2250,6 +2276,14 @@ "type": "string", "default": "" }, + "properties": { + "description": "Properties is the JSON Schema properties to support key-value pairs results.", + "type": "object", + "additionalProperties": { + "default": {}, + "$ref": "#/definitions/v1beta1.PropertySpec" + } + }, "type": { "description": "Type is the user-specified type of the result. The possible type is currently \"string\" and will support \"array\" in following work.", "type": "string" @@ -2432,6 +2466,10 @@ "description": "TaskRunSpec defines the desired state of TaskRun", "type": "object", "properties": { + "computeResources": { + "description": "Compute resources to use for this TaskRun", + "$ref": "#/definitions/v1.ResourceRequirements" + }, "debug": { "$ref": "#/definitions/v1beta1.TaskRunDebug" }, @@ -2832,6 +2870,10 @@ "description": "ConfigMap represents a configMap that should populate this workspace.", "$ref": "#/definitions/v1.ConfigMapVolumeSource" }, + "csi": { + "description": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers.", + "$ref": "#/definitions/v1.CSIVolumeSource" + }, "emptyDir": { "description": "EmptyDir represents a temporary directory that shares a Task's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir Either this OR PersistentVolumeClaim can be used.", "$ref": "#/definitions/v1.EmptyDirVolumeSource" @@ -2845,6 +2887,10 @@ "description": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. 
Either this OR EmptyDir can be used.", "$ref": "#/definitions/v1.PersistentVolumeClaimVolumeSource" }, + "projected": { + "description": "Projected represents a projected volume that should populate this workspace.", + "$ref": "#/definitions/v1.ProjectedVolumeSource" + }, "secret": { "description": "Secret represents a secret that should populate this workspace.", "$ref": "#/definitions/v1.SecretVolumeSource" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go index 646a740915..f8155cd97c 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go @@ -27,10 +27,16 @@ var _ apis.Convertible = (*Task)(nil) // ConvertTo implements api.Convertible func (t *Task) ConvertTo(ctx context.Context, sink apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) } // ConvertFrom implements api.Convertible func (t *Task) ConvertFrom(ctx context.Context, source apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } return fmt.Errorf("v1beta1 is the highest know version, got: %T", source) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go index c0d168b0a7..957b0aef69 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go @@ -134,36 +134,3 @@ type TaskList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []Task `json:"items"` } - -// TaskRef can be used to refer to a specific instance of a task. 
-// Copied from CrossVersionObjectReference: https://github.com/kubernetes/kubernetes/blob/169df7434155cbbc22f1532cba8e0a9588e29ad8/pkg/apis/autoscaling/types.go#L64 -type TaskRef struct { - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name,omitempty"` - // TaskKind indicates the kind of the task, namespaced or cluster scoped. - Kind TaskKind `json:"kind,omitempty"` - // API version of the referent - // +optional - APIVersion string `json:"apiVersion,omitempty"` - // Bundle url reference to a Tekton Bundle. - // +optional - Bundle string `json:"bundle,omitempty"` - - // ResolverRef allows referencing a Task in a remote location - // like a git repo. This field is only supported when the alpha - // feature gate is enabled. - // +optional - ResolverRef `json:",omitempty"` -} - -// Check that Pipeline may be validated and defaulted. - -// TaskKind defines the type of Task used by the pipeline. -type TaskKind string - -const ( - // NamespacedTaskKind indicates that the task type has a namespaced scope. - NamespacedTaskKind TaskKind = "Task" - // ClusterTaskKind indicates that task type has a cluster scope. 
- ClusterTaskKind TaskKind = "ClusterTask" -) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go index 5d17c2b452..335ecd52c4 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go @@ -26,6 +26,7 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" "github.com/tektoncd/pipeline/pkg/list" "github.com/tektoncd/pipeline/pkg/substitution" corev1 "k8s.io/api/core/v1" @@ -34,16 +35,27 @@ import ( "knative.dev/pkg/apis" ) -var _ apis.Validatable = (*Task)(nil) +const ( + // stringAndArrayVariableNameFormat is the regex to validate if string/array variable name format follows the following rules. + // - Must only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.) + // - Must begin with a letter or an underscore (_) + stringAndArrayVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9.-]*$" + + // objectVariableNameFormat is the regext used to validate object name and key names format + // The difference with the array or string name format is that object variable names shouldn't contain dots. 
+ objectVariableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9-]*$" +) -const variableNameFormat = "^[_a-zA-Z][_a-zA-Z0-9.-]*$" +var _ apis.Validatable = (*Task)(nil) +var stringAndArrayVariableNameFormatRegex = regexp.MustCompile(stringAndArrayVariableNameFormat) +var objectVariableNameFormatRegex = regexp.MustCompile(objectVariableNameFormat) // Validate implements apis.Validatable func (t *Task) Validate(ctx context.Context) *apis.FieldError { - errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") if apis.IsInDelete(ctx) { return nil } + errs := validate.ObjectMetadata(t.GetObjectMeta()).ViaField("metadata") return errs.Also(t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } @@ -140,7 +152,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for stepIdx, step := range steps { if len(step.Workspaces) != 0 { - errs = errs.Also(ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step workspaces", config.AlphaAPIFields).ViaIndex(stepIdx).ViaField("steps")) } for workspaceIdx, w := range step.Workspaces { if !wsNames.Has(w.Name) { @@ -151,7 +163,7 @@ func validateWorkspaceUsages(ctx context.Context, ts *TaskSpec) (errs *apis.Fiel for sidecarIdx, sidecar := range sidecars { if len(sidecar.Workspaces) != 0 { - errs = errs.Also(ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecar workspaces", config.AlphaAPIFields).ViaIndex(sidecarIdx).ViaField("sidecars")) } for workspaceIdx, w := range sidecar.Workspaces { if !wsNames.Has(w.Name) { @@ -243,9 +255,20 @@ func validateStep(ctx context.Context, s Step, names sets.String) (errs *apis.Fi if s.Script != "" { cleaned := strings.TrimSpace(s.Script) if strings.HasPrefix(cleaned, "#!win") { - errs = errs.Also(ValidateEnabledAPIFields(ctx, 
"windows script support", config.AlphaAPIFields).ViaField("script")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "windows script support", config.AlphaAPIFields).ViaField("script")) } } + + // StdoutConfig is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + if s.StdoutConfig != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step stdout stream support", config.AlphaAPIFields).ViaField("stdoutconfig")) + } + // StderrConfig is an alpha feature and will fail validation if it's used in a task spec + // when the enable-api-fields feature gate is not "alpha". + if s.StderrConfig != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "step stderr stream support", config.AlphaAPIFields).ViaField("stderrconfig")) + } return errs } @@ -255,7 +278,7 @@ func ValidateParameterTypes(ctx context.Context, params []ParamSpec) (errs *apis if p.Type == ParamTypeObject { // Object type parameter is an alpha feature and will fail validation if it's used in a task spec // when the enable-api-fields feature gate is not "alpha". 
- errs = errs.Also(ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) } errs = errs.Also(p.ValidateType()) } @@ -318,25 +341,30 @@ func (p ParamSpec) ValidateObjectType() *apis.FieldError { // ValidateParameterVariables validates all variables within a slice of ParamSpecs against a slice of Steps func ValidateParameterVariables(ctx context.Context, steps []Step, params []ParamSpec) *apis.FieldError { - parameterNames := sets.NewString() + allParameterNames := sets.NewString() + stringParameterNames := sets.NewString() arrayParameterNames := sets.NewString() objectParamSpecs := []ParamSpec{} var errs *apis.FieldError for _, p := range params { // validate no duplicate names - if parameterNames.Has(p.Name) { + if allParameterNames.Has(p.Name) { errs = errs.Also(apis.ErrGeneric("parameter appears more than once", "").ViaFieldKey("params", p.Name)) } - parameterNames.Insert(p.Name) - if p.Type == ParamTypeArray { + allParameterNames.Insert(p.Name) + + switch p.Type { + case ParamTypeArray: arrayParameterNames.Insert(p.Name) - } - if p.Type == ParamTypeObject { + case ParamTypeObject: objectParamSpecs = append(objectParamSpecs, p) + default: + stringParameterNames.Insert(p.Name) } } - errs = errs.Also(validateVariables(ctx, steps, "params", parameterNames)) + errs = errs.Also(validateNameFormat(stringParameterNames.Insert(arrayParameterNames.List()...), objectParamSpecs)) + errs = errs.Also(validateVariables(ctx, steps, "params", allParameterNames)) errs = errs.Also(validateArrayUsage(steps, "params", arrayParameterNames)) errs = errs.Also(validateObjectDefault(objectParamSpecs)) return errs.Also(validateObjectUsage(ctx, steps, objectParamSpecs)) @@ -399,13 +427,13 @@ func validateObjectUsage(ctx context.Context, steps []Step, params []ParamSpec) // slice of ParamSpecs are provided in default iff the default section is provided. 
func validateObjectDefault(objectParams []ParamSpec) (errs *apis.FieldError) { for _, p := range objectParams { - errs = errs.Also(validateObjectKeys(p.Properties, p.Default).ViaField(p.Name)) + errs = errs.Also(ValidateObjectKeys(p.Properties, p.Default).ViaField(p.Name)) } return errs } -// validateObjectKeys validates if object keys defined in properties are all provided in its value provider iff the provider is not nil. -func validateObjectKeys(properties map[string]PropertySpec, propertiesProvider *ArrayOrString) (errs *apis.FieldError) { +// ValidateObjectKeys validates if object keys defined in properties are all provided in its value provider iff the provider is not nil. +func ValidateObjectKeys(properties map[string]PropertySpec, propertiesProvider *ArrayOrString) (errs *apis.FieldError) { if propertiesProvider == nil || propertiesProvider.ObjectVal == nil { return nil } @@ -497,31 +525,59 @@ func validateStepArrayUsage(step Step, prefix string, vars sets.String) *apis.Fi } func validateVariables(ctx context.Context, steps []Step, prefix string, vars sets.String) (errs *apis.FieldError) { - // validate that the variable name format follows the rules - // - Must only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.) - // - Must begin with a letter or an underscore (_) - re := regexp.MustCompile(variableNameFormat) - invalidNames := []string{} + // We've checked param name format. 
Now, we want to check if param names are referenced correctly in each step + for idx, step := range steps { + errs = errs.Also(validateStepVariables(ctx, step, prefix, vars).ViaFieldIndex("steps", idx)) + } + return errs +} + +// validateNameFormat validates that the name format of all param types follows the rules +func validateNameFormat(stringAndArrayParams sets.String, objectParams []ParamSpec) (errs *apis.FieldError) { + // checking string or array name format + // ---- + invalidStringAndArrayNames := []string{} // Converting to sorted list here rather than just looping map keys // because we want the order of items in vars to be deterministic for purpose of unit testing - for _, name := range vars.List() { - if !re.MatchString(name) { - invalidNames = append(invalidNames, name) + for _, name := range stringAndArrayParams.List() { + if !stringAndArrayVariableNameFormatRegex.MatchString(name) { + invalidStringAndArrayNames = append(invalidStringAndArrayNames, name) } } - if len(invalidNames) != 0 { - return &apis.FieldError{ - Message: fmt.Sprintf("The format of following variable names is invalid. 
%s", invalidNames), + if len(invalidStringAndArrayNames) != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("The format of following array and string variable names is invalid: %s", invalidStringAndArrayNames), Paths: []string{"params"}, - Details: "Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)\nMust begin with a letter or an underscore (_)", + Details: "String/Array Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_), and dots (.)\nMust begin with a letter or an underscore (_)", + }) + } + + // checking object name and key name format + // ----- + invalidObjectNames := map[string][]string{} + for _, obj := range objectParams { + // check object param name + if !objectVariableNameFormatRegex.MatchString(obj.Name) { + invalidObjectNames[obj.Name] = []string{} + } + + // check key names + for k := range obj.Properties { + if !objectVariableNameFormatRegex.MatchString(k) { + invalidObjectNames[obj.Name] = append(invalidObjectNames[obj.Name], k) + } } } - // We've checked param name format. 
Now, we want to check if param names are referenced correctly in each step - for idx, step := range steps { - errs = errs.Also(validateStepVariables(ctx, step, prefix, vars).ViaFieldIndex("steps", idx)) + if len(invalidObjectNames) != 0 { + errs = errs.Also(&apis.FieldError{ + Message: fmt.Sprintf("Object param name and key name format is invalid: %s", invalidObjectNames), + Paths: []string{"params"}, + Details: "Object Names: \nMust only contain alphanumeric characters, hyphens (-), underscores (_) \nMust begin with a letter or an underscore (_)", + }) } + return errs } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_types.go new file mode 100644 index 0000000000..07aeb436d7 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_types.go @@ -0,0 +1,49 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// TaskRef can be used to refer to a specific instance of a task. +type TaskRef struct { + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name,omitempty"` + // TaskKind indicates the kind of the task, namespaced or cluster scoped. + Kind TaskKind `json:"kind,omitempty"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Bundle url reference to a Tekton Bundle. 
+ // +optional + Bundle string `json:"bundle,omitempty"` + + // ResolverRef allows referencing a Task in a remote location + // like a git repo. This field is only supported when the alpha + // feature gate is enabled. + // +optional + ResolverRef `json:",omitempty"` +} + +// Check that Pipeline may be validated and defaulted. + +// TaskKind defines the type of Task used by the pipeline. +type TaskKind string + +const ( + // NamespacedTaskKind indicates that the task type has a namespaced scope. + NamespacedTaskKind TaskKind = "Task" + // ClusterTaskKind indicates that task type has a cluster scope. + ClusterTaskKind TaskKind = "ClusterTask" +) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go index dc5e97611f..652eed6cff 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskref_validation.go @@ -21,81 +21,43 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" "knative.dev/pkg/apis" ) // Validate ensures that a supplied TaskRef field is populated // correctly. No errors are returned for a nil TaskRef. func (ref *TaskRef) Validate(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) if ref == nil { return } - if cfg.FeatureFlags.EnableAPIFields == config.AlphaAPIFields { - errs = errs.Also(ref.validateAlphaRef(ctx)) - } else { - errs = errs.Also(ref.validateInTreeRef(ctx)) - } - return -} -// validateInTreeRef returns errors if the given taskRef is not valid for -// Pipelines' built-in resolution machinery. 
-func (ref *TaskRef) validateInTreeRef(ctx context.Context) (errs *apis.FieldError) { - cfg := config.FromContextOrDefaults(ctx) - if ref.Resolver != "" { - errs = errs.Also(apis.ErrDisallowedFields("resolver")) - } - if ref.Resource != nil { - errs = errs.Also(apis.ErrDisallowedFields("resource")) - } - if ref.Name == "" { - errs = errs.Also(apis.ErrMissingField("name")) - } - if cfg.FeatureFlags.EnableTektonOCIBundles { - if ref.Bundle != "" && ref.Name == "" { - errs = errs.Also(apis.ErrMissingField("name")) + switch { + case ref.Resolver != "": + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resolver", config.AlphaAPIFields).ViaField("resolver")) + if ref.Name != "" { + errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) } if ref.Bundle != "" { - if _, err := name.ParseReference(ref.Bundle); err != nil { - errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) - } - } - } else if ref.Bundle != "" { - errs = errs.Also(apis.ErrDisallowedFields("bundle")) - } - return -} - -// validateAlphaRef ensures that the user has passed either a -// valid remote resource reference or a valid in-tree resource reference, -// but not both. 
-func (ref *TaskRef) validateAlphaRef(ctx context.Context) (errs *apis.FieldError) { - hasResolver := ref.Resolver != "" - hasResource := ref.Resource != nil - hasName := ref.Name != "" - hasBundle := ref.Bundle != "" - if hasName { - if hasResolver { - errs = errs.Also(apis.ErrMultipleOneOf("name", "resolver")) + errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver")) } - if hasResource { + case ref.Resource != nil: + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "resource", config.AlphaAPIFields).ViaField("resource")) + if ref.Name != "" { errs = errs.Also(apis.ErrMultipleOneOf("name", "resource")) } - } - if hasBundle { - if hasResolver { - errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resolver")) - } - if hasResource { + if ref.Bundle != "" { errs = errs.Also(apis.ErrMultipleOneOf("bundle", "resource")) } - } - if !hasResolver { - if hasResource { + if ref.Resolver == "" { errs = errs.Also(apis.ErrMissingField("resolver")) - } else { - errs = errs.Also(ref.validateInTreeRef(ctx)) + } + case ref.Name == "": + errs = errs.Also(apis.ErrMissingField("name")) + case ref.Bundle != "": + errs = errs.Also(validateBundleFeatureFlag(ctx, "bundle", true).ViaField("bundle")) + if _, err := name.ParseReference(ref.Bundle); err != nil { + errs = errs.Also(apis.ErrInvalidValue("invalid bundle reference", "bundle", err.Error())) } } return diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go index 654ac4ffc5..074c2bf14b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go @@ -27,10 +27,16 @@ var _ apis.Convertible = (*TaskRun)(nil) // ConvertTo implements api.Convertible func (tr *TaskRun) ConvertTo(ctx context.Context, sink apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } 
return fmt.Errorf("v1beta1 is the highest known version, got: %T", sink) } // ConvertFrom implements api.Convertible func (tr *TaskRun) ConvertFrom(ctx context.Context, source apis.Convertible) error { + if apis.IsInDelete(ctx) { + return nil + } return fmt.Errorf("v1beta1 is the highest know version, got: %T", source) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go index 03e98bec97..767ce332fe 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -77,6 +77,8 @@ type TaskRunSpec struct { // +optional // +listType=atomic SidecarOverrides []TaskRunSidecarOverride `json:"sidecarOverrides,omitempty"` + // Compute resources to use for this TaskRun + ComputeResources *corev1.ResourceRequirements `json:"computeResources,omitempty"` } // TaskRunSpecStatus defines the taskrun spec status the user can provide @@ -122,6 +124,19 @@ type TaskRunStatus struct { TaskRunStatusFields `json:",inline"` } +// TaskRunConditionType is an enum used to store TaskRun custom conditions +// conditions such as one used in spire results verification +type TaskRunConditionType string + +const ( + // TaskRunConditionResultsVerified is a Condition Type that indicates that the results were verified by spire + TaskRunConditionResultsVerified TaskRunConditionType = "SignedResultsVerified" +) + +func (t TaskRunConditionType) String() string { + return string(t) +} + // TaskRunReason is an enum used to store all TaskRun reason for // the Succeeded condition that are controlled by the TaskRun itself. 
Failure // reasons that emerge from underlying resources are not included here @@ -145,6 +160,12 @@ const ( TaskRunReasonResolvingTaskRef = "ResolvingTaskRef" // TaskRunReasonImagePullFailed is the reason set when the step of a task fails due to image not being pulled TaskRunReasonImagePullFailed TaskRunReason = "TaskRunImagePullFailed" + // TaskRunReasonResultsVerified is the reason set when the TaskRun results are verified by spire + TaskRunReasonResultsVerified TaskRunReason = "TaskRunResultsVerified" + // TaskRunReasonsResultsVerificationFailed is the reason set when the TaskRun results are failed to verify by spire + TaskRunReasonsResultsVerificationFailed TaskRunReason = "TaskRunResultsVerificationFailed" + // AwaitingTaskRunResults is the reason set when waiting upon `TaskRun` results and signatures to verify + AwaitingTaskRunResults TaskRunReason = "AwaitingTaskRunResults" ) func (t TaskRunReason) String() string { @@ -422,6 +443,16 @@ func (tr *TaskRun) IsCancelled() bool { return tr.Spec.Status == TaskRunSpecStatusCancelled } +// IsTaskRunResultVerified returns true if the TaskRun's results have been validated by spire. 
+func (tr *TaskRun) IsTaskRunResultVerified() bool { + return tr.Status.GetCondition(apis.ConditionType(TaskRunConditionResultsVerified.String())).IsTrue() +} + +// IsTaskRunResultDone returns true if the TaskRun's results are available for verification +func (tr *TaskRun) IsTaskRunResultDone() bool { + return !tr.Status.GetCondition(apis.ConditionType(TaskRunConditionResultsVerified.String())).IsUnknown() +} + // HasTimedOut returns true if the TaskRun runtime is beyond the allowed timeout func (tr *TaskRun) HasTimedOut(ctx context.Context, c clock.PassiveClock) bool { if tr.Status.StartTime.IsZero() { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go index e4fa03228e..d43032a1db 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go @@ -23,6 +23,8 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/validate" + "github.com/tektoncd/pipeline/pkg/apis/version" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" ) @@ -31,10 +33,10 @@ var _ apis.Validatable = (*TaskRun)(nil) // Validate taskrun func (tr *TaskRun) Validate(ctx context.Context) *apis.FieldError { - errs := validate.ObjectMetadata(tr.GetObjectMeta()).ViaField("metadata") if apis.IsInDelete(ctx) { return nil } + errs := validate.ObjectMetadata(tr.GetObjectMeta()).ViaField("metadata") return errs.Also(tr.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")) } @@ -56,21 +58,25 @@ func (ts *TaskRunSpec) Validate(ctx context.Context) (errs *apis.FieldError) { errs = errs.Also(ts.TaskSpec.Validate(ctx).ViaField("taskSpec")) } - errs = errs.Also(validateParameters(ctx, ts.Params).ViaField("params")) - errs = errs.Also(validateWorkspaceBindings(ctx, 
ts.Workspaces).ViaField("workspaces")) + errs = errs.Also(ValidateParameters(ctx, ts.Params).ViaField("params")) + errs = errs.Also(ValidateWorkspaceBindings(ctx, ts.Workspaces).ViaField("workspaces")) errs = errs.Also(ts.Resources.Validate(ctx).ViaField("resources")) if ts.Debug != nil { - errs = errs.Also(ValidateEnabledAPIFields(ctx, "debug", config.AlphaAPIFields).ViaField("debug")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "debug", config.AlphaAPIFields).ViaField("debug")) errs = errs.Also(validateDebug(ts.Debug).ViaField("debug")) } if ts.StepOverrides != nil { - errs = errs.Also(ValidateEnabledAPIFields(ctx, "stepOverrides", config.AlphaAPIFields).ViaField("stepOverrides")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "stepOverrides", config.AlphaAPIFields).ViaField("stepOverrides")) errs = errs.Also(validateStepOverrides(ts.StepOverrides).ViaField("stepOverrides")) } if ts.SidecarOverrides != nil { - errs = errs.Also(ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.AlphaAPIFields).ViaField("sidecarOverrides")) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "sidecarOverrides", config.AlphaAPIFields).ViaField("sidecarOverrides")) errs = errs.Also(validateSidecarOverrides(ts.SidecarOverrides).ViaField("sidecarOverrides")) } + if ts.ComputeResources != nil { + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "computeResources", config.AlphaAPIFields).ViaField("computeResources")) + errs = errs.Also(validateTaskRunComputeResources(ts.ComputeResources, ts.StepOverrides)) + } if ts.Status != "" { if ts.Status != TaskRunSpecStatusCancelled { @@ -101,8 +107,8 @@ func validateDebug(db *TaskRunDebug) (errs *apis.FieldError) { return errs } -// validateWorkspaceBindings makes sure the volumes provided for the Task's declared workspaces make sense. 
-func validateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs *apis.FieldError) { +// ValidateWorkspaceBindings makes sure the volumes provided for the Task's declared workspaces make sense. +func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs *apis.FieldError) { var names []string for idx, w := range wb { names = append(names, w.Name) @@ -112,13 +118,14 @@ func validateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs return errs } -func validateParameters(ctx context.Context, params []Param) (errs *apis.FieldError) { +// ValidateParameters makes sure the params for the Task are valid. +func ValidateParameters(ctx context.Context, params []Param) (errs *apis.FieldError) { var names []string for _, p := range params { if p.Value.Type == ParamTypeObject { // Object type parameter is an alpha feature and will fail validation if it's used in a taskrun spec // when the enable-api-fields feature gate is not "alpha". - errs = errs.Also(ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) + errs = errs.Also(version.ValidateEnabledAPIFields(ctx, "object type parameter", config.AlphaAPIFields)) } names = append(names, p.Name) } @@ -138,6 +145,19 @@ func validateStepOverrides(overrides []TaskRunStepOverride) (errs *apis.FieldErr return errs } +// validateTaskRunComputeResources ensures that compute resources are not configured at both the step level and the task level +func validateTaskRunComputeResources(computeResources *corev1.ResourceRequirements, overrides []TaskRunStepOverride) (errs *apis.FieldError) { + for _, override := range overrides { + if override.Resources.Size() != 0 && computeResources != nil { + return apis.ErrMultipleOneOf( + "stepOverrides.resources", + "computeResources", + ) + } + } + return nil +} + func validateSidecarOverrides(overrides []TaskRunSidecarOverride) (errs *apis.FieldError) { var names []string for i, o := range overrides { diff --git 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go index 0d6f170d5e..ac53da805d 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go @@ -62,9 +62,12 @@ func (we *WhenExpression) applyReplacements(replacements map[string]string, arra for _, val := range we.Values { // arrayReplacements holds a list of array parameters with a pattern - params.arrayParam1 // array params are referenced using $(params.arrayParam1[*]) + // array results are referenced using $(results.resultname[*]) // check if the param exist in the arrayReplacements to replace it with a list of values if _, ok := arrayReplacements[fmt.Sprintf("%s.%s", ParamsPrefix, ArrayReference(val))]; ok { replacedValues = append(replacedValues, substitution.ApplyArrayReplacements(val, replacements, arrayReplacements)...) + } else if _, ok := arrayReplacements[ResultsArrayReference(val)]; ok { + replacedValues = append(replacedValues, substitution.ApplyArrayReplacements(val, replacements, arrayReplacements)...) 
} else { replacedValues = append(replacedValues, substitution.ApplyReplacements(val, replacements)) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go index 316dbf10f2..b5a0b1c8e9 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go @@ -74,17 +74,17 @@ func (wes WhenExpressions) validateTaskResultsVariables() *apis.FieldError { return nil } -func (wes WhenExpressions) validatePipelineParametersVariables(prefix string, paramNames sets.String, arrayParamNames sets.String) (errs *apis.FieldError) { +func (wes WhenExpressions) validatePipelineParametersVariables(prefix string, paramNames sets.String, arrayParamNames sets.String, objectParamNameKeys map[string][]string) (errs *apis.FieldError) { for idx, we := range wes { - errs = errs.Also(validateStringVariable(we.Input, prefix, paramNames, arrayParamNames).ViaField("input").ViaFieldIndex("when", idx)) + errs = errs.Also(validateStringVariable(we.Input, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("input").ViaFieldIndex("when", idx)) for _, val := range we.Values { // one of the values could be a reference to an array param, such as, $(params.foo[*]) // extract the variable name from the pattern $(params.foo[*]), if the variable name matches with one of the array params // validate the param as an array variable otherwise, validate it as a string variable if arrayParamNames.Has(ArrayReference(val)) { - errs = errs.Also(validateArrayVariable(val, prefix, paramNames, arrayParamNames).ViaField("values").ViaFieldIndex("when", idx)) + errs = errs.Also(validateArrayVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("values").ViaFieldIndex("when", idx)) } else { - errs = errs.Also(validateStringVariable(val, prefix, 
paramNames, arrayParamNames).ViaField("values").ViaFieldIndex("when", idx)) + errs = errs.Also(validateStringVariable(val, prefix, paramNames, arrayParamNames, objectParamNameKeys).ViaField("values").ViaFieldIndex("when", idx)) } } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go index 263d1d21cb..f915fe13df 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go @@ -77,6 +77,12 @@ type WorkspaceBinding struct { // Secret represents a secret that should populate this workspace. // +optional Secret *corev1.SecretVolumeSource `json:"secret,omitempty"` + // Projected represents a projected volume that should populate this workspace. + // +optional + Projected *corev1.ProjectedVolumeSource `json:"projected,omitempty"` + // CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers. 
+ // +optional + CSI *corev1.CSIVolumeSource `json:"csi,omitempty"` } // WorkspacePipelineDeclaration creates a named slot in a Pipeline that a PipelineRun diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_validation.go index 1aff7f0440..96bb846deb 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_validation.go @@ -19,6 +19,8 @@ package v1beta1 import ( "context" + "github.com/tektoncd/pipeline/pkg/apis/config" + "github.com/tektoncd/pipeline/pkg/apis/version" "k8s.io/apimachinery/pkg/api/equality" "knative.dev/pkg/apis" ) @@ -36,7 +38,7 @@ var allVolumeSourceFields = []string{ // Validate looks at the Volume provided in wb and makes sure that it is valid. // This means that only one VolumeSource can be specified, and also that the // supported VolumeSource is itself valid. -func (b *WorkspaceBinding) Validate(ctx context.Context) *apis.FieldError { +func (b *WorkspaceBinding) Validate(ctx context.Context) (errs *apis.FieldError) { if equality.Semantic.DeepEqual(b, &WorkspaceBinding{}) || b == nil { return apis.ErrMissingField(apis.CurrentField) } @@ -66,6 +68,29 @@ func (b *WorkspaceBinding) Validate(ctx context.Context) *apis.FieldError { return apis.ErrMissingField("secret.secretName") } + // The projected workspace is only supported when the alpha feature gate is enabled. + // For a Projected volume to work, you must provide at least one source. + if b.Projected != nil { + if err := version.ValidateEnabledAPIFields(ctx, "projected workspace type", config.AlphaAPIFields).ViaField("workspace"); err != nil { + return err + } + if len(b.Projected.Sources) == 0 { + return apis.ErrMissingField("projected.sources") + } + } + + // The csi workspace is only supported when the alpha feature gate is enabled. 
+ // For a CSI to work, you must provide and have installed the driver to use. + if b.CSI != nil { + errs := version.ValidateEnabledAPIFields(ctx, "csi workspace type", config.AlphaAPIFields).ViaField("workspaces") + if errs != nil { + return errs + } + if b.CSI.Driver == "" { + return apis.ErrMissingField("csi.driver") + } + } + return nil } @@ -88,5 +113,11 @@ func (b *WorkspaceBinding) numSources() int { if b.Secret != nil { n++ } + if b.Projected != nil { + n++ + } + if b.CSI != nil { + n++ + } return n } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go index ad3f2c5ff7..a793eb3d63 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go @@ -435,6 +435,7 @@ func (in *PipelineResourceResult) DeepCopy() *PipelineResourceResult { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PipelineResult) DeepCopyInto(out *PipelineResult) { *out = *in + in.Value.DeepCopyInto(&out.Value) return } @@ -512,6 +513,7 @@ func (in *PipelineRunList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PipelineRunResult) DeepCopyInto(out *PipelineRunResult) { *out = *in + in.Value.DeepCopyInto(&out.Value) return } @@ -684,7 +686,9 @@ func (in *PipelineRunStatusFields) DeepCopyInto(out *PipelineRunStatusFields) { if in.PipelineResults != nil { in, out := &in.PipelineResults, &out.PipelineResults *out = make([]PipelineRunResult, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.PipelineSpec != nil { in, out := &in.PipelineSpec, &out.PipelineSpec @@ -776,7 +780,9 @@ func (in *PipelineSpec) DeepCopyInto(out *PipelineSpec) { if in.Results != nil { in, out := &in.Results, &out.Results *out = make([]PipelineResult, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } if in.Finally != nil { in, out := &in.Finally, &out.Finally @@ -1041,6 +1047,11 @@ func (in *PipelineTaskRunSpec) DeepCopyInto(out *PipelineTaskRunSpec) { *out = new(PipelineTaskMetadata) (*in).DeepCopyInto(*out) } + if in.ComputeResources != nil { + in, out := &in.ComputeResources, &out.ComputeResources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } @@ -1347,6 +1358,16 @@ func (in *Step) DeepCopyInto(out *Step) { *out = make([]WorkspaceUsage, len(*in)) copy(*out, *in) } + if in.StdoutConfig != nil { + in, out := &in.StdoutConfig, &out.StdoutConfig + *out = new(StepOutputConfig) + **out = **in + } + if in.StderrConfig != nil { + in, out := &in.StderrConfig, &out.StderrConfig + *out = new(StepOutputConfig) + **out = **in + } return } @@ -1360,6 +1381,22 @@ func (in *Step) DeepCopy() *Step { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepOutputConfig) DeepCopyInto(out *StepOutputConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepOutputConfig. 
+func (in *StepOutputConfig) DeepCopy() *StepOutputConfig { + if in == nil { + return nil + } + out := new(StepOutputConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StepState) DeepCopyInto(out *StepState) { *out = *in @@ -1605,6 +1642,13 @@ func (in *TaskResources) DeepCopy() *TaskResources { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TaskResult) DeepCopyInto(out *TaskResult) { *out = *in + if in.Properties != nil { + in, out := &in.Properties, &out.Properties + *out = make(map[string]PropertySpec, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -1878,6 +1922,11 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.ComputeResources != nil { + in, out := &in.ComputeResources, &out.ComputeResources + *out = new(v1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } @@ -2044,7 +2093,9 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { if in.Results != nil { in, out := &in.Results, &out.Results *out = make([]TaskResult, len(*in)) - copy(*out, *in) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } @@ -2161,6 +2212,16 @@ func (in *WorkspaceBinding) DeepCopyInto(out *WorkspaceBinding) { *out = new(v1.SecretVolumeSource) (*in).DeepCopyInto(*out) } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(v1.ProjectedVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(v1.CSIVolumeSource) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/version_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/version/version_validation.go similarity index 98% rename from 
vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/version_validation.go rename to vendor/github.com/tektoncd/pipeline/pkg/apis/version/version_validation.go index 4d58eeb755..bf8f6bf156 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/version_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/version/version_validation.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package version import ( "context" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go index 0681ce1abd..0763f48419 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go @@ -22,6 +22,7 @@ import ( "fmt" "net/http" + tektonv1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1" tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" tektonv1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1" discovery "k8s.io/client-go/discovery" @@ -33,6 +34,7 @@ type Interface interface { Discovery() discovery.DiscoveryInterface TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface TektonV1beta1() tektonv1beta1.TektonV1beta1Interface + TektonV1() tektonv1.TektonV1Interface } // Clientset contains the clients for groups. 
Each group has exactly one @@ -41,6 +43,7 @@ type Clientset struct { *discovery.DiscoveryClient tektonV1alpha1 *tektonv1alpha1.TektonV1alpha1Client tektonV1beta1 *tektonv1beta1.TektonV1beta1Client + tektonV1 *tektonv1.TektonV1Client } // TektonV1alpha1 retrieves the TektonV1alpha1Client @@ -53,6 +56,11 @@ func (c *Clientset) TektonV1beta1() tektonv1beta1.TektonV1beta1Interface { return c.tektonV1beta1 } +// TektonV1 retrieves the TektonV1Client +func (c *Clientset) TektonV1() tektonv1.TektonV1Interface { + return c.tektonV1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -101,6 +109,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.tektonV1, err = tektonv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { @@ -124,6 +136,7 @@ func New(c rest.Interface) *Clientset { var cs Clientset cs.tektonV1alpha1 = tektonv1alpha1.New(c) cs.tektonV1beta1 = tektonv1beta1.New(c) + cs.tektonV1 = tektonv1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/clientset_generated.go index 473078696a..9821878898 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -20,6 +20,8 @@ package fake import ( clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + tektonv1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1" + faketektonv1 
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake" tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" faketektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake" tektonv1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1" @@ -90,3 +92,8 @@ func (c *Clientset) TektonV1alpha1() tektonv1alpha1.TektonV1alpha1Interface { func (c *Clientset) TektonV1beta1() tektonv1beta1.TektonV1beta1Interface { return &faketektonv1beta1.FakeTektonV1beta1{Fake: &c.Fake} } + +// TektonV1 retrieves the TektonV1Client +func (c *Clientset) TektonV1() tektonv1.TektonV1Interface { + return &faketektonv1.FakeTektonV1{Fake: &c.Fake} +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/register.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/register.go index 644e02d596..951cec59e8 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake/register.go @@ -19,6 +19,7 @@ limitations under the License. package fake import ( + tektonv1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +35,7 @@ var codecs = serializer.NewCodecFactory(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ tektonv1alpha1.AddToScheme, tektonv1beta1.AddToScheme, + tektonv1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go index 757e6eb815..21c3e532df 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go @@ -19,6 +19,7 @@ limitations under the License. package scheme import ( + tektonv1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" tektonv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" tektonv1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,6 +35,7 @@ var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ tektonv1alpha1.AddToScheme, tektonv1beta1.AddToScheme, + tektonv1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/doc.go similarity index 67% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/doc.go index d545dddd31..6f9b4198cf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -14,13 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +// Code generated by client-gen. DO NOT EDIT. -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// TaskObject is implemented by Task and ClusterTask -type TaskObject interface { - TaskMetadata() metav1.ObjectMeta - TaskSpec() TaskSpec - Copy() TaskObject -} +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/doc.go similarity index 64% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/doc.go index fe18e3e697..1a72e0befe 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,17 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +// Code generated by client-gen. DO NOT EDIT. -import ( - "context" - - "knative.dev/pkg/apis" -) - -var _ apis.Defaultable = (*ClusterTask)(nil) - -// SetDefaults sets the default values for the ClusterTask's Spec. -func (t *ClusterTask) SetDefaults(ctx context.Context) { - t.Spec.SetDefaults(ctx) -} +// Package fake has the automatically generated clients. 
+package fake diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_pipeline_client.go new file mode 100644 index 0000000000..dced9d8bc5 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_pipeline_client.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeTektonV1 struct { + *testing.Fake +} + +func (c *FakeTektonV1) Tasks(namespace string) v1.TaskInterface { + return &FakeTasks{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeTektonV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_task.go similarity index 72% rename from vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_task.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_task.go index f3dea1b1f5..8d1e7570ed 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_task.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake/fake_task.go @@ -21,7 +21,7 @@ package fake import ( "context" - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -32,29 +32,29 @@ import ( // FakeTasks implements TaskInterface type FakeTasks struct { - Fake *FakeTektonV1alpha1 + Fake *FakeTektonV1 ns string } -var tasksResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1alpha1", Resource: "tasks"} +var tasksResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1", Resource: "tasks"} -var tasksKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1alpha1", Kind: "Task"} +var tasksKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1", Kind: "Task"} // Get takes name of the task, and returns the corresponding task object, and an error if there is any. 
-func (c *FakeTasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Task, err error) { +func (c *FakeTasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *pipelinev1.Task, err error) { obj, err := c.Fake. - Invokes(testing.NewGetAction(tasksResource, c.ns, name), &v1alpha1.Task{}) + Invokes(testing.NewGetAction(tasksResource, c.ns, name), &pipelinev1.Task{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.Task), err + return obj.(*pipelinev1.Task), err } // List takes label and field selectors, and returns the list of Tasks that match those selectors. -func (c *FakeTasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TaskList, err error) { +func (c *FakeTasks) List(ctx context.Context, opts v1.ListOptions) (result *pipelinev1.TaskList, err error) { obj, err := c.Fake. - Invokes(testing.NewListAction(tasksResource, tasksKind, c.ns, opts), &v1alpha1.TaskList{}) + Invokes(testing.NewListAction(tasksResource, tasksKind, c.ns, opts), &pipelinev1.TaskList{}) if obj == nil { return nil, err @@ -64,8 +64,8 @@ func (c *FakeTasks) List(ctx context.Context, opts v1.ListOptions) (result *v1al if label == nil { label = labels.Everything() } - list := &v1alpha1.TaskList{ListMeta: obj.(*v1alpha1.TaskList).ListMeta} - for _, item := range obj.(*v1alpha1.TaskList).Items { + list := &pipelinev1.TaskList{ListMeta: obj.(*pipelinev1.TaskList).ListMeta} + for _, item := range obj.(*pipelinev1.TaskList).Items { if label.Matches(labels.Set(item.Labels)) { list.Items = append(list.Items, item) } @@ -81,31 +81,31 @@ func (c *FakeTasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Inter } // Create takes the representation of a task and creates it. Returns the server's representation of the task, and an error, if there is any. 
-func (c *FakeTasks) Create(ctx context.Context, task *v1alpha1.Task, opts v1.CreateOptions) (result *v1alpha1.Task, err error) { +func (c *FakeTasks) Create(ctx context.Context, task *pipelinev1.Task, opts v1.CreateOptions) (result *pipelinev1.Task, err error) { obj, err := c.Fake. - Invokes(testing.NewCreateAction(tasksResource, c.ns, task), &v1alpha1.Task{}) + Invokes(testing.NewCreateAction(tasksResource, c.ns, task), &pipelinev1.Task{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.Task), err + return obj.(*pipelinev1.Task), err } // Update takes the representation of a task and updates it. Returns the server's representation of the task, and an error, if there is any. -func (c *FakeTasks) Update(ctx context.Context, task *v1alpha1.Task, opts v1.UpdateOptions) (result *v1alpha1.Task, err error) { +func (c *FakeTasks) Update(ctx context.Context, task *pipelinev1.Task, opts v1.UpdateOptions) (result *pipelinev1.Task, err error) { obj, err := c.Fake. - Invokes(testing.NewUpdateAction(tasksResource, c.ns, task), &v1alpha1.Task{}) + Invokes(testing.NewUpdateAction(tasksResource, c.ns, task), &pipelinev1.Task{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.Task), err + return obj.(*pipelinev1.Task), err } // Delete takes name of the task and deletes it. Returns an error if one occurs. func (c *FakeTasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteActionWithOptions(tasksResource, c.ns, name, opts), &v1alpha1.Task{}) + Invokes(testing.NewDeleteActionWithOptions(tasksResource, c.ns, name, opts), &pipelinev1.Task{}) return err } @@ -114,17 +114,17 @@ func (c *FakeTasks) Delete(ctx context.Context, name string, opts v1.DeleteOptio func (c *FakeTasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { action := testing.NewDeleteCollectionAction(tasksResource, c.ns, listOpts) - _, err := c.Fake.Invokes(action, &v1alpha1.TaskList{}) + _, err := c.Fake.Invokes(action, &pipelinev1.TaskList{}) return err } // Patch applies the patch and returns the patched task. -func (c *FakeTasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Task, err error) { +func (c *FakeTasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1.Task, err error) { obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(tasksResource, c.ns, name, pt, data, subresources...), &v1alpha1.Task{}) + Invokes(testing.NewPatchSubresourceAction(tasksResource, c.ns, name, pt, data, subresources...), &pipelinev1.Task{}) if obj == nil { return nil, err } - return obj.(*v1alpha1.Task), err + return obj.(*pipelinev1.Task), err } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/generated_expansion.go similarity index 81% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_validation.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/generated_expansion.go index 7536360d31..1d86b44247 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,4 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type TaskExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipeline_client.go new file mode 100644 index 0000000000..d2baa0206d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/pipeline_client.go @@ -0,0 +1,107 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type TektonV1Interface interface { + RESTClient() rest.Interface + TasksGetter +} + +// TektonV1Client is used to interact with features provided by the tekton.dev group. +type TektonV1Client struct { + restClient rest.Interface +} + +func (c *TektonV1Client) Tasks(namespace string) TaskInterface { + return newTasks(c, namespace) +} + +// NewForConfig creates a new TektonV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
+func NewForConfig(c *rest.Config) (*TektonV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new TektonV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*TektonV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &TektonV1Client{client}, nil +} + +// NewForConfigOrDie creates a new TektonV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *TektonV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new TektonV1Client for the given RESTClient. +func New(c rest.Interface) *TektonV1Client { + return &TektonV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *TektonV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/task.go similarity index 67% rename from vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/task.go index ba5b46d27d..d900e6f612 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/task.go @@ -16,15 +16,15 @@ limitations under the License. // Code generated by client-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "context" "time" - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" rest "k8s.io/client-go/rest" @@ -38,14 +38,14 @@ type TasksGetter interface { // TaskInterface has methods to work with Task resources. 
type TaskInterface interface { - Create(ctx context.Context, task *v1alpha1.Task, opts v1.CreateOptions) (*v1alpha1.Task, error) - Update(ctx context.Context, task *v1alpha1.Task, opts v1.UpdateOptions) (*v1alpha1.Task, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Task, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.TaskList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Task, err error) + Create(ctx context.Context, task *v1.Task, opts metav1.CreateOptions) (*v1.Task, error) + Update(ctx context.Context, task *v1.Task, opts metav1.UpdateOptions) (*v1.Task, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Task, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.TaskList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Task, err error) TaskExpansion } @@ -56,7 +56,7 @@ type tasks struct { } // newTasks returns a Tasks -func newTasks(c *TektonV1alpha1Client, namespace string) *tasks { +func newTasks(c *TektonV1Client, namespace string) *tasks { return &tasks{ client: c.RESTClient(), ns: namespace, @@ -64,8 +64,8 @@ func newTasks(c *TektonV1alpha1Client, namespace string) *tasks { } // Get takes name of the task, and returns the corresponding task object, and an error if there is any. 
-func (c *tasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Task, err error) { - result = &v1alpha1.Task{} +func (c *tasks) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Task, err error) { + result = &v1.Task{} err = c.client.Get(). Namespace(c.ns). Resource("tasks"). @@ -77,12 +77,12 @@ func (c *tasks) Get(ctx context.Context, name string, options v1.GetOptions) (re } // List takes label and field selectors, and returns the list of Tasks that match those selectors. -func (c *tasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TaskList, err error) { +func (c *tasks) List(ctx context.Context, opts metav1.ListOptions) (result *v1.TaskList, err error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second } - result = &v1alpha1.TaskList{} + result = &v1.TaskList{} err = c.client.Get(). Namespace(c.ns). Resource("tasks"). @@ -94,7 +94,7 @@ func (c *tasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1 } // Watch returns a watch.Interface that watches the requested tasks. -func (c *tasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (c *tasks) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { var timeout time.Duration if opts.TimeoutSeconds != nil { timeout = time.Duration(*opts.TimeoutSeconds) * time.Second @@ -109,8 +109,8 @@ func (c *tasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface } // Create takes the representation of a task and creates it. Returns the server's representation of the task, and an error, if there is any. 
-func (c *tasks) Create(ctx context.Context, task *v1alpha1.Task, opts v1.CreateOptions) (result *v1alpha1.Task, err error) { - result = &v1alpha1.Task{} +func (c *tasks) Create(ctx context.Context, task *v1.Task, opts metav1.CreateOptions) (result *v1.Task, err error) { + result = &v1.Task{} err = c.client.Post(). Namespace(c.ns). Resource("tasks"). @@ -122,8 +122,8 @@ func (c *tasks) Create(ctx context.Context, task *v1alpha1.Task, opts v1.CreateO } // Update takes the representation of a task and updates it. Returns the server's representation of the task, and an error, if there is any. -func (c *tasks) Update(ctx context.Context, task *v1alpha1.Task, opts v1.UpdateOptions) (result *v1alpha1.Task, err error) { - result = &v1alpha1.Task{} +func (c *tasks) Update(ctx context.Context, task *v1.Task, opts metav1.UpdateOptions) (result *v1.Task, err error) { + result = &v1.Task{} err = c.client.Put(). Namespace(c.ns). Resource("tasks"). @@ -136,7 +136,7 @@ func (c *tasks) Update(ctx context.Context, task *v1alpha1.Task, opts v1.UpdateO } // Delete takes name of the task and deletes it. Returns an error if one occurs. -func (c *tasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (c *tasks) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("tasks"). @@ -147,7 +147,7 @@ func (c *tasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) } // DeleteCollection deletes a collection of objects. 
-func (c *tasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (c *tasks) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { var timeout time.Duration if listOpts.TimeoutSeconds != nil { timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second @@ -163,8 +163,8 @@ func (c *tasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, lis } // Patch applies the patch and returns the patched task. -func (c *tasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Task, err error) { - result = &v1alpha1.Task{} +func (c *tasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Task, err error) { + result = &v1.Task{} err = c.client.Patch(pt). Namespace(c.ns). Resource("tasks"). diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go deleted file mode 100644 index 570202483b..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// ClusterTasksGetter has a method to return a ClusterTaskInterface. -// A group's client should implement this interface. -type ClusterTasksGetter interface { - ClusterTasks() ClusterTaskInterface -} - -// ClusterTaskInterface has methods to work with ClusterTask resources. -type ClusterTaskInterface interface { - Create(ctx context.Context, clusterTask *v1alpha1.ClusterTask, opts v1.CreateOptions) (*v1alpha1.ClusterTask, error) - Update(ctx context.Context, clusterTask *v1alpha1.ClusterTask, opts v1.UpdateOptions) (*v1alpha1.ClusterTask, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTask, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTaskList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTask, err error) - ClusterTaskExpansion -} - -// clusterTasks implements ClusterTaskInterface -type clusterTasks struct { - client rest.Interface -} - -// newClusterTasks returns a ClusterTasks -func newClusterTasks(c *TektonV1alpha1Client) *clusterTasks { - return &clusterTasks{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterTask, and returns the corresponding clusterTask object, and an error if there is any. 
-func (c *clusterTasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTask, err error) { - result = &v1alpha1.ClusterTask{} - err = c.client.Get(). - Resource("clustertasks"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterTasks that match those selectors. -func (c *clusterTasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTaskList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ClusterTaskList{} - err = c.client.Get(). - Resource("clustertasks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterTasks. -func (c *clusterTasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("clustertasks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a clusterTask and creates it. Returns the server's representation of the clusterTask, and an error, if there is any. -func (c *clusterTasks) Create(ctx context.Context, clusterTask *v1alpha1.ClusterTask, opts v1.CreateOptions) (result *v1alpha1.ClusterTask, err error) { - result = &v1alpha1.ClusterTask{} - err = c.client.Post(). - Resource("clustertasks"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterTask). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a clusterTask and updates it. 
Returns the server's representation of the clusterTask, and an error, if there is any. -func (c *clusterTasks) Update(ctx context.Context, clusterTask *v1alpha1.ClusterTask, opts v1.UpdateOptions) (result *v1alpha1.ClusterTask, err error) { - result = &v1alpha1.ClusterTask{} - err = c.client.Put(). - Resource("clustertasks"). - Name(clusterTask.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(clusterTask). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the clusterTask and deletes it. Returns an error if one occurs. -func (c *clusterTasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clustertasks"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterTasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("clustertasks"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched clusterTask. -func (c *clusterTasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTask, err error) { - result = &v1alpha1.ClusterTask{} - err = c.client.Patch(pt). - Resource("clustertasks"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_clustertask.go deleted file mode 100644 index 65275200cb..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_clustertask.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeClusterTasks implements ClusterTaskInterface -type FakeClusterTasks struct { - Fake *FakeTektonV1alpha1 -} - -var clustertasksResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1alpha1", Resource: "clustertasks"} - -var clustertasksKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1alpha1", Kind: "ClusterTask"} - -// Get takes name of the clusterTask, and returns the corresponding clusterTask object, and an error if there is any. 
-func (c *FakeClusterTasks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterTask, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(clustertasksResource, name), &v1alpha1.ClusterTask{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterTask), err -} - -// List takes label and field selectors, and returns the list of ClusterTasks that match those selectors. -func (c *FakeClusterTasks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterTaskList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(clustertasksResource, clustertasksKind, opts), &v1alpha1.ClusterTaskList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.ClusterTaskList{ListMeta: obj.(*v1alpha1.ClusterTaskList).ListMeta} - for _, item := range obj.(*v1alpha1.ClusterTaskList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested clusterTasks. -func (c *FakeClusterTasks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(clustertasksResource, opts)) -} - -// Create takes the representation of a clusterTask and creates it. Returns the server's representation of the clusterTask, and an error, if there is any. -func (c *FakeClusterTasks) Create(ctx context.Context, clusterTask *v1alpha1.ClusterTask, opts v1.CreateOptions) (result *v1alpha1.ClusterTask, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(clustertasksResource, clusterTask), &v1alpha1.ClusterTask{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterTask), err -} - -// Update takes the representation of a clusterTask and updates it. 
Returns the server's representation of the clusterTask, and an error, if there is any. -func (c *FakeClusterTasks) Update(ctx context.Context, clusterTask *v1alpha1.ClusterTask, opts v1.UpdateOptions) (result *v1alpha1.ClusterTask, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(clustertasksResource, clusterTask), &v1alpha1.ClusterTask{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterTask), err -} - -// Delete takes name of the clusterTask and deletes it. Returns an error if one occurs. -func (c *FakeClusterTasks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteActionWithOptions(clustertasksResource, name, opts), &v1alpha1.ClusterTask{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeClusterTasks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(clustertasksResource, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.ClusterTaskList{}) - return err -} - -// Patch applies the patch and returns the patched clusterTask. -func (c *FakeClusterTasks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTask, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewRootPatchSubresourceAction(clustertasksResource, name, pt, data, subresources...), &v1alpha1.ClusterTask{}) - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.ClusterTask), err -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline.go deleted file mode 100644 index d79e790328..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakePipelines implements PipelineInterface -type FakePipelines struct { - Fake *FakeTektonV1alpha1 - ns string -} - -var pipelinesResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1alpha1", Resource: "pipelines"} - -var pipelinesKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1alpha1", Kind: "Pipeline"} - -// Get takes name of the pipeline, and returns the corresponding pipeline object, and an error if there is any. -func (c *FakePipelines) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Pipeline, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(pipelinesResource, c.ns, name), &v1alpha1.Pipeline{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Pipeline), err -} - -// List takes label and field selectors, and returns the list of Pipelines that match those selectors. -func (c *FakePipelines) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PipelineList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(pipelinesResource, pipelinesKind, c.ns, opts), &v1alpha1.PipelineList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.PipelineList{ListMeta: obj.(*v1alpha1.PipelineList).ListMeta} - for _, item := range obj.(*v1alpha1.PipelineList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested pipelines. 
-func (c *FakePipelines) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(pipelinesResource, c.ns, opts)) - -} - -// Create takes the representation of a pipeline and creates it. Returns the server's representation of the pipeline, and an error, if there is any. -func (c *FakePipelines) Create(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.CreateOptions) (result *v1alpha1.Pipeline, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(pipelinesResource, c.ns, pipeline), &v1alpha1.Pipeline{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Pipeline), err -} - -// Update takes the representation of a pipeline and updates it. Returns the server's representation of the pipeline, and an error, if there is any. -func (c *FakePipelines) Update(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (result *v1alpha1.Pipeline, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(pipelinesResource, c.ns, pipeline), &v1alpha1.Pipeline{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Pipeline), err -} - -// Delete takes name of the pipeline and deletes it. Returns an error if one occurs. -func (c *FakePipelines) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(pipelinesResource, c.ns, name, opts), &v1alpha1.Pipeline{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePipelines) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(pipelinesResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.PipelineList{}) - return err -} - -// Patch applies the patch and returns the patched pipeline. 
-func (c *FakePipelines) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Pipeline, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(pipelinesResource, c.ns, name, pt, data, subresources...), &v1alpha1.Pipeline{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.Pipeline), err -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go index 72bad222bd..7ef7a75e92 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipeline_client.go @@ -28,30 +28,10 @@ type FakeTektonV1alpha1 struct { *testing.Fake } -func (c *FakeTektonV1alpha1) ClusterTasks() v1alpha1.ClusterTaskInterface { - return &FakeClusterTasks{c} -} - -func (c *FakeTektonV1alpha1) Pipelines(namespace string) v1alpha1.PipelineInterface { - return &FakePipelines{c, namespace} -} - -func (c *FakeTektonV1alpha1) PipelineRuns(namespace string) v1alpha1.PipelineRunInterface { - return &FakePipelineRuns{c, namespace} -} - func (c *FakeTektonV1alpha1) Runs(namespace string) v1alpha1.RunInterface { return &FakeRuns{c, namespace} } -func (c *FakeTektonV1alpha1) Tasks(namespace string) v1alpha1.TaskInterface { - return &FakeTasks{c, namespace} -} - -func (c *FakeTektonV1alpha1) TaskRuns(namespace string) v1alpha1.TaskRunInterface { - return &FakeTaskRuns{c, namespace} -} - // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
func (c *FakeTektonV1alpha1) RESTClient() rest.Interface { diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelinerun.go deleted file mode 100644 index 6573a028bb..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_pipelinerun.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - "context" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakePipelineRuns implements PipelineRunInterface -type FakePipelineRuns struct { - Fake *FakeTektonV1alpha1 - ns string -} - -var pipelinerunsResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1alpha1", Resource: "pipelineruns"} - -var pipelinerunsKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1alpha1", Kind: "PipelineRun"} - -// Get takes name of the pipelineRun, and returns the corresponding pipelineRun object, and an error if there is any. 
-func (c *FakePipelineRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PipelineRun, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(pipelinerunsResource, c.ns, name), &v1alpha1.PipelineRun{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PipelineRun), err -} - -// List takes label and field selectors, and returns the list of PipelineRuns that match those selectors. -func (c *FakePipelineRuns) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PipelineRunList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(pipelinerunsResource, pipelinerunsKind, c.ns, opts), &v1alpha1.PipelineRunList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.PipelineRunList{ListMeta: obj.(*v1alpha1.PipelineRunList).ListMeta} - for _, item := range obj.(*v1alpha1.PipelineRunList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested pipelineRuns. -func (c *FakePipelineRuns) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(pipelinerunsResource, c.ns, opts)) - -} - -// Create takes the representation of a pipelineRun and creates it. Returns the server's representation of the pipelineRun, and an error, if there is any. -func (c *FakePipelineRuns) Create(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.CreateOptions) (result *v1alpha1.PipelineRun, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(pipelinerunsResource, c.ns, pipelineRun), &v1alpha1.PipelineRun{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PipelineRun), err -} - -// Update takes the representation of a pipelineRun and updates it. 
Returns the server's representation of the pipelineRun, and an error, if there is any. -func (c *FakePipelineRuns) Update(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.UpdateOptions) (result *v1alpha1.PipelineRun, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(pipelinerunsResource, c.ns, pipelineRun), &v1alpha1.PipelineRun{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PipelineRun), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePipelineRuns) UpdateStatus(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.UpdateOptions) (*v1alpha1.PipelineRun, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(pipelinerunsResource, "status", c.ns, pipelineRun), &v1alpha1.PipelineRun{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PipelineRun), err -} - -// Delete takes name of the pipelineRun and deletes it. Returns an error if one occurs. -func (c *FakePipelineRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(pipelinerunsResource, c.ns, name, opts), &v1alpha1.PipelineRun{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePipelineRuns) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(pipelinerunsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.PipelineRunList{}) - return err -} - -// Patch applies the patch and returns the patched pipelineRun. -func (c *FakePipelineRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PipelineRun, err error) { - obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(pipelinerunsResource, c.ns, name, pt, data, subresources...), &v1alpha1.PipelineRun{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.PipelineRun), err -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_taskrun.go deleted file mode 100644 index 72082b2668..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake/fake_taskrun.go +++ /dev/null @@ -1,142 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package fake - -import ( - "context" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeTaskRuns implements TaskRunInterface -type FakeTaskRuns struct { - Fake *FakeTektonV1alpha1 - ns string -} - -var taskrunsResource = schema.GroupVersionResource{Group: "tekton.dev", Version: "v1alpha1", Resource: "taskruns"} - -var taskrunsKind = schema.GroupVersionKind{Group: "tekton.dev", Version: "v1alpha1", Kind: "TaskRun"} - -// Get takes name of the taskRun, and returns the corresponding taskRun object, and an error if there is any. -func (c *FakeTaskRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.TaskRun, err error) { - obj, err := c.Fake. - Invokes(testing.NewGetAction(taskrunsResource, c.ns, name), &v1alpha1.TaskRun{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.TaskRun), err -} - -// List takes label and field selectors, and returns the list of TaskRuns that match those selectors. -func (c *FakeTaskRuns) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TaskRunList, err error) { - obj, err := c.Fake. - Invokes(testing.NewListAction(taskrunsResource, taskrunsKind, c.ns, opts), &v1alpha1.TaskRunList{}) - - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha1.TaskRunList{ListMeta: obj.(*v1alpha1.TaskRunList).ListMeta} - for _, item := range obj.(*v1alpha1.TaskRunList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested taskRuns. 
-func (c *FakeTaskRuns) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchAction(taskrunsResource, c.ns, opts)) - -} - -// Create takes the representation of a taskRun and creates it. Returns the server's representation of the taskRun, and an error, if there is any. -func (c *FakeTaskRuns) Create(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.CreateOptions) (result *v1alpha1.TaskRun, err error) { - obj, err := c.Fake. - Invokes(testing.NewCreateAction(taskrunsResource, c.ns, taskRun), &v1alpha1.TaskRun{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.TaskRun), err -} - -// Update takes the representation of a taskRun and updates it. Returns the server's representation of the taskRun, and an error, if there is any. -func (c *FakeTaskRuns) Update(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.UpdateOptions) (result *v1alpha1.TaskRun, err error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateAction(taskrunsResource, c.ns, taskRun), &v1alpha1.TaskRun{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.TaskRun), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeTaskRuns) UpdateStatus(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.UpdateOptions) (*v1alpha1.TaskRun, error) { - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceAction(taskrunsResource, "status", c.ns, taskRun), &v1alpha1.TaskRun{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.TaskRun), err -} - -// Delete takes name of the taskRun and deletes it. Returns an error if one occurs. -func (c *FakeTaskRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. 
- Invokes(testing.NewDeleteActionWithOptions(taskrunsResource, c.ns, name, opts), &v1alpha1.TaskRun{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeTaskRuns) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(taskrunsResource, c.ns, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha1.TaskRunList{}) - return err -} - -// Patch applies the patch and returns the patched taskRun. -func (c *FakeTaskRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TaskRun, err error) { - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(taskrunsResource, c.ns, name, pt, data, subresources...), &v1alpha1.TaskRun{}) - - if obj == nil { - return nil, err - } - return obj.(*v1alpha1.TaskRun), err -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go index 58b8994efd..40814697cf 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go @@ -18,14 +18,4 @@ limitations under the License. 
package v1alpha1 -type ClusterTaskExpansion interface{} - -type PipelineExpansion interface{} - -type PipelineRunExpansion interface{} - type RunExpansion interface{} - -type TaskExpansion interface{} - -type TaskRunExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go deleted file mode 100644 index cc0a370f77..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// PipelinesGetter has a method to return a PipelineInterface. -// A group's client should implement this interface. -type PipelinesGetter interface { - Pipelines(namespace string) PipelineInterface -} - -// PipelineInterface has methods to work with Pipeline resources. 
-type PipelineInterface interface { - Create(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.CreateOptions) (*v1alpha1.Pipeline, error) - Update(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (*v1alpha1.Pipeline, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Pipeline, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PipelineList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Pipeline, err error) - PipelineExpansion -} - -// pipelines implements PipelineInterface -type pipelines struct { - client rest.Interface - ns string -} - -// newPipelines returns a Pipelines -func newPipelines(c *TektonV1alpha1Client, namespace string) *pipelines { - return &pipelines{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the pipeline, and returns the corresponding pipeline object, and an error if there is any. -func (c *pipelines) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pipelines"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pipelines that match those selectors. -func (c *pipelines) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PipelineList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PipelineList{} - err = c.client.Get(). 
- Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pipelines. -func (c *pipelines) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a pipeline and creates it. Returns the server's representation of the pipeline, and an error, if there is any. -func (c *pipelines) Create(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.CreateOptions) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipeline). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a pipeline and updates it. Returns the server's representation of the pipeline, and an error, if there is any. -func (c *pipelines) Update(ctx context.Context, pipeline *v1alpha1.Pipeline, opts v1.UpdateOptions) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pipelines"). - Name(pipeline.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipeline). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the pipeline and deletes it. Returns an error if one occurs. -func (c *pipelines) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pipelines"). - Name(name). - Body(&opts). - Do(ctx). 
- Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pipelines) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("pipelines"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched pipeline. -func (c *pipelines) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Pipeline, err error) { - result = &v1alpha1.Pipeline{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pipelines"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go index 3392e3a95d..28d39482a6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go @@ -28,12 +28,7 @@ import ( type TektonV1alpha1Interface interface { RESTClient() rest.Interface - ClusterTasksGetter - PipelinesGetter - PipelineRunsGetter RunsGetter - TasksGetter - TaskRunsGetter } // TektonV1alpha1Client is used to interact with features provided by the tekton.dev group. 
@@ -41,30 +36,10 @@ type TektonV1alpha1Client struct { restClient rest.Interface } -func (c *TektonV1alpha1Client) ClusterTasks() ClusterTaskInterface { - return newClusterTasks(c) -} - -func (c *TektonV1alpha1Client) Pipelines(namespace string) PipelineInterface { - return newPipelines(c, namespace) -} - -func (c *TektonV1alpha1Client) PipelineRuns(namespace string) PipelineRunInterface { - return newPipelineRuns(c, namespace) -} - func (c *TektonV1alpha1Client) Runs(namespace string) RunInterface { return newRuns(c, namespace) } -func (c *TektonV1alpha1Client) Tasks(namespace string) TaskInterface { - return newTasks(c, namespace) -} - -func (c *TektonV1alpha1Client) TaskRuns(namespace string) TaskRunInterface { - return newTaskRuns(c, namespace) -} - // NewForConfig creates a new TektonV1alpha1Client for the given config. // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), // where httpClient was generated with rest.HTTPClientFor(c). diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go deleted file mode 100644 index cdf2a1e367..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// PipelineRunsGetter has a method to return a PipelineRunInterface. -// A group's client should implement this interface. -type PipelineRunsGetter interface { - PipelineRuns(namespace string) PipelineRunInterface -} - -// PipelineRunInterface has methods to work with PipelineRun resources. -type PipelineRunInterface interface { - Create(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.CreateOptions) (*v1alpha1.PipelineRun, error) - Update(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.UpdateOptions) (*v1alpha1.PipelineRun, error) - UpdateStatus(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.UpdateOptions) (*v1alpha1.PipelineRun, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PipelineRun, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PipelineRunList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PipelineRun, err error) - PipelineRunExpansion -} - -// pipelineRuns implements PipelineRunInterface -type pipelineRuns struct { - client rest.Interface - ns string -} - -// newPipelineRuns returns a PipelineRuns -func newPipelineRuns(c *TektonV1alpha1Client, namespace string) *pipelineRuns { - return &pipelineRuns{ - client: 
c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the pipelineRun, and returns the corresponding pipelineRun object, and an error if there is any. -func (c *pipelineRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PipelineRun, err error) { - result = &v1alpha1.PipelineRun{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pipelineruns"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PipelineRuns that match those selectors. -func (c *pipelineRuns) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PipelineRunList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.PipelineRunList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pipelineruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pipelineRuns. -func (c *pipelineRuns) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pipelineruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a pipelineRun and creates it. Returns the server's representation of the pipelineRun, and an error, if there is any. -func (c *pipelineRuns) Create(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.CreateOptions) (result *v1alpha1.PipelineRun, err error) { - result = &v1alpha1.PipelineRun{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pipelineruns"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Body(pipelineRun). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a pipelineRun and updates it. Returns the server's representation of the pipelineRun, and an error, if there is any. -func (c *pipelineRuns) Update(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.UpdateOptions) (result *v1alpha1.PipelineRun, err error) { - result = &v1alpha1.PipelineRun{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pipelineruns"). - Name(pipelineRun.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipelineRun). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *pipelineRuns) UpdateStatus(ctx context.Context, pipelineRun *v1alpha1.PipelineRun, opts v1.UpdateOptions) (result *v1alpha1.PipelineRun, err error) { - result = &v1alpha1.PipelineRun{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pipelineruns"). - Name(pipelineRun.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(pipelineRun). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the pipelineRun and deletes it. Returns an error if one occurs. -func (c *pipelineRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pipelineruns"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pipelineRuns) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("pipelineruns"). 
- VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched pipelineRun. -func (c *pipelineRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PipelineRun, err error) { - result = &v1alpha1.PipelineRun{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pipelineruns"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go deleted file mode 100644 index d3e7f1b704..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - "time" - - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - scheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// TaskRunsGetter has a method to return a TaskRunInterface. -// A group's client should implement this interface. -type TaskRunsGetter interface { - TaskRuns(namespace string) TaskRunInterface -} - -// TaskRunInterface has methods to work with TaskRun resources. -type TaskRunInterface interface { - Create(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.CreateOptions) (*v1alpha1.TaskRun, error) - Update(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.UpdateOptions) (*v1alpha1.TaskRun, error) - UpdateStatus(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.UpdateOptions) (*v1alpha1.TaskRun, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.TaskRun, error) - List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.TaskRunList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TaskRun, err error) - TaskRunExpansion -} - -// taskRuns implements TaskRunInterface -type taskRuns struct { - client rest.Interface - ns string -} - -// newTaskRuns returns a TaskRuns -func newTaskRuns(c *TektonV1alpha1Client, namespace string) *taskRuns { - return &taskRuns{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the taskRun, and returns the corresponding taskRun object, and an error if there is any. 
-func (c *taskRuns) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.TaskRun, err error) { - result = &v1alpha1.TaskRun{} - err = c.client.Get(). - Namespace(c.ns). - Resource("taskruns"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of TaskRuns that match those selectors. -func (c *taskRuns) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.TaskRunList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.TaskRunList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("taskruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested taskRuns. -func (c *taskRuns) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("taskruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a taskRun and creates it. Returns the server's representation of the taskRun, and an error, if there is any. -func (c *taskRuns) Create(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.CreateOptions) (result *v1alpha1.TaskRun, err error) { - result = &v1alpha1.TaskRun{} - err = c.client.Post(). - Namespace(c.ns). - Resource("taskruns"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(taskRun). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a taskRun and updates it. Returns the server's representation of the taskRun, and an error, if there is any. 
-func (c *taskRuns) Update(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.UpdateOptions) (result *v1alpha1.TaskRun, err error) { - result = &v1alpha1.TaskRun{} - err = c.client.Put(). - Namespace(c.ns). - Resource("taskruns"). - Name(taskRun.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(taskRun). - Do(ctx). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *taskRuns) UpdateStatus(ctx context.Context, taskRun *v1alpha1.TaskRun, opts v1.UpdateOptions) (result *v1alpha1.TaskRun, err error) { - result = &v1alpha1.TaskRun{} - err = c.client.Put(). - Namespace(c.ns). - Resource("taskruns"). - Name(taskRun.Name). - SubResource("status"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(taskRun). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the taskRun and deletes it. Returns an error if one occurs. -func (c *taskRuns) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("taskruns"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *taskRuns) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("taskruns"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched taskRun. 
-func (c *taskRuns) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TaskRun, err error) { - result = &v1alpha1.TaskRun{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("taskruns"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). - Into(result) - return -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go index a5efe0759f..44aa0a6f8a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go @@ -21,6 +21,7 @@ package externalversions import ( "fmt" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -53,19 +54,13 @@ func (f *genericInformer) Lister() cache.GenericLister { // TODO extend this to unknown resources with a client pool func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { switch resource { - // Group=tekton.dev, Version=v1alpha1 - case v1alpha1.SchemeGroupVersion.WithResource("clustertasks"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().ClusterTasks().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("pipelines"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().Pipelines().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("pipelineruns"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().PipelineRuns().Informer()}, nil + 
// Group=tekton.dev, Version=v1 + case v1.SchemeGroupVersion.WithResource("tasks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1().Tasks().Informer()}, nil + + // Group=tekton.dev, Version=v1alpha1 case v1alpha1.SchemeGroupVersion.WithResource("runs"): return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().Runs().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("tasks"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().Tasks().Informer()}, nil - case v1alpha1.SchemeGroupVersion.WithResource("taskruns"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Tekton().V1alpha1().TaskRuns().Informer()}, nil // Group=tekton.dev, Version=v1beta1 case v1beta1.SchemeGroupVersion.WithResource("clustertasks"): diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/interface.go index bd4ec535fb..71e38c5773 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/interface.go @@ -20,6 +20,7 @@ package pipeline import ( internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" + v1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1" v1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1" ) @@ -30,6 +31,8 @@ type Interface interface { V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface + // V1 provides access to shared informers for resources in V1. 
+ V1() v1.Interface } type group struct { @@ -52,3 +55,8 @@ func (g *group) V1alpha1() v1alpha1.Interface { func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/interface.go new file mode 100644 index 0000000000..050e7303cf --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2020 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Tasks returns a TaskInformer. + Tasks() TaskInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Tasks returns a TaskInformer. +func (v *version) Tasks() TaskInformer { + return &taskInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/task.go similarity index 78% rename from vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/task.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/task.go index 4e5320f568..2e6f539e1b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/task.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1/task.go @@ -16,17 +16,17 @@ limitations under the License. // Code generated by informer-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( "context" time "time" - pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" cache "k8s.io/client-go/tools/cache" @@ -36,7 +36,7 @@ import ( // Tasks. 
type TaskInformer interface { Informer() cache.SharedIndexInformer - Lister() v1alpha1.TaskLister + Lister() v1.TaskLister } type taskInformer struct { @@ -58,20 +58,20 @@ func NewTaskInformer(client versioned.Interface, namespace string, resyncPeriod func NewFilteredTaskInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.TektonV1alpha1().Tasks(namespace).List(context.TODO(), options) + return client.TektonV1().Tasks(namespace).List(context.TODO(), options) }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.TektonV1alpha1().Tasks(namespace).Watch(context.TODO(), options) + return client.TektonV1().Tasks(namespace).Watch(context.TODO(), options) }, }, - &pipelinev1alpha1.Task{}, + &pipelinev1.Task{}, resyncPeriod, indexers, ) @@ -82,9 +82,9 @@ func (f *taskInformer) defaultInformer(client versioned.Interface, resyncPeriod } func (f *taskInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipelinev1alpha1.Task{}, f.defaultInformer) + return f.factory.InformerFor(&pipelinev1.Task{}, f.defaultInformer) } -func (f *taskInformer) Lister() v1alpha1.TaskLister { - return v1alpha1.NewTaskLister(f.Informer().GetIndexer()) +func (f *taskInformer) Lister() v1.TaskLister { + return v1.NewTaskLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/clustertask.go 
b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/clustertask.go deleted file mode 100644 index 3cdc89018a..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/clustertask.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" - internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// ClusterTaskInformer provides access to a shared informer and lister for -// ClusterTasks. -type ClusterTaskInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterTaskLister -} - -type clusterTaskInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewClusterTaskInformer constructs a new informer for ClusterTask type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewClusterTaskInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredClusterTaskInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredClusterTaskInformer constructs a new informer for ClusterTask type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredClusterTaskInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TektonV1alpha1().ClusterTasks().List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TektonV1alpha1().ClusterTasks().Watch(context.TODO(), options) - }, - }, - &pipelinev1alpha1.ClusterTask{}, - resyncPeriod, - indexers, - ) -} - -func (f *clusterTaskInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredClusterTaskInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *clusterTaskInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipelinev1alpha1.ClusterTask{}, f.defaultInformer) -} - -func (f *clusterTaskInformer) Lister() v1alpha1.ClusterTaskLister { - return 
v1alpha1.NewClusterTaskLister(f.Informer().GetIndexer()) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go index 4fdb40457e..25a56f8134 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go @@ -24,18 +24,8 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // ClusterTasks returns a ClusterTaskInformer. - ClusterTasks() ClusterTaskInformer - // Pipelines returns a PipelineInformer. - Pipelines() PipelineInformer - // PipelineRuns returns a PipelineRunInformer. - PipelineRuns() PipelineRunInformer // Runs returns a RunInformer. Runs() RunInformer - // Tasks returns a TaskInformer. - Tasks() TaskInformer - // TaskRuns returns a TaskRunInformer. - TaskRuns() TaskRunInformer } type version struct { @@ -49,32 +39,7 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// ClusterTasks returns a ClusterTaskInformer. -func (v *version) ClusterTasks() ClusterTaskInformer { - return &clusterTaskInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - -// Pipelines returns a PipelineInformer. -func (v *version) Pipelines() PipelineInformer { - return &pipelineInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// PipelineRuns returns a PipelineRunInformer. -func (v *version) PipelineRuns() PipelineRunInformer { - return &pipelineRunInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - // Runs returns a RunInformer. 
func (v *version) Runs() RunInformer { return &runInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} } - -// Tasks returns a TaskInformer. -func (v *version) Tasks() TaskInformer { - return &taskInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - -// TaskRuns returns a TaskRunInformer. -func (v *version) TaskRuns() TaskRunInformer { - return &taskRunInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipeline.go deleted file mode 100644 index 848ec0e548..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipeline.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - "context" - time "time" - - pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" - internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// PipelineInformer provides access to a shared informer and lister for -// Pipelines. -type PipelineInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.PipelineLister -} - -type pipelineInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewPipelineInformer constructs a new informer for Pipeline type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPipelineInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPipelineInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredPipelineInformer constructs a new informer for Pipeline type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredPipelineInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TektonV1alpha1().Pipelines(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TektonV1alpha1().Pipelines(namespace).Watch(context.TODO(), options) - }, - }, - &pipelinev1alpha1.Pipeline{}, - resyncPeriod, - indexers, - ) -} - -func (f *pipelineInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPipelineInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *pipelineInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipelinev1alpha1.Pipeline{}, f.defaultInformer) -} - -func (f *pipelineInformer) Lister() v1alpha1.PipelineLister { - return v1alpha1.NewPipelineLister(f.Informer().GetIndexer()) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelinerun.go deleted file mode 100644 index a8d5013806..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelinerun.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" - internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// PipelineRunInformer provides access to a shared informer and lister for -// PipelineRuns. -type PipelineRunInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.PipelineRunLister -} - -type pipelineRunInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewPipelineRunInformer constructs a new informer for PipelineRun type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPipelineRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPipelineRunInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredPipelineRunInformer constructs a new informer for PipelineRun type. 
-// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredPipelineRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TektonV1alpha1().PipelineRuns(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TektonV1alpha1().PipelineRuns(namespace).Watch(context.TODO(), options) - }, - }, - &pipelinev1alpha1.PipelineRun{}, - resyncPeriod, - indexers, - ) -} - -func (f *pipelineRunInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPipelineRunInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *pipelineRunInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipelinev1alpha1.PipelineRun{}, f.defaultInformer) -} - -func (f *pipelineRunInformer) Lister() v1alpha1.PipelineRunLister { - return v1alpha1.NewPipelineRunLister(f.Informer().GetIndexer()) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/taskrun.go deleted file mode 100644 index 41c3ae610e..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/taskrun.go +++ /dev/null @@ -1,90 +0,0 @@ 
-/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - pipelinev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" - internalinterfaces "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces" - v1alpha1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - cache "k8s.io/client-go/tools/cache" -) - -// TaskRunInformer provides access to a shared informer and lister for -// TaskRuns. -type TaskRunInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.TaskRunLister -} - -type taskRunInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewTaskRunInformer constructs a new informer for TaskRun type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewTaskRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredTaskRunInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredTaskRunInformer constructs a new informer for TaskRun type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredTaskRunInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TektonV1alpha1().TaskRuns(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.TektonV1alpha1().TaskRuns(namespace).Watch(context.TODO(), options) - }, - }, - &pipelinev1alpha1.TaskRun{}, - resyncPeriod, - indexers, - ) -} - -func (f *taskRunInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredTaskRunInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *taskRunInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&pipelinev1alpha1.TaskRun{}, f.defaultInformer) -} - -func (f *taskRunInformer) Lister() v1alpha1.TaskRunLister { - return v1alpha1.NewTaskRunLister(f.Informer().GetIndexer()) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go 
b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go index 949065abe3..f0499efbcb 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/client/client.go @@ -24,9 +24,11 @@ import ( errors "errors" fmt "fmt" + pipelinev1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + typedtektonv1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1" typedtektonv1alpha1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" typedtektonv1beta1 "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -113,682 +115,31 @@ func (w *wrapTektonV1alpha1) RESTClient() rest.Interface { panic("RESTClient called on dynamic client!") } -func (w *wrapTektonV1alpha1) ClusterTasks() typedtektonv1alpha1.ClusterTaskInterface { - return &wrapTektonV1alpha1ClusterTaskImpl{ - dyn: w.dyn.Resource(schema.GroupVersionResource{ - Group: "tekton.dev", - Version: "v1alpha1", - Resource: "clustertasks", - }), - } -} - -type wrapTektonV1alpha1ClusterTaskImpl struct { - dyn dynamic.NamespaceableResourceInterface -} - -var _ typedtektonv1alpha1.ClusterTaskInterface = (*wrapTektonV1alpha1ClusterTaskImpl)(nil) - -func (w *wrapTektonV1alpha1ClusterTaskImpl) Create(ctx context.Context, in *v1alpha1.ClusterTask, opts v1.CreateOptions) (*v1alpha1.ClusterTask, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "ClusterTask", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Create(ctx, uo, opts) - if err != nil { - return nil, err - } 
- out := &v1alpha1.ClusterTask{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1ClusterTaskImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return w.dyn.Delete(ctx, name, opts) -} - -func (w *wrapTektonV1alpha1ClusterTaskImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - return w.dyn.DeleteCollection(ctx, opts, listOpts) -} - -func (w *wrapTektonV1alpha1ClusterTaskImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterTask, error) { - uo, err := w.dyn.Get(ctx, name, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.ClusterTask{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1ClusterTaskImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterTaskList, error) { - uo, err := w.dyn.List(ctx, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.ClusterTaskList{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1ClusterTaskImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterTask, err error) { - uo, err := w.dyn.Patch(ctx, name, pt, data, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.ClusterTask{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1ClusterTaskImpl) Update(ctx context.Context, in *v1alpha1.ClusterTask, opts v1.UpdateOptions) (*v1alpha1.ClusterTask, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "ClusterTask", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Update(ctx, uo, opts) - if err != 
nil { - return nil, err - } - out := &v1alpha1.ClusterTask{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1ClusterTaskImpl) UpdateStatus(ctx context.Context, in *v1alpha1.ClusterTask, opts v1.UpdateOptions) (*v1alpha1.ClusterTask, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "ClusterTask", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.UpdateStatus(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.ClusterTask{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1ClusterTaskImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return nil, errors.New("NYI: Watch") -} - -func (w *wrapTektonV1alpha1) Pipelines(namespace string) typedtektonv1alpha1.PipelineInterface { - return &wrapTektonV1alpha1PipelineImpl{ - dyn: w.dyn.Resource(schema.GroupVersionResource{ - Group: "tekton.dev", - Version: "v1alpha1", - Resource: "pipelines", - }), - - namespace: namespace, - } -} - -type wrapTektonV1alpha1PipelineImpl struct { - dyn dynamic.NamespaceableResourceInterface - - namespace string -} - -var _ typedtektonv1alpha1.PipelineInterface = (*wrapTektonV1alpha1PipelineImpl)(nil) - -func (w *wrapTektonV1alpha1PipelineImpl) Create(ctx context.Context, in *v1alpha1.Pipeline, opts v1.CreateOptions) (*v1alpha1.Pipeline, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "Pipeline", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Pipeline{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil 
-} - -func (w *wrapTektonV1alpha1PipelineImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) -} - -func (w *wrapTektonV1alpha1PipelineImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) -} - -func (w *wrapTektonV1alpha1PipelineImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Pipeline, error) { - uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Pipeline{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1PipelineImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PipelineList, error) { - uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.PipelineList{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1PipelineImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Pipeline, err error) { - uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Pipeline{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1PipelineImpl) Update(ctx context.Context, in *v1alpha1.Pipeline, opts v1.UpdateOptions) (*v1alpha1.Pipeline, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "Pipeline", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts) - if err != nil { - return nil, err 
- } - out := &v1alpha1.Pipeline{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1PipelineImpl) UpdateStatus(ctx context.Context, in *v1alpha1.Pipeline, opts v1.UpdateOptions) (*v1alpha1.Pipeline, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "Pipeline", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Pipeline{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1PipelineImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return nil, errors.New("NYI: Watch") -} - -func (w *wrapTektonV1alpha1) PipelineRuns(namespace string) typedtektonv1alpha1.PipelineRunInterface { - return &wrapTektonV1alpha1PipelineRunImpl{ - dyn: w.dyn.Resource(schema.GroupVersionResource{ - Group: "tekton.dev", - Version: "v1alpha1", - Resource: "pipelineruns", - }), - - namespace: namespace, - } -} - -type wrapTektonV1alpha1PipelineRunImpl struct { - dyn dynamic.NamespaceableResourceInterface - - namespace string -} - -var _ typedtektonv1alpha1.PipelineRunInterface = (*wrapTektonV1alpha1PipelineRunImpl)(nil) - -func (w *wrapTektonV1alpha1PipelineRunImpl) Create(ctx context.Context, in *v1alpha1.PipelineRun, opts v1.CreateOptions) (*v1alpha1.PipelineRun, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "PipelineRun", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.PipelineRun{} - if err := convert(uo, out); err != nil { - return nil, err - } - 
return out, nil -} - -func (w *wrapTektonV1alpha1PipelineRunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) -} - -func (w *wrapTektonV1alpha1PipelineRunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) -} - -func (w *wrapTektonV1alpha1PipelineRunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PipelineRun, error) { - uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.PipelineRun{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1PipelineRunImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PipelineRunList, error) { - uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.PipelineRunList{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1PipelineRunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.PipelineRun, err error) { - uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.PipelineRun{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1PipelineRunImpl) Update(ctx context.Context, in *v1alpha1.PipelineRun, opts v1.UpdateOptions) (*v1alpha1.PipelineRun, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "PipelineRun", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := 
w.dyn.Namespace(w.namespace).Update(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.PipelineRun{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1PipelineRunImpl) UpdateStatus(ctx context.Context, in *v1alpha1.PipelineRun, opts v1.UpdateOptions) (*v1alpha1.PipelineRun, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "PipelineRun", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.PipelineRun{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1PipelineRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return nil, errors.New("NYI: Watch") -} - -func (w *wrapTektonV1alpha1) Runs(namespace string) typedtektonv1alpha1.RunInterface { - return &wrapTektonV1alpha1RunImpl{ - dyn: w.dyn.Resource(schema.GroupVersionResource{ - Group: "tekton.dev", - Version: "v1alpha1", - Resource: "runs", - }), - - namespace: namespace, - } -} - -type wrapTektonV1alpha1RunImpl struct { - dyn dynamic.NamespaceableResourceInterface - - namespace string -} - -var _ typedtektonv1alpha1.RunInterface = (*wrapTektonV1alpha1RunImpl)(nil) - -func (w *wrapTektonV1alpha1RunImpl) Create(ctx context.Context, in *v1alpha1.Run, opts v1.CreateOptions) (*v1alpha1.Run, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "Run", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Run{} - if err := convert(uo, out); err != nil { - return nil, 
err - } - return out, nil -} - -func (w *wrapTektonV1alpha1RunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) -} - -func (w *wrapTektonV1alpha1RunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) -} - -func (w *wrapTektonV1alpha1RunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Run, error) { - uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Run{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1RunImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RunList, error) { - uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.RunList{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1RunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Run, err error) { - uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Run{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1RunImpl) Update(ctx context.Context, in *v1alpha1.Run, opts v1.UpdateOptions) (*v1alpha1.Run, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "Run", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Run{} - if err := 
convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1RunImpl) UpdateStatus(ctx context.Context, in *v1alpha1.Run, opts v1.UpdateOptions) (*v1alpha1.Run, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "Run", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Run{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1RunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return nil, errors.New("NYI: Watch") -} - -func (w *wrapTektonV1alpha1) Tasks(namespace string) typedtektonv1alpha1.TaskInterface { - return &wrapTektonV1alpha1TaskImpl{ - dyn: w.dyn.Resource(schema.GroupVersionResource{ - Group: "tekton.dev", - Version: "v1alpha1", - Resource: "tasks", - }), - - namespace: namespace, - } -} - -type wrapTektonV1alpha1TaskImpl struct { - dyn dynamic.NamespaceableResourceInterface - - namespace string -} - -var _ typedtektonv1alpha1.TaskInterface = (*wrapTektonV1alpha1TaskImpl)(nil) - -func (w *wrapTektonV1alpha1TaskImpl) Create(ctx context.Context, in *v1alpha1.Task, opts v1.CreateOptions) (*v1alpha1.Task, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "Task", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Task{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1TaskImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return 
w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) -} - -func (w *wrapTektonV1alpha1TaskImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) -} - -func (w *wrapTektonV1alpha1TaskImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Task, error) { - uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Task{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1TaskImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.TaskList, error) { - uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.TaskList{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1TaskImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Task, err error) { - uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Task{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1TaskImpl) Update(ctx context.Context, in *v1alpha1.Task, opts v1.UpdateOptions) (*v1alpha1.Task, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "Task", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Task{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1TaskImpl) UpdateStatus(ctx 
context.Context, in *v1alpha1.Task, opts v1.UpdateOptions) (*v1alpha1.Task, error) { - in.SetGroupVersionKind(schema.GroupVersionKind{ - Group: "tekton.dev", - Version: "v1alpha1", - Kind: "Task", - }) - uo := &unstructured.Unstructured{} - if err := convert(in, uo); err != nil { - return nil, err - } - uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts) - if err != nil { - return nil, err - } - out := &v1alpha1.Task{} - if err := convert(uo, out); err != nil { - return nil, err - } - return out, nil -} - -func (w *wrapTektonV1alpha1TaskImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return nil, errors.New("NYI: Watch") -} - -func (w *wrapTektonV1alpha1) TaskRuns(namespace string) typedtektonv1alpha1.TaskRunInterface { - return &wrapTektonV1alpha1TaskRunImpl{ +func (w *wrapTektonV1alpha1) Runs(namespace string) typedtektonv1alpha1.RunInterface { + return &wrapTektonV1alpha1RunImpl{ dyn: w.dyn.Resource(schema.GroupVersionResource{ Group: "tekton.dev", Version: "v1alpha1", - Resource: "taskruns", + Resource: "runs", }), namespace: namespace, } } -type wrapTektonV1alpha1TaskRunImpl struct { +type wrapTektonV1alpha1RunImpl struct { dyn dynamic.NamespaceableResourceInterface namespace string } -var _ typedtektonv1alpha1.TaskRunInterface = (*wrapTektonV1alpha1TaskRunImpl)(nil) +var _ typedtektonv1alpha1.RunInterface = (*wrapTektonV1alpha1RunImpl)(nil) -func (w *wrapTektonV1alpha1TaskRunImpl) Create(ctx context.Context, in *v1alpha1.TaskRun, opts v1.CreateOptions) (*v1alpha1.TaskRun, error) { +func (w *wrapTektonV1alpha1RunImpl) Create(ctx context.Context, in *v1alpha1.Run, opts v1.CreateOptions) (*v1alpha1.Run, error) { in.SetGroupVersionKind(schema.GroupVersionKind{ Group: "tekton.dev", Version: "v1alpha1", - Kind: "TaskRun", + Kind: "Run", }) uo := &unstructured.Unstructured{} if err := convert(in, uo); err != nil { @@ -798,62 +149,62 @@ func (w *wrapTektonV1alpha1TaskRunImpl) Create(ctx context.Context, in *v1alpha1 
if err != nil { return nil, err } - out := &v1alpha1.TaskRun{} + out := &v1alpha1.Run{} if err := convert(uo, out); err != nil { return nil, err } return out, nil } -func (w *wrapTektonV1alpha1TaskRunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { +func (w *wrapTektonV1alpha1RunImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) } -func (w *wrapTektonV1alpha1TaskRunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { +func (w *wrapTektonV1alpha1RunImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) } -func (w *wrapTektonV1alpha1TaskRunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.TaskRun, error) { +func (w *wrapTektonV1alpha1RunImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Run, error) { uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) if err != nil { return nil, err } - out := &v1alpha1.TaskRun{} + out := &v1alpha1.Run{} if err := convert(uo, out); err != nil { return nil, err } return out, nil } -func (w *wrapTektonV1alpha1TaskRunImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.TaskRunList, error) { +func (w *wrapTektonV1alpha1RunImpl) List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.RunList, error) { uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) if err != nil { return nil, err } - out := &v1alpha1.TaskRunList{} + out := &v1alpha1.RunList{} if err := convert(uo, out); err != nil { return nil, err } return out, nil } -func (w *wrapTektonV1alpha1TaskRunImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.TaskRun, err error) { +func (w *wrapTektonV1alpha1RunImpl) Patch(ctx context.Context, name 
string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Run, err error) { uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) if err != nil { return nil, err } - out := &v1alpha1.TaskRun{} + out := &v1alpha1.Run{} if err := convert(uo, out); err != nil { return nil, err } return out, nil } -func (w *wrapTektonV1alpha1TaskRunImpl) Update(ctx context.Context, in *v1alpha1.TaskRun, opts v1.UpdateOptions) (*v1alpha1.TaskRun, error) { +func (w *wrapTektonV1alpha1RunImpl) Update(ctx context.Context, in *v1alpha1.Run, opts v1.UpdateOptions) (*v1alpha1.Run, error) { in.SetGroupVersionKind(schema.GroupVersionKind{ Group: "tekton.dev", Version: "v1alpha1", - Kind: "TaskRun", + Kind: "Run", }) uo := &unstructured.Unstructured{} if err := convert(in, uo); err != nil { @@ -863,18 +214,18 @@ func (w *wrapTektonV1alpha1TaskRunImpl) Update(ctx context.Context, in *v1alpha1 if err != nil { return nil, err } - out := &v1alpha1.TaskRun{} + out := &v1alpha1.Run{} if err := convert(uo, out); err != nil { return nil, err } return out, nil } -func (w *wrapTektonV1alpha1TaskRunImpl) UpdateStatus(ctx context.Context, in *v1alpha1.TaskRun, opts v1.UpdateOptions) (*v1alpha1.TaskRun, error) { +func (w *wrapTektonV1alpha1RunImpl) UpdateStatus(ctx context.Context, in *v1alpha1.Run, opts v1.UpdateOptions) (*v1alpha1.Run, error) { in.SetGroupVersionKind(schema.GroupVersionKind{ Group: "tekton.dev", Version: "v1alpha1", - Kind: "TaskRun", + Kind: "Run", }) uo := &unstructured.Unstructured{} if err := convert(in, uo); err != nil { @@ -884,14 +235,14 @@ func (w *wrapTektonV1alpha1TaskRunImpl) UpdateStatus(ctx context.Context, in *v1 if err != nil { return nil, err } - out := &v1alpha1.TaskRun{} + out := &v1alpha1.Run{} if err := convert(uo, out); err != nil { return nil, err } return out, nil } -func (w *wrapTektonV1alpha1TaskRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { +func (w 
*wrapTektonV1alpha1RunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return nil, errors.New("NYI: Watch") } @@ -1560,3 +911,149 @@ func (w *wrapTektonV1beta1TaskRunImpl) UpdateStatus(ctx context.Context, in *v1b func (w *wrapTektonV1beta1TaskRunImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return nil, errors.New("NYI: Watch") } + +// TektonV1 retrieves the TektonV1Client +func (w *wrapClient) TektonV1() typedtektonv1.TektonV1Interface { + return &wrapTektonV1{ + dyn: w.dyn, + } +} + +type wrapTektonV1 struct { + dyn dynamic.Interface +} + +func (w *wrapTektonV1) RESTClient() rest.Interface { + panic("RESTClient called on dynamic client!") +} + +func (w *wrapTektonV1) Tasks(namespace string) typedtektonv1.TaskInterface { + return &wrapTektonV1TaskImpl{ + dyn: w.dyn.Resource(schema.GroupVersionResource{ + Group: "tekton.dev", + Version: "v1", + Resource: "tasks", + }), + + namespace: namespace, + } +} + +type wrapTektonV1TaskImpl struct { + dyn dynamic.NamespaceableResourceInterface + + namespace string +} + +var _ typedtektonv1.TaskInterface = (*wrapTektonV1TaskImpl)(nil) + +func (w *wrapTektonV1TaskImpl) Create(ctx context.Context, in *pipelinev1.Task, opts v1.CreateOptions) (*pipelinev1.Task, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1", + Kind: "Task", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Create(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.Task{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskImpl) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return w.dyn.Namespace(w.namespace).Delete(ctx, name, opts) +} + +func (w *wrapTektonV1TaskImpl) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts 
v1.ListOptions) error { + return w.dyn.Namespace(w.namespace).DeleteCollection(ctx, opts, listOpts) +} + +func (w *wrapTektonV1TaskImpl) Get(ctx context.Context, name string, opts v1.GetOptions) (*pipelinev1.Task, error) { + uo, err := w.dyn.Namespace(w.namespace).Get(ctx, name, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.Task{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskImpl) List(ctx context.Context, opts v1.ListOptions) (*pipelinev1.TaskList, error) { + uo, err := w.dyn.Namespace(w.namespace).List(ctx, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.TaskList{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskImpl) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *pipelinev1.Task, err error) { + uo, err := w.dyn.Namespace(w.namespace).Patch(ctx, name, pt, data, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.Task{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskImpl) Update(ctx context.Context, in *pipelinev1.Task, opts v1.UpdateOptions) (*pipelinev1.Task, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1", + Kind: "Task", + }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).Update(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.Task{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskImpl) UpdateStatus(ctx context.Context, in *pipelinev1.Task, opts v1.UpdateOptions) (*pipelinev1.Task, error) { + in.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "tekton.dev", + Version: "v1", + Kind: "Task", 
+ }) + uo := &unstructured.Unstructured{} + if err := convert(in, uo); err != nil { + return nil, err + } + uo, err := w.dyn.Namespace(w.namespace).UpdateStatus(ctx, uo, opts) + if err != nil { + return nil, err + } + out := &pipelinev1.Task{} + if err := convert(uo, out); err != nil { + return nil, err + } + return out, nil +} + +func (w *wrapTektonV1TaskImpl) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return nil, errors.New("NYI: Watch") +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/expansion_generated.go similarity index 60% rename from vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/expansion_generated.go index 6f636bfb71..ce81dddf5e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2019-2020 The Tekton Authors +Copyright 2020 The Tekton Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,11 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1alpha1 +// Code generated by lister-gen. DO NOT EDIT. -import ( - resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" -) +package v1 -// PipelineResourceTypeGCS is the subtype for the GCSResources, which is backed by a GCS blob/directory. -const PipelineResourceTypeGCS PipelineResourceType = resource.PipelineResourceTypeGCS +// TaskListerExpansion allows custom methods to be added to +// TaskLister. 
+type TaskListerExpansion interface{} + +// TaskNamespaceListerExpansion allows custom methods to be added to +// TaskNamespaceLister. +type TaskNamespaceListerExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/task.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/task.go similarity index 80% rename from vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/task.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/task.go index 2c60610afe..e918f2f895 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/task.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1/task.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by lister-gen. DO NOT EDIT. -package v1alpha1 +package v1 import ( - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" @@ -30,7 +30,7 @@ import ( type TaskLister interface { // List lists all Tasks in the indexer. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Task, err error) + List(selector labels.Selector) (ret []*v1.Task, err error) // Tasks returns an object that can list and get Tasks. Tasks(namespace string) TaskNamespaceLister TaskListerExpansion @@ -47,9 +47,9 @@ func NewTaskLister(indexer cache.Indexer) TaskLister { } // List lists all Tasks in the indexer. 
-func (s *taskLister) List(selector labels.Selector) (ret []*v1alpha1.Task, err error) { +func (s *taskLister) List(selector labels.Selector) (ret []*v1.Task, err error) { err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Task)) + ret = append(ret, m.(*v1.Task)) }) return ret, err } @@ -64,10 +64,10 @@ func (s *taskLister) Tasks(namespace string) TaskNamespaceLister { type TaskNamespaceLister interface { // List lists all Tasks in the indexer for a given namespace. // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Task, err error) + List(selector labels.Selector) (ret []*v1.Task, err error) // Get retrieves the Task from the indexer for a given namespace and name. // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.Task, error) + Get(name string) (*v1.Task, error) TaskNamespaceListerExpansion } @@ -79,21 +79,21 @@ type taskNamespaceLister struct { } // List lists all Tasks in the indexer for a given namespace. -func (s taskNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Task, err error) { +func (s taskNamespaceLister) List(selector labels.Selector) (ret []*v1.Task, err error) { err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Task)) + ret = append(ret, m.(*v1.Task)) }) return ret, err } // Get retrieves the Task from the indexer for a given namespace and name. 
-func (s taskNamespaceLister) Get(name string) (*v1alpha1.Task, error) { +func (s taskNamespaceLister) Get(name string) (*v1.Task, error) { obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) if err != nil { return nil, err } if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("task"), name) + return nil, errors.NewNotFound(v1.Resource("task"), name) } - return obj.(*v1alpha1.Task), nil + return obj.(*v1.Task), nil } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/clustertask.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/clustertask.go deleted file mode 100644 index 7344d81a74..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/clustertask.go +++ /dev/null @@ -1,68 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// ClusterTaskLister helps list ClusterTasks. -// All objects returned here must be treated as read-only. -type ClusterTaskLister interface { - // List lists all ClusterTasks in the indexer. - // Objects returned here must be treated as read-only. 
- List(selector labels.Selector) (ret []*v1alpha1.ClusterTask, err error) - // Get retrieves the ClusterTask from the index for a given name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.ClusterTask, error) - ClusterTaskListerExpansion -} - -// clusterTaskLister implements the ClusterTaskLister interface. -type clusterTaskLister struct { - indexer cache.Indexer -} - -// NewClusterTaskLister returns a new ClusterTaskLister. -func NewClusterTaskLister(indexer cache.Indexer) ClusterTaskLister { - return &clusterTaskLister{indexer: indexer} -} - -// List lists all ClusterTasks in the indexer. -func (s *clusterTaskLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterTask, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.ClusterTask)) - }) - return ret, err -} - -// Get retrieves the ClusterTask from the index for a given name. -func (s *clusterTaskLister) Get(name string) (*v1alpha1.ClusterTask, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("clustertask"), name) - } - return obj.(*v1alpha1.ClusterTask), nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go index 6f4c92c04a..ef742ea349 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go @@ -18,26 +18,6 @@ limitations under the License. package v1alpha1 -// ClusterTaskListerExpansion allows custom methods to be added to -// ClusterTaskLister. -type ClusterTaskListerExpansion interface{} - -// PipelineListerExpansion allows custom methods to be added to -// PipelineLister. 
-type PipelineListerExpansion interface{} - -// PipelineNamespaceListerExpansion allows custom methods to be added to -// PipelineNamespaceLister. -type PipelineNamespaceListerExpansion interface{} - -// PipelineRunListerExpansion allows custom methods to be added to -// PipelineRunLister. -type PipelineRunListerExpansion interface{} - -// PipelineRunNamespaceListerExpansion allows custom methods to be added to -// PipelineRunNamespaceLister. -type PipelineRunNamespaceListerExpansion interface{} - // RunListerExpansion allows custom methods to be added to // RunLister. type RunListerExpansion interface{} @@ -45,19 +25,3 @@ type RunListerExpansion interface{} // RunNamespaceListerExpansion allows custom methods to be added to // RunNamespaceLister. type RunNamespaceListerExpansion interface{} - -// TaskListerExpansion allows custom methods to be added to -// TaskLister. -type TaskListerExpansion interface{} - -// TaskNamespaceListerExpansion allows custom methods to be added to -// TaskNamespaceLister. -type TaskNamespaceListerExpansion interface{} - -// TaskRunListerExpansion allows custom methods to be added to -// TaskRunLister. -type TaskRunListerExpansion interface{} - -// TaskRunNamespaceListerExpansion allows custom methods to be added to -// TaskRunNamespaceLister. -type TaskRunNamespaceListerExpansion interface{} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipeline.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipeline.go deleted file mode 100644 index 80c75c9981..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipeline.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// PipelineLister helps list Pipelines. -// All objects returned here must be treated as read-only. -type PipelineLister interface { - // List lists all Pipelines in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Pipeline, err error) - // Pipelines returns an object that can list and get Pipelines. - Pipelines(namespace string) PipelineNamespaceLister - PipelineListerExpansion -} - -// pipelineLister implements the PipelineLister interface. -type pipelineLister struct { - indexer cache.Indexer -} - -// NewPipelineLister returns a new PipelineLister. -func NewPipelineLister(indexer cache.Indexer) PipelineLister { - return &pipelineLister{indexer: indexer} -} - -// List lists all Pipelines in the indexer. -func (s *pipelineLister) List(selector labels.Selector) (ret []*v1alpha1.Pipeline, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Pipeline)) - }) - return ret, err -} - -// Pipelines returns an object that can list and get Pipelines. -func (s *pipelineLister) Pipelines(namespace string) PipelineNamespaceLister { - return pipelineNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// PipelineNamespaceLister helps list and get Pipelines. 
-// All objects returned here must be treated as read-only. -type PipelineNamespaceLister interface { - // List lists all Pipelines in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.Pipeline, err error) - // Get retrieves the Pipeline from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.Pipeline, error) - PipelineNamespaceListerExpansion -} - -// pipelineNamespaceLister implements the PipelineNamespaceLister -// interface. -type pipelineNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all Pipelines in the indexer for a given namespace. -func (s pipelineNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Pipeline, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.Pipeline)) - }) - return ret, err -} - -// Get retrieves the Pipeline from the indexer for a given namespace and name. -func (s pipelineNamespaceLister) Get(name string) (*v1alpha1.Pipeline, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("pipeline"), name) - } - return obj.(*v1alpha1.Pipeline), nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelinerun.go deleted file mode 100644 index 105871d970..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelinerun.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// PipelineRunLister helps list PipelineRuns. -// All objects returned here must be treated as read-only. -type PipelineRunLister interface { - // List lists all PipelineRuns in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PipelineRun, err error) - // PipelineRuns returns an object that can list and get PipelineRuns. - PipelineRuns(namespace string) PipelineRunNamespaceLister - PipelineRunListerExpansion -} - -// pipelineRunLister implements the PipelineRunLister interface. -type pipelineRunLister struct { - indexer cache.Indexer -} - -// NewPipelineRunLister returns a new PipelineRunLister. -func NewPipelineRunLister(indexer cache.Indexer) PipelineRunLister { - return &pipelineRunLister{indexer: indexer} -} - -// List lists all PipelineRuns in the indexer. -func (s *pipelineRunLister) List(selector labels.Selector) (ret []*v1alpha1.PipelineRun, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PipelineRun)) - }) - return ret, err -} - -// PipelineRuns returns an object that can list and get PipelineRuns. 
-func (s *pipelineRunLister) PipelineRuns(namespace string) PipelineRunNamespaceLister { - return pipelineRunNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// PipelineRunNamespaceLister helps list and get PipelineRuns. -// All objects returned here must be treated as read-only. -type PipelineRunNamespaceLister interface { - // List lists all PipelineRuns in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.PipelineRun, err error) - // Get retrieves the PipelineRun from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.PipelineRun, error) - PipelineRunNamespaceListerExpansion -} - -// pipelineRunNamespaceLister implements the PipelineRunNamespaceLister -// interface. -type pipelineRunNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all PipelineRuns in the indexer for a given namespace. -func (s pipelineRunNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PipelineRun, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.PipelineRun)) - }) - return ret, err -} - -// Get retrieves the PipelineRun from the indexer for a given namespace and name. 
-func (s pipelineRunNamespaceLister) Get(name string) (*v1alpha1.PipelineRun, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("pipelinerun"), name) - } - return obj.(*v1alpha1.PipelineRun), nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/taskrun.go deleted file mode 100644 index 933cc94286..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/taskrun.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// TaskRunLister helps list TaskRuns. -// All objects returned here must be treated as read-only. -type TaskRunLister interface { - // List lists all TaskRuns in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.TaskRun, err error) - // TaskRuns returns an object that can list and get TaskRuns. 
- TaskRuns(namespace string) TaskRunNamespaceLister - TaskRunListerExpansion -} - -// taskRunLister implements the TaskRunLister interface. -type taskRunLister struct { - indexer cache.Indexer -} - -// NewTaskRunLister returns a new TaskRunLister. -func NewTaskRunLister(indexer cache.Indexer) TaskRunLister { - return &taskRunLister{indexer: indexer} -} - -// List lists all TaskRuns in the indexer. -func (s *taskRunLister) List(selector labels.Selector) (ret []*v1alpha1.TaskRun, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.TaskRun)) - }) - return ret, err -} - -// TaskRuns returns an object that can list and get TaskRuns. -func (s *taskRunLister) TaskRuns(namespace string) TaskRunNamespaceLister { - return taskRunNamespaceLister{indexer: s.indexer, namespace: namespace} -} - -// TaskRunNamespaceLister helps list and get TaskRuns. -// All objects returned here must be treated as read-only. -type TaskRunNamespaceLister interface { - // List lists all TaskRuns in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*v1alpha1.TaskRun, err error) - // Get retrieves the TaskRun from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*v1alpha1.TaskRun, error) - TaskRunNamespaceListerExpansion -} - -// taskRunNamespaceLister implements the TaskRunNamespaceLister -// interface. -type taskRunNamespaceLister struct { - indexer cache.Indexer - namespace string -} - -// List lists all TaskRuns in the indexer for a given namespace. -func (s taskRunNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.TaskRun, err error) { - err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { - ret = append(ret, m.(*v1alpha1.TaskRun)) - }) - return ret, err -} - -// Get retrieves the TaskRun from the indexer for a given namespace and name. 
-func (s taskRunNamespaceLister) Get(name string) (*v1alpha1.TaskRun, error) { - obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1alpha1.Resource("taskrun"), name) - } - return obj.(*v1alpha1.TaskRun), nil -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/testing/configmap.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/testing/configmap.go new file mode 100644 index 0000000000..2a53124c2d --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/testing/configmap.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "fmt" + "io/ioutil" + "testing" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/yaml" +) + +// ConfigMapFromTestFile creates a v1.ConfigMap from a YAML file +// It loads the YAML file from the testdata folder. 
+func ConfigMapFromTestFile(t *testing.T, name string) *corev1.ConfigMap {
+	t.Helper()
+
+	b, err := ioutil.ReadFile(fmt.Sprintf("testdata/%s.yaml", name))
+	if err != nil {
+		t.Fatalf("ReadFile() = %v", err)
+	}
+
+	var cm corev1.ConfigMap
+
+	// Use "sigs.k8s.io/yaml" since it reads json struct
+	// tags so things unmarshal properly
+	if err := yaml.Unmarshal(b, &cm); err != nil {
+		t.Fatalf("yaml.Unmarshal() = %v", err)
+	}
+
+	return &cm
+}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/reconciler/testing/logger.go b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/testing/logger.go
new file mode 100644
index 0000000000..e53ee569dc
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/pkg/reconciler/testing/logger.go
@@ -0,0 +1,73 @@
+package testing
+
+import (
+	"context"
+	"testing"
+
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/record"
+	filteredinformerfactory "knative.dev/pkg/client/injection/kube/informers/factory/filtered"
+	"knative.dev/pkg/injection"
+	logtesting "knative.dev/pkg/logging/testing"
+
+	"github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zaptest"
+	"knative.dev/pkg/controller"
+	"knative.dev/pkg/logging"
+
+	// Import for creating fake filtered factory in the test
+	_ "knative.dev/pkg/client/injection/kube/informers/factory/filtered/fake"
+)
+
+// SetupFakeContext sets up the Context and the fake filtered informers for the tests.
+func SetupFakeContext(t *testing.T) (context.Context, []controller.Informer) {
+	ctx, _, informer := setupFakeContextWithLabelKey(t)
+	cloudEventClientBehaviour := cloudevent.FakeClientBehaviour{
+		SendSuccessfully: true,
+	}
+	ctx = cloudevent.WithClient(ctx, &cloudEventClientBehaviour)
+	return WithLogger(ctx, t), informer
+}
+
+// SetupDefaultContext sets up the Context and the default filtered informers for the tests.
+func SetupDefaultContext(t *testing.T) (context.Context, []controller.Informer) {
+	ctx, _, informer := setupDefaultContextWithLabelKey(t)
+	cloudEventClientBehaviour := cloudevent.FakeClientBehaviour{
+		SendSuccessfully: true,
+	}
+	ctx = cloudevent.WithClient(ctx, &cloudEventClientBehaviour)
+	return WithLogger(ctx, t), informer
+}
+
+// WithLogger returns the Logger
+func WithLogger(ctx context.Context, t *testing.T) context.Context {
+	return logging.WithLogger(ctx, TestLogger(t))
+}
+
+// TestLogger sets up the Logger
+func TestLogger(t *testing.T) *zap.SugaredLogger {
+	logger := zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller()))
+	return logger.Sugar().Named(t.Name())
+}
+
+// setupFakeContextWithLabelKey sets up the Context and the fake informers for the tests
+// The provided context includes the FilteredInformerFactory LabelKey.
+func setupFakeContextWithLabelKey(t zaptest.TestingT) (context.Context, context.CancelFunc, []controller.Informer) {
+	ctx, c := context.WithCancel(logtesting.TestContextWithLogger(t))
+	ctx = controller.WithEventRecorder(ctx, record.NewFakeRecorder(1000))
+	ctx = filteredinformerfactory.WithSelectors(ctx, v1beta1.ManagedByLabelKey)
+	ctx, is := injection.Fake.SetupInformers(ctx, &rest.Config{})
+	return ctx, c, is
+}
+
+// setupDefaultContextWithLabelKey sets up the Context and the default informers for the tests
+// The provided context includes the FilteredInformerFactory LabelKey.
+func setupDefaultContextWithLabelKey(t zaptest.TestingT) (context.Context, context.CancelFunc, []controller.Informer) {
+	ctx, c := context.WithCancel(logtesting.TestContextWithLogger(t))
+	ctx = controller.WithEventRecorder(ctx, record.NewFakeRecorder(1000))
+	ctx = filteredinformerfactory.WithSelectors(ctx, v1beta1.ManagedByLabelKey)
+	ctx, is := injection.Default.SetupInformers(ctx, &rest.Config{})
+	return ctx, c, is
+}
diff --git a/vendor/github.com/tektoncd/pipeline/pkg/spire/config/config.go b/vendor/github.com/tektoncd/pipeline/pkg/spire/config/config.go
new file mode 100644
index 0000000000..fe08f2cf48
--- /dev/null
+++ b/vendor/github.com/tektoncd/pipeline/pkg/spire/config/config.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2022 The Tekton Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// SpireConfig holds the SPIRE settings (trust domain, agent socket, server
+// address, node alias prefix) used across tektoncd pipelines.
+type SpireConfig struct {
+	// The trust domain corresponds to the trust root of a SPIFFE identity provider.
+ TrustDomain string + // Path to the spire agent socket defined by the CSI driver + SocketPath string + // Spire server address + ServerAddr string + // Prefix to attach to the node name when registering it with the spire server + NodeAliasPrefix string + + // MockSpire only to be used for testing the controller, will not exhibit + // all characteristics of spire since it is only being used in the context + // of process memory. + MockSpire bool +} + +// Validate returns an error if any image is not set. +func (c SpireConfig) Validate() error { + var unset []string + for _, f := range []struct { + v, name string + }{ + {c.TrustDomain, "spire-trust-domain"}, + {c.SocketPath, "spire-socket-path"}, + {c.ServerAddr, "spire-server-addr"}, + {c.NodeAliasPrefix, "spire-node-alias-prefix"}, + } { + if f.v == "" { + unset = append(unset, f.name) + } + } + if len(unset) > 0 { + sort.Strings(unset) + return fmt.Errorf("found unset image flags: %s", unset) + } + + if !strings.HasPrefix(c.NodeAliasPrefix, "/") { + return fmt.Errorf("Spire node alias should start with a /") + } + + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/spire/controller.go b/vendor/github.com/tektoncd/pipeline/pkg/spire/controller.go new file mode 100644 index 0000000000..410c9c2ad6 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/spire/controller.go @@ -0,0 +1,314 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spire + +import ( + "context" + "fmt" + "time" + + "github.com/pkg/errors" + "github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/spiffe/go-spiffe/v2/workloadapi" + entryv1 "github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1" + spiffetypes "github.com/spiffe/spire-api-sdk/proto/spire/api/types" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "knative.dev/pkg/injection" + "knative.dev/pkg/logging" +) + +func init() { + injection.Default.RegisterClient(withControllerClient) +} + +// controllerKey is a way to associate the ControllerAPIClient from inside the context.Context +type controllerKey struct{} + +// GetControllerAPIClient extracts the ControllerAPIClient from the context. 
+func GetControllerAPIClient(ctx context.Context) ControllerAPIClient { + untyped := ctx.Value(controllerKey{}) + if untyped == nil { + logging.FromContext(ctx).Errorf("Unable to fetch client from context.") + return nil + } + return untyped.(*spireControllerAPIClient) +} + +func withControllerClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, controllerKey{}, &spireControllerAPIClient{}) +} + +type spireControllerAPIClient struct { + config *spireconfig.SpireConfig + serverConn *grpc.ClientConn + workloadConn *workloadapi.X509Source + entryClient entryv1.EntryClient + workloadAPI *workloadapi.Client +} + +func (sc *spireControllerAPIClient) setupClient(ctx context.Context) error { + if sc.config == nil { + return errors.New("config has not been set yet") + } + if sc.entryClient == nil || sc.workloadConn == nil || sc.workloadAPI == nil || sc.serverConn == nil { + return sc.dial(ctx) + } + return nil +} + +func (sc *spireControllerAPIClient) dial(ctx context.Context) error { + if sc.workloadConn == nil { + // Create X509Source - https://github.com/spiffe/go-spiffe/blob/main/v2/workloadapi/client.go + source, err := workloadapi.NewX509Source(ctx, workloadapi.WithClientOptions(workloadapi.WithAddr(sc.config.SocketPath))) + if err != nil { + return fmt.Errorf("unable to create X509Source for SPIFFE client: %w", err) + } + sc.workloadConn = source + } + + if sc.workloadAPI == nil { + // spire workloadapi client for controller - https://github.com/spiffe/go-spiffe/blob/main/v2/workloadapi/client.go + client, err := workloadapi.New(ctx, workloadapi.WithAddr(sc.config.SocketPath)) + if err != nil { + return fmt.Errorf("spire workload API not initialized due to error: %w", err) + } + sc.workloadAPI = client + } + + if sc.serverConn == nil { + // Create connection to spire server + tlsConfig := tlsconfig.MTLSClientConfig(sc.workloadConn, sc.workloadConn, tlsconfig.AuthorizeAny()) + conn, err := grpc.DialContext(ctx, 
sc.config.ServerAddr, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + if err != nil { + sc.workloadConn.Close() + sc.workloadConn = nil + return fmt.Errorf("unable to dial SPIRE server: %w", err) + } + sc.serverConn = conn + } + + if sc.entryClient == nil { + sc.entryClient = entryv1.NewEntryClient(sc.serverConn) + } + + return nil +} + +// SetConfig sets the spire configuration for ControllerAPIClient +func (sc *spireControllerAPIClient) SetConfig(c spireconfig.SpireConfig) { + sc.config = &c +} + +func (sc *spireControllerAPIClient) fetchControllerSVID(ctx context.Context) (*x509svid.SVID, error) { + xsvid, err := sc.workloadAPI.FetchX509SVID(ctx) + if err != nil { + return nil, fmt.Errorf("failed to fetch controller SVID: %w", err) + } + return xsvid, nil +} + +func (sc *spireControllerAPIClient) nodeEntry(nodeName string) *spiffetypes.Entry { + selectors := []*spiffetypes.Selector{ + { + Type: "k8s_psat", + Value: "agent_ns:spire", + }, + { + Type: "k8s_psat", + Value: "agent_node_name:" + nodeName, + }, + } + + return &spiffetypes.Entry{ + SpiffeId: &spiffetypes.SPIFFEID{ + TrustDomain: sc.config.TrustDomain, + Path: fmt.Sprintf("%v%v", sc.config.NodeAliasPrefix, nodeName), + }, + ParentId: &spiffetypes.SPIFFEID{ + TrustDomain: sc.config.TrustDomain, + Path: "/spire/server", + }, + Selectors: selectors, + } +} + +func (sc *spireControllerAPIClient) workloadEntry(tr *v1beta1.TaskRun, pod *corev1.Pod, expiry int64) *spiffetypes.Entry { + // Note: We can potentially add attestation on the container images as well since + // the information is available here. 
+ selectors := []*spiffetypes.Selector{ + { + Type: "k8s", + Value: "pod-uid:" + string(pod.UID), + }, + { + Type: "k8s", + Value: "pod-name:" + pod.Name, + }, + } + + return &spiffetypes.Entry{ + SpiffeId: &spiffetypes.SPIFFEID{ + TrustDomain: sc.config.TrustDomain, + Path: fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name), + }, + ParentId: &spiffetypes.SPIFFEID{ + TrustDomain: sc.config.TrustDomain, + Path: fmt.Sprintf("%v%v", sc.config.NodeAliasPrefix, pod.Spec.NodeName), + }, + Selectors: selectors, + ExpiresAt: expiry, + } +} + +// ttl is the TTL for the SPIRE entry in seconds, not the SVID TTL +func (sc *spireControllerAPIClient) CreateEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod, ttl time.Duration) error { + err := sc.setupClient(ctx) + if err != nil { + return err + } + + expiryTime := time.Now().Unix() + int64(ttl) + entries := []*spiffetypes.Entry{ + sc.nodeEntry(pod.Spec.NodeName), + sc.workloadEntry(tr, pod, expiryTime), + } + + req := entryv1.BatchCreateEntryRequest{ + Entries: entries, + } + + resp, err := sc.entryClient.BatchCreateEntry(ctx, &req) + if err != nil { + return err + } + + if len(resp.Results) != len(entries) { + return fmt.Errorf("batch create entry failed, malformed response expected %v result", len(entries)) + } + + var errPaths []string + var errCodes []int32 + + for _, r := range resp.Results { + if codes.Code(r.Status.Code) != codes.AlreadyExists && + codes.Code(r.Status.Code) != codes.OK { + errPaths = append(errPaths, r.Entry.SpiffeId.Path) + errCodes = append(errCodes, r.Status.Code) + } + } + + if len(errPaths) != 0 { + return fmt.Errorf("batch create entry failed for entries %+v with codes %+v", errPaths, errCodes) + } + return nil +} + +func (sc *spireControllerAPIClient) getEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) ([]*spiffetypes.Entry, error) { + req := &entryv1.ListEntriesRequest{ + Filter: &entryv1.ListEntriesRequest_Filter{ + BySpiffeId: &spiffetypes.SPIFFEID{ + 
TrustDomain: sc.config.TrustDomain, + Path: fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name), + }, + }, + } + + entries := []*spiffetypes.Entry{} + for { + resp, err := sc.entryClient.ListEntries(ctx, req) + if err != nil { + return nil, err + } + + entries = append(entries, resp.Entries...) + + if resp.NextPageToken == "" { + break + } + + req.PageToken = resp.NextPageToken + } + + return entries, nil +} + +func (sc *spireControllerAPIClient) DeleteEntry(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) error { + entries, err := sc.getEntries(ctx, tr, pod) + if err != nil { + return err + } + + var ids []string + for _, e := range entries { + ids = append(ids, e.Id) + } + + req := &entryv1.BatchDeleteEntryRequest{ + Ids: ids, + } + resp, err := sc.entryClient.BatchDeleteEntry(ctx, req) + if err != nil { + return err + } + + var errIds []string + var errCodes []int32 + + for _, r := range resp.Results { + if codes.Code(r.Status.Code) != codes.NotFound && + codes.Code(r.Status.Code) != codes.OK { + errIds = append(errIds, r.Id) + errCodes = append(errCodes, r.Status.Code) + } + } + + if len(errIds) != 0 { + return fmt.Errorf("batch delete entry failed for ids %+v with codes %+v", errIds, errCodes) + } + + return nil +} + +func (sc *spireControllerAPIClient) Close() error { + var err error + if sc.serverConn != nil { + err = sc.serverConn.Close() + if err != nil { + return err + } + } + if sc.workloadAPI != nil { + err = sc.workloadAPI.Close() + if err != nil { + return err + } + } + if sc.workloadConn != nil { + err = sc.workloadConn.Close() + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/spire/entrypointer.go b/vendor/github.com/tektoncd/pipeline/pkg/spire/entrypointer.go new file mode 100644 index 0000000000..33b529568f --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/spire/entrypointer.go @@ -0,0 +1,86 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache 
License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spire + +import ( + "context" + "time" + + "github.com/pkg/errors" + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/spiffe/go-spiffe/v2/workloadapi" + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" +) + +// NewEntrypointerAPIClient creates the EntrypointerAPIClient +func NewEntrypointerAPIClient(c *spireconfig.SpireConfig) EntrypointerAPIClient { + return &spireEntrypointerAPIClient{ + config: c, + } +} + +type spireEntrypointerAPIClient struct { + config *spireconfig.SpireConfig + client *workloadapi.Client +} + +func (w *spireEntrypointerAPIClient) setupClient(ctx context.Context) error { + if w.config == nil { + return errors.New("config has not been set yet") + } + if w.client == nil { + return w.dial(ctx) + } + return nil +} + +func (w *spireEntrypointerAPIClient) dial(ctx context.Context) error { + // spire workloadapi client for entrypoint - https://github.com/spiffe/go-spiffe/blob/main/v2/workloadapi/client.go + client, err := workloadapi.New(ctx, workloadapi.WithAddr(w.config.SocketPath)) + if err != nil { + return errors.Wrap(err, "spire workload API not initialized due to error") + } + w.client = client + return nil +} + +func (w *spireEntrypointerAPIClient) getWorkloadSVID(ctx context.Context) (*x509svid.SVID, error) { + backOff := 2 + var xsvid *x509svid.SVID + var err error + for i := 0; i < 20; i += backOff { + xsvid, err = w.client.FetchX509SVID(ctx) + if err == nil { + break + } + 
time.Sleep(time.Duration(backOff) * time.Second) + } + if xsvid != nil && len(xsvid.Certificates) > 0 { + return xsvid, nil + } + return nil, errors.Wrap(err, "requested SVID failed to get fetched and timed out") +} + +func (w *spireEntrypointerAPIClient) Close() error { + if w.client != nil { + err := w.client.Close() + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/spire/sign.go b/vendor/github.com/tektoncd/pipeline/pkg/spire/sign.go new file mode 100644 index 0000000000..730d0c4178 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/spire/sign.go @@ -0,0 +1,148 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spire + +import ( + "context" + "crypto" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/pem" + "strings" + + "github.com/spiffe/go-spiffe/v2/svid/x509svid" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" +) + +// Signs the TaskRun results with the TaskRun spire SVID and appends the results to PipelineResourceResult +func (w *spireEntrypointerAPIClient) Sign(ctx context.Context, results []v1beta1.PipelineResourceResult) ([]v1beta1.PipelineResourceResult, error) { + err := w.setupClient(ctx) + if err != nil { + return nil, err + } + + xsvid, err := w.getWorkloadSVID(ctx) + if err != nil { + return nil, err + } + + output := []v1beta1.PipelineResourceResult{} + p := pem.EncodeToMemory(&pem.Block{ + Bytes: xsvid.Certificates[0].Raw, + Type: "CERTIFICATE", + }) + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeySVID, + Value: string(p), + ResultType: v1beta1.TaskRunResultType, + }) + + for _, r := range results { + if r.ResultType == v1beta1.TaskRunResultType { + resultValue, err := getResultValue(r) + if err != nil { + return nil, err + } + s, err := signWithKey(xsvid, resultValue) + if err != nil { + return nil, err + } + output = append(output, v1beta1.PipelineResourceResult{ + Key: r.Key + KeySignatureSuffix, + Value: base64.StdEncoding.EncodeToString(s), + ResultType: v1beta1.TaskRunResultType, + }) + } + } + // get complete manifest of keys such that it can be verified + manifest := getManifest(results) + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeyResultManifest, + Value: manifest, + ResultType: v1beta1.TaskRunResultType, + }) + manifestSig, err := signWithKey(xsvid, manifest) + if err != nil { + return nil, err + } + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeyResultManifest + KeySignatureSuffix, + Value: base64.StdEncoding.EncodeToString(manifestSig), + ResultType: v1beta1.TaskRunResultType, + }) + + return output, nil +} + +func signWithKey(xsvid 
*x509svid.SVID, value string) ([]byte, error) { + dgst := sha256.Sum256([]byte(value)) + s, err := xsvid.PrivateKey.Sign(rand.Reader, dgst[:], crypto.SHA256) + if err != nil { + return nil, err + } + return s, nil +} + +func getManifest(results []v1beta1.PipelineResourceResult) string { + keys := []string{} + for _, r := range results { + if strings.HasSuffix(r.Key, KeySignatureSuffix) || r.Key == KeySVID || r.ResultType != v1beta1.TaskRunResultType { + continue + } + keys = append(keys, r.Key) + } + return strings.Join(keys, ",") +} + +// AppendStatusInternalAnnotation creates the status annotations which are used by the controller to verify the status hash +func (sc *spireControllerAPIClient) AppendStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun) error { + err := sc.setupClient(ctx) + if err != nil { + return err + } + + // Add status hash + currentHash, err := hashTaskrunStatusInternal(tr) + if err != nil { + return err + } + + // Sign with controller private key + xsvid, err := sc.fetchControllerSVID(ctx) + if err != nil { + return err + } + + sig, err := signWithKey(xsvid, currentHash) + if err != nil { + return err + } + + // Store Controller SVID + p := pem.EncodeToMemory(&pem.Block{ + Bytes: xsvid.Certificates[0].Raw, + Type: "CERTIFICATE", + }) + if tr.Status.Annotations == nil { + tr.Status.Annotations = map[string]string{} + } + tr.Status.Annotations[controllerSvidAnnotation] = string(p) + tr.Status.Annotations[TaskRunStatusHashAnnotation] = currentHash + tr.Status.Annotations[taskRunStatusHashSigAnnotation] = base64.StdEncoding.EncodeToString(sig) + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/spire/spire.go b/vendor/github.com/tektoncd/pipeline/pkg/spire/spire.go new file mode 100644 index 0000000000..708ada1685 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/spire/spire.go @@ -0,0 +1,75 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you 
may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// The spire package is used to interact with the Spire server and Spire agent respectively. +// The pipeline controller (once registered) is able to create and delete entries in the Spire server +// for the various TaskRuns that it instantiates. The TaskRun is able to attest to the Spire agent +// and obtains the valid SVID (SPIFFE Verifiable Identity Document) to sign the TaskRun results. +// Separately, the pipeline controller SVID is used to sign the TaskRun Status to validate no modification +// during the TaskRun execution. Each TaskRun result and status is verified and validated once the +// TaskRun execution is completed. Tekton Chains will also validate the results and status before +// signing and creating attestation for the TaskRun. 
+package spire + +import ( + "context" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +const ( + // TaskRunStatusHashAnnotation TaskRun status annotation Hash Key + TaskRunStatusHashAnnotation = "tekton.dev/status-hash" + // taskRunStatusHashSigAnnotation TaskRun status annotation hash signature Key + taskRunStatusHashSigAnnotation = "tekton.dev/status-hash-sig" + // controllerSvidAnnotation TaskRun status annotation controller SVID Key + controllerSvidAnnotation = "tekton.dev/controller-svid" + // NotVerifiedAnnotation TaskRun status annotation not verified by spire key that get set when status match fails + NotVerifiedAnnotation = "tekton.dev/not-verified" + // KeySVID key used by TaskRun SVID + KeySVID = "SVID" + // KeySignatureSuffix is the suffix of the keys that contain signatures + KeySignatureSuffix = ".sig" + // KeyResultManifest key used to get the result manifest from the results + KeyResultManifest = "RESULT_MANIFEST" + // WorkloadAPI is the name of the SPIFFE/SPIRE CSI Driver volume + WorkloadAPI = "spiffe-workload-api" + // VolumeMountPath is the volume mount in the the pods to access the SPIFFE/SPIRE agent workload API + VolumeMountPath = "/spiffe-workload-api" +) + +// ControllerAPIClient interface maps to the spire controller API to interact with spire +type ControllerAPIClient interface { + AppendStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun) error + CheckSpireVerifiedFlag(tr *v1beta1.TaskRun) bool + Close() error + CreateEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod, ttl time.Duration) error + DeleteEntry(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) error + VerifyStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun, logger *zap.SugaredLogger) error + VerifyTaskRunResults(ctx context.Context, prs []v1beta1.PipelineResourceResult, tr 
*v1beta1.TaskRun) error + SetConfig(c spireconfig.SpireConfig) +} + +// EntrypointerAPIClient interface maps to the spire entrypointer API to interact with spire +type EntrypointerAPIClient interface { + Close() error + // Sign returns the signature material to be put in the PipelineResourceResult to append to the output results + Sign(ctx context.Context, results []v1beta1.PipelineResourceResult) ([]v1beta1.PipelineResourceResult, error) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/spire/spire_mock.go b/vendor/github.com/tektoncd/pipeline/pkg/spire/spire_mock.go new file mode 100644 index 0000000000..bc878ff34e --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/spire/spire_mock.go @@ -0,0 +1,314 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spire + +import ( + "context" + "crypto/sha256" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + spireconfig "github.com/tektoncd/pipeline/pkg/spire/config" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/rest" + "knative.dev/pkg/injection" +) + +func init() { + injection.Fake.RegisterClient(withFakeControllerClient) +} + +func withFakeControllerClient(ctx context.Context, cfg *rest.Config) context.Context { + return context.WithValue(ctx, controllerKey{}, &spireControllerAPIClient{}) +} + +// MockClient is a client used for mocking the this package for unit testing +// other tekton components that use the spire entrypointer or controller client. +// +// The MockClient implements both SpireControllerApiClient and SpireEntrypointerApiClient +// and in addition to that provides the helper functions to define and query internal state. +type MockClient struct { + // Entries is a dictionary of entries that mock the SPIRE server datastore (for function Sign only) + Entries map[string]bool + + // SignIdentities represents the list of identities to use to sign (providing context of a caller to Sign) + // when Sign is called, the identity is dequeued from the slice. A signature will only be provided if the + // corresponding entry is in Entries. This only takes effect if SignOverride is nil. + SignIdentities []string + + // VerifyAlwaysReturns defines whether to always verify successfully or to always fail verification if non-nil. 
+ // This only take effect on Verify functions: + // - VerifyStatusInternalAnnotationOverride + // - VerifyTaskRunResultsOverride + VerifyAlwaysReturns *bool + + // VerifyStatusInternalAnnotationOverride contains the function to overwrite a call to VerifyStatusInternalAnnotation + VerifyStatusInternalAnnotationOverride func(ctx context.Context, tr *v1beta1.TaskRun, logger *zap.SugaredLogger) error + + // VerifyTaskRunResultsOverride contains the function to overwrite a call to VerifyTaskRunResults + VerifyTaskRunResultsOverride func(ctx context.Context, prs []v1beta1.PipelineResourceResult, tr *v1beta1.TaskRun) error + + // AppendStatusInternalAnnotationOverride contains the function to overwrite a call to AppendStatusInternalAnnotation + AppendStatusInternalAnnotationOverride func(ctx context.Context, tr *v1beta1.TaskRun) error + + // CheckSpireVerifiedFlagOverride contains the function to overwrite a call to CheckSpireVerifiedFlag + CheckSpireVerifiedFlagOverride func(tr *v1beta1.TaskRun) bool + + // SignOverride contains the function to overwrite a call to Sign + SignOverride func(ctx context.Context, results []v1beta1.PipelineResourceResult) ([]v1beta1.PipelineResourceResult, error) +} + +const ( + controllerSvid = "CONTROLLER_SVID_DATA" +) + +func (*MockClient) mockSign(content, signedBy string) string { + return fmt.Sprintf("signed-by-%s:%x", signedBy, sha256.Sum256([]byte(content))) +} + +func (sc *MockClient) mockVerify(content, sig, signedBy string) bool { + return sig == sc.mockSign(content, signedBy) +} + +// GetIdentity get the taskrun namespace and taskrun name that is used for signing and verifying in mocked spire +func (*MockClient) GetIdentity(tr *v1beta1.TaskRun) string { + return fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name) +} + +// AppendStatusInternalAnnotation creates the status annotations which are used by the controller to verify the status hash +func (sc *MockClient) AppendStatusInternalAnnotation(ctx context.Context, tr 
*v1beta1.TaskRun) error { + if sc.AppendStatusInternalAnnotationOverride != nil { + return sc.AppendStatusInternalAnnotationOverride(ctx, tr) + } + // Add status hash + currentHash, err := hashTaskrunStatusInternal(tr) + if err != nil { + return err + } + + if tr.Status.Annotations == nil { + tr.Status.Annotations = map[string]string{} + } + tr.Status.Annotations[controllerSvidAnnotation] = controllerSvid + tr.Status.Annotations[TaskRunStatusHashAnnotation] = currentHash + tr.Status.Annotations[taskRunStatusHashSigAnnotation] = sc.mockSign(currentHash, "controller") + return nil +} + +// CheckSpireVerifiedFlag checks if the not-verified status annotation is set which would result in spire verification failed +func (sc *MockClient) CheckSpireVerifiedFlag(tr *v1beta1.TaskRun) bool { + if sc.CheckSpireVerifiedFlagOverride != nil { + return sc.CheckSpireVerifiedFlagOverride(tr) + } + + if _, notVerified := tr.Status.Annotations[NotVerifiedAnnotation]; !notVerified { + return true + } + return false +} + +// CreateEntries adds entries to the dictionary of entries that mock the SPIRE server datastore +func (sc *MockClient) CreateEntries(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod, ttl time.Duration) error { + id := fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name) + if sc.Entries == nil { + sc.Entries = map[string]bool{} + } + sc.Entries[id] = true + return nil +} + +// DeleteEntry removes the entry from the dictionary of entries that mock the SPIRE server datastore +func (sc *MockClient) DeleteEntry(ctx context.Context, tr *v1beta1.TaskRun, pod *corev1.Pod) error { + id := fmt.Sprintf("/ns/%v/taskrun/%v", tr.Namespace, tr.Name) + if sc.Entries != nil { + delete(sc.Entries, id) + } + return nil +} + +// VerifyStatusInternalAnnotation checks that the internal status annotations are valid by the mocked spire client +func (sc *MockClient) VerifyStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun, logger *zap.SugaredLogger) error { + if 
sc.VerifyStatusInternalAnnotationOverride != nil { + return sc.VerifyStatusInternalAnnotationOverride(ctx, tr, logger) + } + + if sc.VerifyAlwaysReturns != nil { + if *sc.VerifyAlwaysReturns { + return nil + } + return errors.New("failed to verify from mock VerifyAlwaysReturns") + } + + if !sc.CheckSpireVerifiedFlag(tr) { + return errors.New("annotation tekton.dev/not-verified = yes failed spire verification") + } + + annotations := tr.Status.Annotations + + // Verify annotations are there + if annotations[controllerSvidAnnotation] != controllerSvid { + return errors.New("svid annotation missing") + } + + // Check signature + currentHash, err := hashTaskrunStatusInternal(tr) + if err != nil { + return err + } + if !sc.mockVerify(currentHash, annotations[taskRunStatusHashSigAnnotation], "controller") { + return errors.New("signature was not able to be verified") + } + + // check current status hash vs annotation status hash by controller + if err := CheckStatusInternalAnnotation(tr); err != nil { + return err + } + + return nil +} + +// VerifyTaskRunResults checks that all the TaskRun results are valid by the mocked spire client +func (sc *MockClient) VerifyTaskRunResults(ctx context.Context, prs []v1beta1.PipelineResourceResult, tr *v1beta1.TaskRun) error { + if sc.VerifyTaskRunResultsOverride != nil { + return sc.VerifyTaskRunResultsOverride(ctx, prs, tr) + } + + if sc.VerifyAlwaysReturns != nil { + if *sc.VerifyAlwaysReturns { + return nil + } + return errors.New("failed to verify from mock VerifyAlwaysReturns") + } + + resultMap := map[string]v1beta1.PipelineResourceResult{} + for _, r := range prs { + if r.ResultType == v1beta1.TaskRunResultType { + resultMap[r.Key] = r + } + } + + var identity string + // Get SVID identity + for k, p := range resultMap { + if k == KeySVID { + identity = p.Value + break + } + } + + // Verify manifest + if err := verifyManifest(resultMap); err != nil { + return err + } + + if identity != sc.GetIdentity(tr) { + return 
errors.New("mock identity did not match") + } + + for key, r := range resultMap { + if strings.HasSuffix(key, KeySignatureSuffix) { + continue + } + if key == KeySVID { + continue + } + + sigEntry, ok := resultMap[key+KeySignatureSuffix] + sigValue, err := getResultValue(sigEntry) + if err != nil { + return err + } + resultValue, err := getResultValue(r) + if err != nil { + return err + } + if !ok || !sc.mockVerify(resultValue, sigValue, identity) { + return errors.Errorf("failed to verify field: %v", key) + } + } + + return nil +} + +// Sign signs and appends signatures to the PipelineResourceResult based on the mocked spire client +func (sc *MockClient) Sign(ctx context.Context, results []v1beta1.PipelineResourceResult) ([]v1beta1.PipelineResourceResult, error) { + if sc.SignOverride != nil { + return sc.SignOverride(ctx, results) + } + + if len(sc.SignIdentities) == 0 { + return nil, errors.New("signIdentities empty, please provide identities to sign with the MockClient.GetIdentity function") + } + + identity := sc.SignIdentities[0] + sc.SignIdentities = sc.SignIdentities[1:] + + if !sc.Entries[identity] { + return nil, errors.Errorf("entry doesn't exist for identity: %v", identity) + } + + output := []v1beta1.PipelineResourceResult{} + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeySVID, + Value: identity, + ResultType: v1beta1.TaskRunResultType, + }) + + for _, r := range results { + if r.ResultType == v1beta1.TaskRunResultType { + resultValue, err := getResultValue(r) + if err != nil { + return nil, err + } + s := sc.mockSign(resultValue, identity) + output = append(output, v1beta1.PipelineResourceResult{ + Key: r.Key + KeySignatureSuffix, + Value: s, + ResultType: v1beta1.TaskRunResultType, + }) + } + } + // get complete manifest of keys such that it can be verified + manifest := getManifest(results) + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeyResultManifest, + Value: manifest, + ResultType: 
v1beta1.TaskRunResultType, + }) + manifestSig := sc.mockSign(manifest, identity) + output = append(output, v1beta1.PipelineResourceResult{ + Key: KeyResultManifest + KeySignatureSuffix, + Value: manifestSig, + ResultType: v1beta1.TaskRunResultType, + }) + + return output, nil +} + +// Close mock closing the spire client connection +func (sc *MockClient) Close() error { + return nil +} + +// SetConfig sets the spire configuration for MockClient +func (sc *MockClient) SetConfig(c spireconfig.SpireConfig) { + return +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/spire/verify.go b/vendor/github.com/tektoncd/pipeline/pkg/spire/verify.go new file mode 100644 index 0000000000..b32b7489bd --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/pkg/spire/verify.go @@ -0,0 +1,354 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spire + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "sort" + "strings" + + "github.com/pkg/errors" + "go.uber.org/zap" + + "github.com/spiffe/go-spiffe/v2/workloadapi" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" +) + +// VerifyTaskRunResults ensures that the TaskRun results are valid and have not been tampered with +func (sc *spireControllerAPIClient) VerifyTaskRunResults(ctx context.Context, prs []v1beta1.PipelineResourceResult, tr *v1beta1.TaskRun) error { + err := sc.setupClient(ctx) + if err != nil { + return err + } + + resultMap := map[string]v1beta1.PipelineResourceResult{} + for _, r := range prs { + if r.ResultType == v1beta1.TaskRunResultType { + resultMap[r.Key] = r + } + } + + cert, err := getSVID(resultMap) + if err != nil { + return err + } + + trust, err := getTrustBundle(ctx, sc.workloadAPI) + if err != nil { + return err + } + + if err := verifyManifest(resultMap); err != nil { + return err + } + + if err := verifyCertURI(cert, tr, sc.config.TrustDomain); err != nil { + return err + } + + if err := verifyCertificateTrust(cert, trust); err != nil { + return err + } + + for key := range resultMap { + if strings.HasSuffix(key, KeySignatureSuffix) { + continue + } + if key == KeySVID { + continue + } + if err := verifyResult(cert.PublicKey, key, resultMap); err != nil { + return err + } + } + + return nil +} + +// VerifyStatusInternalAnnotation run multuple verification steps to ensure that the spire status annotations are valid +func (sc *spireControllerAPIClient) VerifyStatusInternalAnnotation(ctx context.Context, tr *v1beta1.TaskRun, logger *zap.SugaredLogger) error { + err := sc.setupClient(ctx) + if err != nil { + return err + } + + if !sc.CheckSpireVerifiedFlag(tr) { + return errors.New("annotation tekton.dev/not-verified = yes failed spire verification") + } + + annotations := 
tr.Status.Annotations + + // get trust bundle from spire server + trust, err := getTrustBundle(ctx, sc.workloadAPI) + if err != nil { + return err + } + + // verify controller SVID + svid, ok := annotations[controllerSvidAnnotation] + if !ok { + return errors.New("No SVID found") + } + block, _ := pem.Decode([]byte(svid)) + if block == nil { + return fmt.Errorf("invalid SVID: %w", err) + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return fmt.Errorf("invalid SVID: %w", err) + } + + // verify certificate root of trust + if err := verifyCertificateTrust(cert, trust); err != nil { + return err + } + logger.Infof("Successfully verified certificate %s against SPIRE", svid) + + if err := verifyAnnotation(cert.PublicKey, annotations); err != nil { + return err + } + logger.Info("Successfully verified signature") + + // CheckStatusInternalAnnotation check current status hash vs annotation status hash by controller + if err := CheckStatusInternalAnnotation(tr); err != nil { + return err + } + logger.Info("Successfully verified status annotation hash matches the current taskrun status") + + return nil +} + +// CheckSpireVerifiedFlag checks if the not-verified status annotation is set which would result in spire verification failed +func (sc *spireControllerAPIClient) CheckSpireVerifiedFlag(tr *v1beta1.TaskRun) bool { + if _, notVerified := tr.Status.Annotations[NotVerifiedAnnotation]; !notVerified { + return true + } + return false +} + +func hashTaskrunStatusInternal(tr *v1beta1.TaskRun) (string, error) { + s, err := json.Marshal(tr.Status.TaskRunStatusFields) + if err != nil { + return "", err + } + return fmt.Sprintf("%x", sha256.Sum256(s)), nil +} + +// CheckStatusInternalAnnotation ensures that the internal status annotation hash and current status hash match +func CheckStatusInternalAnnotation(tr *v1beta1.TaskRun) error { + // get stored hash of status + annotations := tr.Status.Annotations + hash, ok := 
annotations[TaskRunStatusHashAnnotation] + if !ok { + return fmt.Errorf("no annotation status hash found for %s", TaskRunStatusHashAnnotation) + } + // get current hash of status + current, err := hashTaskrunStatusInternal(tr) + if err != nil { + return err + } + if hash != current { + return fmt.Errorf("current status hash and stored annotation hash does not match! Annotation Hash: %s, Current Status Hash: %s", hash, current) + } + + return nil +} + +func getSVID(resultMap map[string]v1beta1.PipelineResourceResult) (*x509.Certificate, error) { + svid, ok := resultMap[KeySVID] + if !ok { + return nil, errors.New("no SVID found") + } + svidValue, err := getResultValue(svid) + if err != nil { + return nil, err + } + block, _ := pem.Decode([]byte(svidValue)) + if block == nil { + return nil, fmt.Errorf("invalid SVID: %w", err) + } + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return nil, fmt.Errorf("invalid SVID: %w", err) + } + return cert, nil +} + +func getTrustBundle(ctx context.Context, client *workloadapi.Client) (*x509.CertPool, error) { + x509set, err := client.FetchX509Bundles(ctx) + if err != nil { + return nil, err + } + x509Bundle := x509set.Bundles() + if err != nil { + return nil, err + } + if len(x509Bundle) > 0 { + trustPool := x509.NewCertPool() + for _, bundle := range x509Bundle { + for _, c := range bundle.X509Authorities() { + trustPool.AddCert(c) + } + return trustPool, nil + } + } + return nil, errors.Wrap(err, "trust domain bundle empty") +} + +func getFullPath(tr *v1beta1.TaskRun) string { + // URI:spiffe://example.org/ns/default/taskrun/cache-image-pipelinerun-r4r22-fetch-from-git + return fmt.Sprintf("/ns/%s/taskrun/%s", tr.Namespace, tr.Name) +} + +func verifyCertURI(cert *x509.Certificate, tr *v1beta1.TaskRun, trustDomain string) error { + path := getFullPath(tr) + switch { + case len(cert.URIs) == 0: + return fmt.Errorf("cert uri missing for taskrun: %s", tr.Name) + case len(cert.URIs) > 1: + return 
fmt.Errorf("cert contains more than one URI for taskrun: %s", tr.Name) + case len(cert.URIs) == 1: + if cert.URIs[0].Host != trustDomain { + return fmt.Errorf("cert uri: %s does not match trust domain: %s", cert.URIs[0].Host, trustDomain) + } + if cert.URIs[0].Path != path { + return fmt.Errorf("cert uri: %s does not match taskrun: %s", cert.URIs[0].Path, path) + } + } + return nil +} + +func verifyCertificateTrust(cert *x509.Certificate, rootCertPool *x509.CertPool) error { + verifyOptions := x509.VerifyOptions{ + Roots: rootCertPool, + } + chains, err := cert.Verify(verifyOptions) + if len(chains) == 0 || err != nil { + return errors.New("cert cannot be verified by provided roots") + } + return nil +} + +func verifyManifest(results map[string]v1beta1.PipelineResourceResult) error { + manifest, ok := results[KeyResultManifest] + if !ok { + return errors.New("no manifest found in results") + } + manifestValue, err := getResultValue(manifest) + if err != nil { + return err + } + s := strings.Split(manifestValue, ",") + for _, key := range s { + _, found := results[key] + if key != "" && !found { + return fmt.Errorf("no result found for %s but is part of the manifest %s", key, manifestValue) + } + } + return nil +} + +func verifyAnnotation(pub interface{}, annotations map[string]string) error { + signature, ok := annotations[taskRunStatusHashSigAnnotation] + if !ok { + return fmt.Errorf("no signature found for %s", taskRunStatusHashSigAnnotation) + } + hash, ok := annotations[TaskRunStatusHashAnnotation] + if !ok { + return fmt.Errorf("no annotation status hash found for %s", TaskRunStatusHashAnnotation) + } + return verifySignature(pub, signature, hash) +} + +func verifyResult(pub crypto.PublicKey, key string, results map[string]v1beta1.PipelineResourceResult) error { + signature, ok := results[key+KeySignatureSuffix] + if !ok { + return fmt.Errorf("no signature found for %s", key) + } + sigValue, err := getResultValue(signature) + if err != nil { + return err + } + 
resultValue, err := getResultValue(results[key]) + if err != nil { + return err + } + return verifySignature(pub, sigValue, resultValue) +} + +func verifySignature(pub crypto.PublicKey, signature string, value string) error { + b, err := base64.StdEncoding.DecodeString(signature) + if err != nil { + return fmt.Errorf("invalid signature: %w", err) + } + h := sha256.Sum256([]byte(value)) + // Check val against sig + switch t := pub.(type) { + case *ecdsa.PublicKey: + if !ecdsa.VerifyASN1(t, h[:], b) { + return errors.New("invalid signature") + } + return nil + case *rsa.PublicKey: + return rsa.VerifyPKCS1v15(t, crypto.SHA256, h[:], b) + case ed25519.PublicKey: + if !ed25519.Verify(t, []byte(value), b) { + return errors.New("invalid signature") + } + return nil + default: + return fmt.Errorf("unsupported key type: %s", t) + } +} + +func getResultValue(result v1beta1.PipelineResourceResult) (string, error) { + aos := v1beta1.ArrayOrString{} + err := aos.UnmarshalJSON([]byte(result.Value)) + valList := []string{} + if err != nil { + return "", fmt.Errorf("unmarshal error for key: %s", result.Key) + } + switch aos.Type { + case v1beta1.ParamTypeString: + return aos.StringVal, nil + case v1beta1.ParamTypeArray: + valList = append(valList, aos.ArrayVal...) 
+ return strings.Join(valList, ","), nil + case v1beta1.ParamTypeObject: + keys := make([]string, len(aos.ObjectVal)) + for k := range aos.ObjectVal { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + valList = append(valList, k) + valList = append(valList, aos.ObjectVal[k]) + } + return strings.Join(valList, ","), nil + } + return "", fmt.Errorf("invalid result type for key: %s", result.Key) +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go index 62cef88f83..f98d03569b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go @@ -19,15 +19,34 @@ package substitution import ( "fmt" "regexp" + "strconv" "strings" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/apis" ) -const parameterSubstitution = `[_a-zA-Z][_a-zA-Z0-9.-]*(\[\*\])?` +const ( + parameterSubstitution = `.*?(\[\*\])?` -const braceMatchingRegex = "(\\$(\\(%s(\\.(?P%s)|\\[\"(?P%s)\"\\]|\\['(?P%s)'\\])\\)))" + // braceMatchingRegex is a regex for parameter references including dot notation, bracket notation with single and double quotes. 
+ braceMatchingRegex = "(\\$(\\(%s(\\.(?P%s)|\\[\"(?P%s)\"\\]|\\['(?P%s)'\\])\\)))" + // arrayIndexing will match all `[int]` and `[*]` for parseExpression + arrayIndexing = `\[([0-9])*\*?\]` + // paramIndex will match all `$(params.paramName[int])` expressions + paramIndexing = `\$\(params(\.[_a-zA-Z0-9.-]+|\[\'[_a-zA-Z0-9.-\/]+\'\]|\[\"[_a-zA-Z0-9.-\/]+\"\])\[[0-9]+\]\)` + // intIndex will match all `[int]` expressions + intIndex = `\[[0-9]+\]` +) + +// arrayIndexingRegex is used to match `[int]` and `[*]` +var arrayIndexingRegex = regexp.MustCompile(arrayIndexing) + +// paramIndexingRegex will match all `$(params.paramName[int])` expressions +var paramIndexingRegex = regexp.MustCompile(paramIndexing) + +// intIndexRegex will match all `[int]` for param expression +var intIndexRegex = regexp.MustCompile(intIndex) // ValidateVariable makes sure all variables in the provided string are known func ValidateVariable(name, value, prefix, locationName, path string, vars sets.String) *apis.FieldError { @@ -56,7 +75,7 @@ func ValidateVariableP(value, prefix string, vars sets.String) *apis.FieldError } for _, v := range vs { - v = strings.TrimSuffix(v, "[*]") + v = TrimArrayIndex(v) if !vars.Has(v) { return &apis.FieldError{ Message: fmt.Sprintf("non-existent variable in %q", value), @@ -180,6 +199,29 @@ func ValidateVariableIsolatedP(value, prefix string, vars sets.String) *apis.Fie return nil } +// ValidateWholeArrayOrObjectRefInStringVariable validates if a single string field uses references to the whole array/object appropriately +// valid example: "$(params.myObject[*])" +// invalid example: "$(params.name-not-exist[*])" +func ValidateWholeArrayOrObjectRefInStringVariable(name, value, prefix string, vars sets.String) (isIsolated bool, errs *apis.FieldError) { + nameSubstitution := `[_a-zA-Z0-9.-]+\[\*\]` + + // a regex to check if the stringValue is an isolated reference to the whole array/object param without extra string literal. 
+ isolatedVariablePattern := fmt.Sprintf(fmt.Sprintf("^%s$", braceMatchingRegex), prefix, nameSubstitution, nameSubstitution, nameSubstitution) + isolatedVariableRegex, err := regexp.Compile(isolatedVariablePattern) + if err != nil { + return false, &apis.FieldError{ + Message: fmt.Sprint("Fail to parse the regex: ", err), + Paths: []string{fmt.Sprintf("%s.%s", prefix, name)}, + } + } + + if isolatedVariableRegex.MatchString(value) { + return true, ValidateVariableP(value, prefix, vars).ViaFieldKey(prefix, name) + } + + return false, nil +} + // Extract a the first full string expressions found (e.g "$(input.params.foo)"). Return // "" and false if nothing is found. func extractExpressionFromString(s, prefix string) (string, bool) { @@ -299,3 +341,28 @@ func ApplyArrayReplacements(in string, stringReplacements map[string]string, arr // Otherwise return a size-1 array containing the input string with standard stringReplacements applied. return []string{ApplyReplacements(in, stringReplacements)} } + +// TrimArrayIndex replaces all `[i]` and `[*]` to "". 
+func TrimArrayIndex(s string) string { + return arrayIndexingRegex.ReplaceAllString(s, "") +} + +// ExtractParamsExpressions will find all `$(params.paramName[int])` expressions +func ExtractParamsExpressions(s string) []string { + return paramIndexingRegex.FindAllString(s, -1) +} + +// ExtractIndexString will find the leftmost match of `[int]` +func ExtractIndexString(s string) string { + return intIndexRegex.FindString(s) +} + +// ExtractIndex will extract int from `[int]` +func ExtractIndex(s string) (int, error) { + return strconv.Atoi(strings.TrimSuffix(strings.TrimPrefix(s, "["), "]")) +} + +// StripStarVarSubExpression strips "$(target[*])"" to get "target" +func StripStarVarSubExpression(s string) string { + return strings.TrimSuffix(strings.TrimSuffix(strings.TrimPrefix(s, "$("), ")"), "[*]") +} diff --git a/vendor/github.com/tektoncd/pipeline/test/controller.go b/vendor/github.com/tektoncd/pipeline/test/controller.go index 9844d3f12d..5c996054b5 100644 --- a/vendor/github.com/tektoncd/pipeline/test/controller.go +++ b/vendor/github.com/tektoncd/pipeline/test/controller.go @@ -25,6 +25,7 @@ import ( // Link in the fakes so they get injected into injection.Fake "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" fakepipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" informersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" informersv1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1" @@ -71,7 +72,7 @@ type Data struct { TaskRuns []*v1beta1.TaskRun Tasks []*v1beta1.Task ClusterTasks []*v1beta1.ClusterTask - PipelineResources []*v1alpha1.PipelineResource + PipelineResources []*resourcev1alpha1.PipelineResource Runs []*v1alpha1.Run Pods []*corev1.Pod Namespaces []*corev1.Namespace diff --git 
a/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh index 78958a00ea..76aec5c0f1 100644 --- a/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-common.sh @@ -20,8 +20,9 @@ source $(git rev-parse --show-toplevel)/vendor/github.com/tektoncd/plumbing/scri function install_pipeline_crd() { echo ">> Deploying Tekton Pipelines" - ko resolve -R -f config/ \ - | sed -e 's%"level": "info"%"level": "debug"%' \ + local ko_target="$(mktemp)" + ko resolve -R -f config/ > "${ko_target}" || fail_test "Pipeline image resolve failed" + cat "${ko_target}" | sed -e 's%"level": "info"%"level": "debug"%' \ | sed -e 's%loglevel.controller: "info"%loglevel.controller: "debug"%' \ | sed -e 's%loglevel.webhook: "info"%loglevel.webhook: "debug"%' \ | kubectl apply -R -f - || fail_test "Build pipeline installation failed" @@ -37,6 +38,66 @@ function install_pipeline_crd_version() { verify_pipeline_installation } +function spire_apply() { + if [ $# -lt 2 -o "$1" != "-spiffeID" ]; then + echo "spire_apply requires a spiffeID as the first arg" >&2 + exit 1 + fi + show=$(kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry show $1 $2) + if [ "$show" != "Found 0 entries" ]; then + # delete to recreate + entryid=$(echo "$show" | grep "^Entry ID" | cut -f2 -d:) + kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry delete -entryID $entryid + fi + kubectl exec -n spire deployment/spire-server -- \ + /opt/spire/bin/spire-server entry create "$@" +} + +function install_spire() { + echo ">> Deploying Spire" + DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + + echo "Creating SPIRE namespace..." + kubectl create ns spire + + echo "Applying SPIFFE CSI Driver configuration..." 
+ kubectl apply -f "$DIR"/testdata/spire/spiffe-csi-driver.yaml + + echo "Deploying SPIRE server" + kubectl apply -f "$DIR"/testdata/spire/spire-server.yaml + + echo "Deploying SPIRE agent" + kubectl apply -f "$DIR"/testdata/spire/spire-agent.yaml + + wait_until_pods_running spire || fail_test "SPIRE did not come up" + + spire_apply \ + -spiffeID spiffe://example.org/ns/spire/node/example \ + -selector k8s_psat:cluster:example-cluster \ + -selector k8s_psat:agent_ns:spire \ + -selector k8s_psat:agent_sa:spire-agent \ + -node + spire_apply \ + -spiffeID spiffe://example.org/ns/tekton-pipelines/sa/tekton-pipelines-controller \ + -parentID spiffe://example.org/ns/spire/node/example \ + -selector k8s:ns:tekton-pipelines \ + -selector k8s:pod-label:app:tekton-pipelines-controller \ + -selector k8s:sa:tekton-pipelines-controller \ + -admin +} + +function patch_pipline_spire() { + kubectl patch \ + deployment tekton-pipelines-controller \ + -n tekton-pipelines \ + --patch-file "$DIR"/testdata/patch/pipeline-controller-spire.json + + verify_pipeline_installation +} + + function verify_pipeline_installation() { # Make sure that everything is cleaned up in the current namespace. 
delete_pipeline_resources diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-tests-kind-prow-alpha.env b/vendor/github.com/tektoncd/pipeline/test/e2e-tests-kind-prow-alpha.env new file mode 100644 index 0000000000..eacb0ef5eb --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-tests-kind-prow-alpha.env @@ -0,0 +1,6 @@ +SKIP_INITIALIZE=true +PIPELINE_FEATURE_GATE=alpha +EMBEDDED_STATUS_GATE=minimal +RUN_YAML_TESTS=true +KO_DOCKER_REPO=registry.local:5000 +E2E_GO_TEST_TIMEOUT=40m diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-tests-kind-prow.env b/vendor/github.com/tektoncd/pipeline/test/e2e-tests-kind-prow.env new file mode 100644 index 0000000000..ee67102137 --- /dev/null +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-tests-kind-prow.env @@ -0,0 +1,5 @@ +SKIP_INITIALIZE=true +PIPELINE_FEATURE_GATE=stable +RUN_YAML_TESTS=true +KO_DOCKER_REPO=registry.local:5000 +E2E_GO_TEST_TIMEOUT=40m diff --git a/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh b/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh index b26240e7f6..496a38c193 100644 --- a/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh +++ b/vendor/github.com/tektoncd/pipeline/test/e2e-tests.sh @@ -22,8 +22,11 @@ source $(git rev-parse --show-toplevel)/test/e2e-common.sh # Setting defaults PIPELINE_FEATURE_GATE=${PIPELINE_FEATURE_GATE:-stable} +EMBEDDED_STATUS_GATE=${EMBEDDED_STATUS_GATE:-full} SKIP_INITIALIZE=${SKIP_INITIALIZE:="false"} RUN_YAML_TESTS=${RUN_YAML_TESTS:="true"} +SKIP_GO_E2E_TESTS=${SKIP_GO_E2E_TESTS:="false"} +E2E_GO_TEST_TIMEOUT=${E2E_GO_TEST_TIMEOUT:="20m"} failed=0 # Script entry point. 
@@ -34,9 +37,24 @@ fi header "Setting up environment" -install_pipeline_crd - -failed=0 +function alpha_gate() { + local gate="$1" + if [ "$gate" != "alpha" ] && [ "$gate" != "stable" ] && [ "$gate" != "beta" ] ; then + printf "Invalid gate %s\n" ${gate} + exit 255 + fi + if [ "$gate" == "alpha" ] ; then + printf "Setting up environement for alpha features" + install_spire + install_pipeline_crd + patch_pipline_spire + failed=0 + else + printf "Setting up environement for non-alpha features" + install_pipeline_crd + failed=0 + fi +} function set_feature_gate() { local gate="$1" @@ -50,20 +68,37 @@ function set_feature_gate() { kubectl patch configmap feature-flags -n tekton-pipelines -p "$jsonpatch" } +function set_embedded_status() { + local status="$1" + if [ "$status" != "full" ] && [ "$status" != "minimal" ] && [ "$status" != "both" ] ; then + printf "Invalid embedded status %s\n" ${status} + exit 255 + fi + printf "Setting embedded status to %s\n", ${status} + jsonpatch=$(printf "{\"data\": {\"embedded-status\": \"%s\"}}" $1) + echo "feature-flags ConfigMap patch: ${jsonpatch}" + kubectl patch configmap feature-flags -n tekton-pipelines -p "$jsonpatch" +} + function run_e2e() { # Run the integration tests header "Running Go e2e tests" - go_test_e2e -timeout=20m ./test/... || failed=1 + # Skip ./test/*.go tests if SKIP_GO_E2E_TESTS == true + if [ "${SKIP_GO_E2E_TESTS}" != "true" ]; then + go_test_e2e -timeout=${E2E_GO_TEST_TIMEOUT} ./test/... || failed=1 + fi # Run these _after_ the integration tests b/c they don't quite work all the way # and they cause a lot of noise in the logs, making it harder to debug integration # test failures. 
if [ "${RUN_YAML_TESTS}" == "true" ]; then - go_test_e2e -parallel=4 -mod=readonly -tags=examples -timeout=20m ./test/ || failed=1 + go_test_e2e -mod=readonly -tags=examples -timeout=${E2E_GO_TEST_TIMEOUT} ./test/ || failed=1 fi } +alpha_gate "$PIPELINE_FEATURE_GATE" set_feature_gate "$PIPELINE_FEATURE_GATE" +set_embedded_status "$EMBEDDED_STATUS_GATE" run_e2e (( failed )) && fail_test diff --git a/vendor/github.com/tektoncd/pipeline/test/multiarch_utils.go b/vendor/github.com/tektoncd/pipeline/test/multiarch_utils.go index b52cfdcd92..856dbf462c 100644 --- a/vendor/github.com/tektoncd/pipeline/test/multiarch_utils.go +++ b/vendor/github.com/tektoncd/pipeline/test/multiarch_utils.go @@ -107,7 +107,7 @@ func imageNamesMapping() map[string]string { "registry": getTestImage(registryImage), "node": "node:alpine3.11", "gcr.io/cloud-builders/git": "alpine/git:latest", - "docker:dind": "ibmcom/docker-s390x:dind", + "docker:dind": "ibmcom/docker-s390x:20.10", "docker": "docker:18.06.3", "mikefarah/yq:3": "danielxlee/yq:2.4.0", "stedolan/jq": "ibmcom/jq-s390x:latest", diff --git a/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh b/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh index 5665f33d73..3cb616f20c 100644 --- a/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh +++ b/vendor/github.com/tektoncd/pipeline/test/presubmit-tests.sh @@ -59,6 +59,19 @@ function check_yaml_lint() { function ko_resolve() { header "Running `ko resolve`" + + cat < .ko.yaml + defaultBaseImage: ghcr.io/distroless/static + baseImageOverrides: + # Use the combined base image for images that should include Windows support. + # NOTE: Make sure this list of images to use the combined base image is in sync with what's in tekton/publish.yaml's 'create-ko-yaml' Task. 
+ github.com/tektoncd/pipeline/cmd/entrypoint: gcr.io/tekton-releases/github.com/tektoncd/pipeline/combined-base-image:latest + github.com/tektoncd/pipeline/cmd/nop: gcr.io/tekton-releases/github.com/tektoncd/pipeline/combined-base-image:latest + github.com/tektoncd/pipeline/cmd/workingdirinit: gcr.io/tekton-releases/github.com/tektoncd/pipeline/combined-base-image:latest + + github.com/tektoncd/pipeline/cmd/git-init: ghcr.io/distroless/git +EOF + KO_DOCKER_REPO=example.com ko resolve --platform=all --push=false -R -f config 1>/dev/null } diff --git a/vendor/knative.dev/pkg/client/injection/kube/informers/factory/filtered/fake/fake_filtered_factory.go b/vendor/knative.dev/pkg/client/injection/kube/informers/factory/filtered/fake/fake_filtered_factory.go new file mode 100644 index 0000000000..3e71d49646 --- /dev/null +++ b/vendor/knative.dev/pkg/client/injection/kube/informers/factory/filtered/fake/fake_filtered_factory.go @@ -0,0 +1,59 @@ +/* +Copyright 2021 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by injection-gen. DO NOT EDIT. 
+ +package fakeFilteredFactory + +import ( + context "context" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + informers "k8s.io/client-go/informers" + fake "knative.dev/pkg/client/injection/kube/client/fake" + filtered "knative.dev/pkg/client/injection/kube/informers/factory/filtered" + controller "knative.dev/pkg/controller" + injection "knative.dev/pkg/injection" + logging "knative.dev/pkg/logging" +) + +var Get = filtered.Get + +func init() { + injection.Fake.RegisterInformerFactory(withInformerFactory) +} + +func withInformerFactory(ctx context.Context) context.Context { + c := fake.Get(ctx) + untyped := ctx.Value(filtered.LabelKey{}) + if untyped == nil { + logging.FromContext(ctx).Panic( + "Unable to fetch labelkey from context.") + } + labelSelectors := untyped.([]string) + for _, selector := range labelSelectors { + opts := []informers.SharedInformerOption{} + if injection.HasNamespaceScope(ctx) { + opts = append(opts, informers.WithNamespace(injection.GetNamespaceScope(ctx))) + } + opts = append(opts, informers.WithTweakListOptions(func(l *v1.ListOptions) { + l.LabelSelector = selector + })) + ctx = context.WithValue(ctx, filtered.Key{Selector: selector}, + informers.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...)) + } + return ctx +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 08bdeb9147..515d8fd6fb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -340,7 +340,7 @@ github.com/chavacava/garif github.com/chrismellard/docker-credential-acr-env/pkg/credhelper github.com/chrismellard/docker-credential-acr-env/pkg/registry github.com/chrismellard/docker-credential-acr-env/pkg/token -# github.com/cloudevents/sdk-go/v2 v2.5.0 +# github.com/cloudevents/sdk-go/v2 v2.10.1 ## explicit; go 1.14 github.com/cloudevents/sdk-go/v2 github.com/cloudevents/sdk-go/v2/binding @@ -1529,9 +1529,14 @@ github.com/spiffe/go-spiffe/v2/internal/x509util github.com/spiffe/go-spiffe/v2/logger 
github.com/spiffe/go-spiffe/v2/proto/spiffe/workload github.com/spiffe/go-spiffe/v2/spiffeid +github.com/spiffe/go-spiffe/v2/spiffetls/tlsconfig github.com/spiffe/go-spiffe/v2/svid/jwtsvid github.com/spiffe/go-spiffe/v2/svid/x509svid github.com/spiffe/go-spiffe/v2/workloadapi +# github.com/spiffe/spire-api-sdk v1.3.2 +## explicit; go 1.12 +github.com/spiffe/spire-api-sdk/proto/spire/api/server/entry/v1 +github.com/spiffe/spire-api-sdk/proto/spire/api/types # github.com/src-d/gcfg v1.4.0 ## explicit github.com/src-d/gcfg @@ -1574,20 +1579,24 @@ github.com/syndtr/goleveldb/leveldb/util # github.com/tdakkota/asciicheck v0.1.1 ## explicit; go 1.13 github.com/tdakkota/asciicheck -# github.com/tektoncd/pipeline v0.37.2 +# github.com/tektoncd/pipeline v0.38.1 => ../pipeline ## explicit; go 1.17 github.com/tektoncd/pipeline/pkg/apis/config github.com/tektoncd/pipeline/pkg/apis/pipeline github.com/tektoncd/pipeline/pkg/apis/pipeline/pod +github.com/tektoncd/pipeline/pkg/apis/pipeline/v1 github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1 github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1 github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1 github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/cloudevent github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1 github.com/tektoncd/pipeline/pkg/apis/validate +github.com/tektoncd/pipeline/pkg/apis/version github.com/tektoncd/pipeline/pkg/client/clientset/versioned github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme +github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1 +github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1/fake github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1 github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/fake github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1 @@ -1595,6 
+1604,7 @@ github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1bet github.com/tektoncd/pipeline/pkg/client/informers/externalversions github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline +github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1 github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1 github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1 github.com/tektoncd/pipeline/pkg/client/injection/client @@ -1614,6 +1624,7 @@ github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/tas github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/fake github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun +github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1 github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1 github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1 github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned @@ -1637,8 +1648,11 @@ github.com/tektoncd/pipeline/pkg/names github.com/tektoncd/pipeline/pkg/reconciler/events/cache github.com/tektoncd/pipeline/pkg/reconciler/events/cloudevent github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag +github.com/tektoncd/pipeline/pkg/reconciler/testing github.com/tektoncd/pipeline/pkg/remote github.com/tektoncd/pipeline/pkg/remote/oci +github.com/tektoncd/pipeline/pkg/spire +github.com/tektoncd/pipeline/pkg/spire/config github.com/tektoncd/pipeline/pkg/substitution github.com/tektoncd/pipeline/test # github.com/tektoncd/plumbing v0.0.0-20220329085922-d765a5cba75f @@ -2990,6 +3004,7 @@ knative.dev/pkg/client/injection/kube/informers/core/v1/serviceaccount/fake 
knative.dev/pkg/client/injection/kube/informers/factory knative.dev/pkg/client/injection/kube/informers/factory/fake knative.dev/pkg/client/injection/kube/informers/factory/filtered +knative.dev/pkg/client/injection/kube/informers/factory/filtered/fake knative.dev/pkg/configmap knative.dev/pkg/configmap/informer knative.dev/pkg/controller